Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.infradead.org/mtd-2.6

* git://git.infradead.org/mtd-2.6: (154 commits)
mtd: cfi_cmdset_0002: use AMD standard command-set with Winbond flash chips
mtd: cfi_cmdset_0002: Fix MODULE_ALIAS and linkage for new 0701 commandset ID
mtd: mxc_nand: Remove duplicate NAND_CMD_RESET case value
mtd: update gfp/slab.h includes
jffs2: Stop triggering block erases from jffs2_write_super()
jffs2: Rename jffs2_erase_pending_trigger() to jffs2_dirty_trigger()
jffs2: Use jffs2_garbage_collect_trigger() to trigger pending erases
jffs2: Require jffs2_garbage_collect_trigger() to be called with lock held
jffs2: Wake GC thread when there are blocks to be erased
jffs2: Erase pending blocks in GC pass, avoid invalid -EIO return
jffs2: Add 'work_done' return value from jffs2_erase_pending_blocks()
mtd: mtdchar: Do not corrupt backing device of device node inode
mtd/maps/pcmciamtd: Fix printk format for ssize_t in debug messages
drivers/mtd: Use kmemdup
mtd: cfi_cmdset_0002: Fix argument order in bootloc warning
mtd: nand: add Toshiba TC58NVG0 device ID
pcmciamtd: add another ID
pcmciamtd: coding style cleanups
pcmciamtd: fixing obvious errors
mtd: chips: add SST39WF160x NOR-flashes
...

Trivial conflicts due to dev_node removal in drivers/mtd/maps/pcmciamtd.c

+10153 -1644
+6
MAINTAINERS
··· 4762 4762 F: Documentation/rfkill.txt 4763 4763 F: net/rfkill/ 4764 4764 4765 + RICOH SMARTMEDIA/XD DRIVER 4766 + M: Maxim Levitsky <maximlevitsky@gmail.com> 4767 + S: Maintained 4768 + F: drivers/mtd/nand/r822.c 4769 + F: drivers/mtd/nand/r822.h 4770 + 4765 4771 RISCOM8 DRIVER 4766 4772 S: Orphan 4767 4773 F: Documentation/serial/riscom8.txt
-19
arch/arm/mach-ep93xx/include/mach/ts72xx.h
··· 9 9 * febff000 22000000 4K model number register 10 10 * febfe000 22400000 4K options register 11 11 * febfd000 22800000 4K options register #2 12 - * febfc000 [67]0000000 4K NAND data register 13 - * febfb000 [67]0400000 4K NAND control register 14 - * febfa000 [67]0800000 4K NAND busy register 15 12 * febf9000 10800000 4K TS-5620 RTC index register 16 13 * febf8000 11700000 4K TS-5620 RTC data register 17 14 */ ··· 36 39 37 40 #define TS72XX_OPTIONS2_TS9420 0x04 38 41 #define TS72XX_OPTIONS2_TS9420_BOOT 0x02 39 - 40 - 41 - #define TS72XX_NAND1_DATA_PHYS_BASE 0x60000000 42 - #define TS72XX_NAND2_DATA_PHYS_BASE 0x70000000 43 - #define TS72XX_NAND_DATA_VIRT_BASE 0xfebfc000 44 - #define TS72XX_NAND_DATA_SIZE 0x00001000 45 - 46 - #define TS72XX_NAND1_CONTROL_PHYS_BASE 0x60400000 47 - #define TS72XX_NAND2_CONTROL_PHYS_BASE 0x70400000 48 - #define TS72XX_NAND_CONTROL_VIRT_BASE 0xfebfb000 49 - #define TS72XX_NAND_CONTROL_SIZE 0x00001000 50 - 51 - #define TS72XX_NAND1_BUSY_PHYS_BASE 0x60800000 52 - #define TS72XX_NAND2_BUSY_PHYS_BASE 0x70800000 53 - #define TS72XX_NAND_BUSY_VIRT_BASE 0xfebfa000 54 - #define TS72XX_NAND_BUSY_SIZE 0x00001000 55 42 56 43 57 44 #define TS72XX_RTC_INDEX_VIRT_BASE 0xfebf9000
+135 -61
arch/arm/mach-ep93xx/ts72xx.c
··· 10 10 * your option) any later version. 11 11 */ 12 12 13 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14 + 13 15 #include <linux/kernel.h> 14 16 #include <linux/init.h> 15 17 #include <linux/platform_device.h> 16 18 #include <linux/io.h> 17 19 #include <linux/m48t86.h> 18 20 #include <linux/mtd/physmap.h> 21 + #include <linux/mtd/nand.h> 22 + #include <linux/mtd/partitions.h> 19 23 20 24 #include <mach/hardware.h> 21 25 #include <mach/ts72xx.h> ··· 58 54 } 59 55 }; 60 56 61 - static struct map_desc ts72xx_nand_io_desc[] __initdata = { 62 - { 63 - .virtual = TS72XX_NAND_DATA_VIRT_BASE, 64 - .pfn = __phys_to_pfn(TS72XX_NAND1_DATA_PHYS_BASE), 65 - .length = TS72XX_NAND_DATA_SIZE, 66 - .type = MT_DEVICE, 67 - }, { 68 - .virtual = TS72XX_NAND_CONTROL_VIRT_BASE, 69 - .pfn = __phys_to_pfn(TS72XX_NAND1_CONTROL_PHYS_BASE), 70 - .length = TS72XX_NAND_CONTROL_SIZE, 71 - .type = MT_DEVICE, 72 - }, { 73 - .virtual = TS72XX_NAND_BUSY_VIRT_BASE, 74 - .pfn = __phys_to_pfn(TS72XX_NAND1_BUSY_PHYS_BASE), 75 - .length = TS72XX_NAND_BUSY_SIZE, 76 - .type = MT_DEVICE, 77 - } 78 - }; 79 - 80 - static struct map_desc ts72xx_alternate_nand_io_desc[] __initdata = { 81 - { 82 - .virtual = TS72XX_NAND_DATA_VIRT_BASE, 83 - .pfn = __phys_to_pfn(TS72XX_NAND2_DATA_PHYS_BASE), 84 - .length = TS72XX_NAND_DATA_SIZE, 85 - .type = MT_DEVICE, 86 - }, { 87 - .virtual = TS72XX_NAND_CONTROL_VIRT_BASE, 88 - .pfn = __phys_to_pfn(TS72XX_NAND2_CONTROL_PHYS_BASE), 89 - .length = TS72XX_NAND_CONTROL_SIZE, 90 - .type = MT_DEVICE, 91 - }, { 92 - .virtual = TS72XX_NAND_BUSY_VIRT_BASE, 93 - .pfn = __phys_to_pfn(TS72XX_NAND2_BUSY_PHYS_BASE), 94 - .length = TS72XX_NAND_BUSY_SIZE, 95 - .type = MT_DEVICE, 96 - } 97 - }; 98 - 99 57 static void __init ts72xx_map_io(void) 100 58 { 101 59 ep93xx_map_io(); 102 60 iotable_init(ts72xx_io_desc, ARRAY_SIZE(ts72xx_io_desc)); 61 + } 103 62 104 - /* 105 - * The TS-7200 has NOR flash, the other models have NAND flash. 
106 - */ 107 - if (!board_is_ts7200()) { 108 - if (is_ts9420_installed()) { 109 - iotable_init(ts72xx_alternate_nand_io_desc, 110 - ARRAY_SIZE(ts72xx_alternate_nand_io_desc)); 111 - } else { 112 - iotable_init(ts72xx_nand_io_desc, 113 - ARRAY_SIZE(ts72xx_nand_io_desc)); 114 - } 63 + 64 + /************************************************************************* 65 + * NAND flash 66 + *************************************************************************/ 67 + #define TS72XX_NAND_CONTROL_ADDR_LINE 22 /* 0xN0400000 */ 68 + #define TS72XX_NAND_BUSY_ADDR_LINE 23 /* 0xN0800000 */ 69 + 70 + static void ts72xx_nand_hwcontrol(struct mtd_info *mtd, 71 + int cmd, unsigned int ctrl) 72 + { 73 + struct nand_chip *chip = mtd->priv; 74 + 75 + if (ctrl & NAND_CTRL_CHANGE) { 76 + void __iomem *addr = chip->IO_ADDR_R; 77 + unsigned char bits; 78 + 79 + addr += (1 << TS72XX_NAND_CONTROL_ADDR_LINE); 80 + 81 + bits = __raw_readb(addr) & ~0x07; 82 + bits |= (ctrl & NAND_NCE) << 2; /* bit 0 -> bit 2 */ 83 + bits |= (ctrl & NAND_CLE); /* bit 1 -> bit 1 */ 84 + bits |= (ctrl & NAND_ALE) >> 2; /* bit 2 -> bit 0 */ 85 + 86 + __raw_writeb(bits, addr); 87 + } 88 + 89 + if (cmd != NAND_CMD_NONE) 90 + __raw_writeb(cmd, chip->IO_ADDR_W); 91 + } 92 + 93 + static int ts72xx_nand_device_ready(struct mtd_info *mtd) 94 + { 95 + struct nand_chip *chip = mtd->priv; 96 + void __iomem *addr = chip->IO_ADDR_R; 97 + 98 + addr += (1 << TS72XX_NAND_BUSY_ADDR_LINE); 99 + 100 + return !!(__raw_readb(addr) & 0x20); 101 + } 102 + 103 + static const char *ts72xx_nand_part_probes[] = { "cmdlinepart", NULL }; 104 + 105 + #define TS72XX_BOOTROM_PART_SIZE (SZ_16K) 106 + #define TS72XX_REDBOOT_PART_SIZE (SZ_2M + SZ_1M) 107 + 108 + static struct mtd_partition ts72xx_nand_parts[] = { 109 + { 110 + .name = "TS-BOOTROM", 111 + .offset = 0, 112 + .size = TS72XX_BOOTROM_PART_SIZE, 113 + .mask_flags = MTD_WRITEABLE, /* force read-only */ 114 + }, { 115 + .name = "Linux", 116 + .offset = MTDPART_OFS_APPEND, 117 + .size = 
0, /* filled in later */ 118 + }, { 119 + .name = "RedBoot", 120 + .offset = MTDPART_OFS_APPEND, 121 + .size = MTDPART_SIZ_FULL, 122 + .mask_flags = MTD_WRITEABLE, /* force read-only */ 123 + }, 124 + }; 125 + 126 + static void ts72xx_nand_set_parts(uint64_t size, 127 + struct platform_nand_chip *chip) 128 + { 129 + /* Factory TS-72xx boards only come with 32MiB or 128MiB NAND options */ 130 + if (size == SZ_32M || size == SZ_128M) { 131 + /* Set the "Linux" partition size */ 132 + ts72xx_nand_parts[1].size = size - TS72XX_REDBOOT_PART_SIZE; 133 + 134 + chip->partitions = ts72xx_nand_parts; 135 + chip->nr_partitions = ARRAY_SIZE(ts72xx_nand_parts); 136 + } else { 137 + pr_warning("Unknown nand disk size:%lluMiB\n", size >> 20); 115 138 } 116 139 } 140 + 141 + static struct platform_nand_data ts72xx_nand_data = { 142 + .chip = { 143 + .nr_chips = 1, 144 + .chip_offset = 0, 145 + .chip_delay = 15, 146 + .part_probe_types = ts72xx_nand_part_probes, 147 + .set_parts = ts72xx_nand_set_parts, 148 + }, 149 + .ctrl = { 150 + .cmd_ctrl = ts72xx_nand_hwcontrol, 151 + .dev_ready = ts72xx_nand_device_ready, 152 + }, 153 + }; 154 + 155 + static struct resource ts72xx_nand_resource[] = { 156 + { 157 + .start = 0, /* filled in later */ 158 + .end = 0, /* filled in later */ 159 + .flags = IORESOURCE_MEM, 160 + }, 161 + }; 162 + 163 + static struct platform_device ts72xx_nand_flash = { 164 + .name = "gen_nand", 165 + .id = -1, 166 + .dev.platform_data = &ts72xx_nand_data, 167 + .resource = ts72xx_nand_resource, 168 + .num_resources = ARRAY_SIZE(ts72xx_nand_resource), 169 + }; 170 + 117 171 118 172 /************************************************************************* 119 173 * NOR flash (TS-7200 only) 120 174 *************************************************************************/ 121 - static struct physmap_flash_data ts72xx_flash_data = { 175 + static struct physmap_flash_data ts72xx_nor_data = { 122 176 .width = 2, 123 177 }; 124 178 125 - static struct resource 
ts72xx_flash_resource = { 179 + static struct resource ts72xx_nor_resource = { 126 180 .start = EP93XX_CS6_PHYS_BASE, 127 181 .end = EP93XX_CS6_PHYS_BASE + SZ_16M - 1, 128 182 .flags = IORESOURCE_MEM, 129 183 }; 130 184 131 - static struct platform_device ts72xx_flash = { 132 - .name = "physmap-flash", 133 - .id = 0, 134 - .dev = { 135 - .platform_data = &ts72xx_flash_data, 136 - }, 137 - .num_resources = 1, 138 - .resource = &ts72xx_flash_resource, 185 + static struct platform_device ts72xx_nor_flash = { 186 + .name = "physmap-flash", 187 + .id = 0, 188 + .dev.platform_data = &ts72xx_nor_data, 189 + .resource = &ts72xx_nor_resource, 190 + .num_resources = 1, 139 191 }; 140 192 141 193 static void __init ts72xx_register_flash(void) 142 194 { 143 - if (board_is_ts7200()) 144 - platform_device_register(&ts72xx_flash); 195 + if (board_is_ts7200()) { 196 + platform_device_register(&ts72xx_nor_flash); 197 + } else { 198 + resource_size_t start; 199 + 200 + if (is_ts9420_installed()) 201 + start = EP93XX_CS7_PHYS_BASE; 202 + else 203 + start = EP93XX_CS6_PHYS_BASE; 204 + 205 + ts72xx_nand_resource[0].start = start; 206 + ts72xx_nand_resource[0].end = start + SZ_16M - 1; 207 + 208 + platform_device_register(&ts72xx_nand_flash); 209 + } 145 210 } 211 + 146 212 147 213 static unsigned char ts72xx_rtc_readbyte(unsigned long addr) 148 214 {
+9
arch/arm/mach-kirkwood/common.c
··· 305 305 platform_device_register(&kirkwood_nand_flash); 306 306 } 307 307 308 + void __init kirkwood_nand_init_rnb(struct mtd_partition *parts, int nr_parts, 309 + int (*dev_ready)(struct mtd_info *)) 310 + { 311 + kirkwood_clk_ctrl |= CGC_RUNIT; 312 + kirkwood_nand_data.parts = parts; 313 + kirkwood_nand_data.nr_parts = nr_parts; 314 + kirkwood_nand_data.dev_ready = dev_ready; 315 + platform_device_register(&kirkwood_nand_flash); 316 + } 308 317 309 318 /***************************************************************************** 310 319 * SoC RTC
+2
arch/arm/mach-kirkwood/common.h
··· 16 16 struct mv_sata_platform_data; 17 17 struct mvsdio_platform_data; 18 18 struct mtd_partition; 19 + struct mtd_info; 19 20 20 21 /* 21 22 * Basic Kirkwood init functions used early by machine-setup. ··· 42 41 void kirkwood_uart0_init(void); 43 42 void kirkwood_uart1_init(void); 44 43 void kirkwood_nand_init(struct mtd_partition *parts, int nr_parts, int delay); 44 + void kirkwood_nand_init_rnb(struct mtd_partition *parts, int nr_parts, int (*dev_ready)(struct mtd_info *)); 45 45 46 46 extern int kirkwood_tclk; 47 47 extern struct sys_timer kirkwood_timer;
+1
arch/arm/plat-orion/include/plat/orion_nand.h
··· 14 14 */ 15 15 struct orion_nand_data { 16 16 struct mtd_partition *parts; 17 + int (*dev_ready)(struct mtd_info *mtd); 17 18 u32 nr_parts; 18 19 u8 ale; /* address line number connected to ALE */ 19 20 u8 cle; /* address line number connected to CLE */
+13
drivers/mtd/Kconfig
··· 304 304 This enables read only access to SmartMedia formatted NAND 305 305 flash. You can mount it with FAT file system. 306 306 307 + 308 + config SM_FTL 309 + tristate "SmartMedia/xD new translation layer" 310 + depends on EXPERIMENTAL && BLOCK 311 + select MTD_BLKDEVS 312 + select MTD_NAND_ECC 313 + help 314 + This enables new and very EXPERIMENTAL support for SmartMedia/xD 315 + FTL (Flash translation layer). 316 + Write support isn't yet well tested, therefore this code IS likely to 317 + eat your card, so please don't use it together with valuable data. 318 + Use readonly driver (CONFIG_SSFDC) instead. 319 + 307 320 config MTD_OOPS 308 321 tristate "Log panic/oops to an MTD buffer" 309 322 depends on MTD
+1
drivers/mtd/Makefile
··· 24 24 obj-$(CONFIG_INFTL) += inftl.o 25 25 obj-$(CONFIG_RFD_FTL) += rfd_ftl.o 26 26 obj-$(CONFIG_SSFDC) += ssfdc.o 27 + obj-$(CONFIG_SM_FTL) += sm_ftl.o 27 28 obj-$(CONFIG_MTD_OOPS) += mtdoops.o 28 29 29 30 nftl-objs := nftlcore.o nftlmount.o
+67 -70
drivers/mtd/chips/cfi_cmdset_0001.c
··· 615 615 return mtd; 616 616 617 617 setup_err: 618 - if(mtd) { 619 - kfree(mtd->eraseregions); 620 - kfree(mtd); 621 - } 618 + kfree(mtd->eraseregions); 619 + kfree(mtd); 622 620 kfree(cfi->cmdset_priv); 623 621 return NULL; 624 622 } ··· 725 727 /* those should be reset too since 726 728 they create memory references. */ 727 729 init_waitqueue_head(&chip->wq); 728 - spin_lock_init(&chip->_spinlock); 729 - chip->mutex = &chip->_spinlock; 730 + mutex_init(&chip->mutex); 730 731 chip++; 731 732 } 732 733 } ··· 771 774 if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS)) 772 775 break; 773 776 774 - spin_unlock(chip->mutex); 777 + mutex_unlock(&chip->mutex); 775 778 cfi_udelay(1); 776 - spin_lock(chip->mutex); 779 + mutex_lock(&chip->mutex); 777 780 /* Someone else might have been playing with it. */ 778 781 return -EAGAIN; 779 782 } ··· 820 823 return -EIO; 821 824 } 822 825 823 - spin_unlock(chip->mutex); 826 + mutex_unlock(&chip->mutex); 824 827 cfi_udelay(1); 825 - spin_lock(chip->mutex); 828 + mutex_lock(&chip->mutex); 826 829 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING. 827 830 So we can just loop here. */ 828 831 } ··· 849 852 sleep: 850 853 set_current_state(TASK_UNINTERRUPTIBLE); 851 854 add_wait_queue(&chip->wq, &wait); 852 - spin_unlock(chip->mutex); 855 + mutex_unlock(&chip->mutex); 853 856 schedule(); 854 857 remove_wait_queue(&chip->wq, &wait); 855 - spin_lock(chip->mutex); 858 + mutex_lock(&chip->mutex); 856 859 return -EAGAIN; 857 860 } 858 861 } ··· 898 901 * it'll happily send us to sleep. In any case, when 899 902 * get_chip returns success we're clear to go ahead. 
900 903 */ 901 - ret = spin_trylock(contender->mutex); 904 + ret = mutex_trylock(&contender->mutex); 902 905 spin_unlock(&shared->lock); 903 906 if (!ret) 904 907 goto retry; 905 - spin_unlock(chip->mutex); 908 + mutex_unlock(&chip->mutex); 906 909 ret = chip_ready(map, contender, contender->start, mode); 907 - spin_lock(chip->mutex); 910 + mutex_lock(&chip->mutex); 908 911 909 912 if (ret == -EAGAIN) { 910 - spin_unlock(contender->mutex); 913 + mutex_unlock(&contender->mutex); 911 914 goto retry; 912 915 } 913 916 if (ret) { 914 - spin_unlock(contender->mutex); 917 + mutex_unlock(&contender->mutex); 915 918 return ret; 916 919 } 917 920 spin_lock(&shared->lock); ··· 920 923 * in FL_SYNCING state. Put contender and retry. */ 921 924 if (chip->state == FL_SYNCING) { 922 925 put_chip(map, contender, contender->start); 923 - spin_unlock(contender->mutex); 926 + mutex_unlock(&contender->mutex); 924 927 goto retry; 925 928 } 926 - spin_unlock(contender->mutex); 929 + mutex_unlock(&contender->mutex); 927 930 } 928 931 929 932 /* Check if we already have suspended erase ··· 933 936 spin_unlock(&shared->lock); 934 937 set_current_state(TASK_UNINTERRUPTIBLE); 935 938 add_wait_queue(&chip->wq, &wait); 936 - spin_unlock(chip->mutex); 939 + mutex_unlock(&chip->mutex); 937 940 schedule(); 938 941 remove_wait_queue(&chip->wq, &wait); 939 - spin_lock(chip->mutex); 942 + mutex_lock(&chip->mutex); 940 943 goto retry; 941 944 } 942 945 ··· 966 969 if (shared->writing && shared->writing != chip) { 967 970 /* give back ownership to who we loaned it from */ 968 971 struct flchip *loaner = shared->writing; 969 - spin_lock(loaner->mutex); 972 + mutex_lock(&loaner->mutex); 970 973 spin_unlock(&shared->lock); 971 - spin_unlock(chip->mutex); 974 + mutex_unlock(&chip->mutex); 972 975 put_chip(map, loaner, loaner->start); 973 - spin_lock(chip->mutex); 974 - spin_unlock(loaner->mutex); 976 + mutex_lock(&chip->mutex); 977 + mutex_unlock(&loaner->mutex); 975 978 wake_up(&chip->wq); 976 979 
return; 977 980 } ··· 1141 1144 (void) map_read(map, adr); 1142 1145 xip_iprefetch(); 1143 1146 local_irq_enable(); 1144 - spin_unlock(chip->mutex); 1147 + mutex_unlock(&chip->mutex); 1145 1148 xip_iprefetch(); 1146 1149 cond_resched(); 1147 1150 ··· 1151 1154 * a suspended erase state. If so let's wait 1152 1155 * until it's done. 1153 1156 */ 1154 - spin_lock(chip->mutex); 1157 + mutex_lock(&chip->mutex); 1155 1158 while (chip->state != newstate) { 1156 1159 DECLARE_WAITQUEUE(wait, current); 1157 1160 set_current_state(TASK_UNINTERRUPTIBLE); 1158 1161 add_wait_queue(&chip->wq, &wait); 1159 - spin_unlock(chip->mutex); 1162 + mutex_unlock(&chip->mutex); 1160 1163 schedule(); 1161 1164 remove_wait_queue(&chip->wq, &wait); 1162 - spin_lock(chip->mutex); 1165 + mutex_lock(&chip->mutex); 1163 1166 } 1164 1167 /* Disallow XIP again */ 1165 1168 local_irq_disable(); ··· 1215 1218 int chip_state = chip->state; 1216 1219 unsigned int timeo, sleep_time, reset_timeo; 1217 1220 1218 - spin_unlock(chip->mutex); 1221 + mutex_unlock(&chip->mutex); 1219 1222 if (inval_len) 1220 1223 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len); 1221 - spin_lock(chip->mutex); 1224 + mutex_lock(&chip->mutex); 1222 1225 1223 1226 timeo = chip_op_time_max; 1224 1227 if (!timeo) ··· 1238 1241 } 1239 1242 1240 1243 /* OK Still waiting. Drop the lock, wait a while and retry. 
*/ 1241 - spin_unlock(chip->mutex); 1244 + mutex_unlock(&chip->mutex); 1242 1245 if (sleep_time >= 1000000/HZ) { 1243 1246 /* 1244 1247 * Half of the normal delay still remaining ··· 1253 1256 cond_resched(); 1254 1257 timeo--; 1255 1258 } 1256 - spin_lock(chip->mutex); 1259 + mutex_lock(&chip->mutex); 1257 1260 1258 1261 while (chip->state != chip_state) { 1259 1262 /* Someone's suspended the operation: sleep */ 1260 1263 DECLARE_WAITQUEUE(wait, current); 1261 1264 set_current_state(TASK_UNINTERRUPTIBLE); 1262 1265 add_wait_queue(&chip->wq, &wait); 1263 - spin_unlock(chip->mutex); 1266 + mutex_unlock(&chip->mutex); 1264 1267 schedule(); 1265 1268 remove_wait_queue(&chip->wq, &wait); 1266 - spin_lock(chip->mutex); 1269 + mutex_lock(&chip->mutex); 1267 1270 } 1268 1271 if (chip->erase_suspended && chip_state == FL_ERASING) { 1269 1272 /* Erase suspend occured while sleep: reset timeout */ ··· 1299 1302 /* Ensure cmd read/writes are aligned. */ 1300 1303 cmd_addr = adr & ~(map_bankwidth(map)-1); 1301 1304 1302 - spin_lock(chip->mutex); 1305 + mutex_lock(&chip->mutex); 1303 1306 1304 1307 ret = get_chip(map, chip, cmd_addr, FL_POINT); 1305 1308 ··· 1310 1313 chip->state = FL_POINT; 1311 1314 chip->ref_point_counter++; 1312 1315 } 1313 - spin_unlock(chip->mutex); 1316 + mutex_unlock(&chip->mutex); 1314 1317 1315 1318 return ret; 1316 1319 } ··· 1395 1398 else 1396 1399 thislen = len; 1397 1400 1398 - spin_lock(chip->mutex); 1401 + mutex_lock(&chip->mutex); 1399 1402 if (chip->state == FL_POINT) { 1400 1403 chip->ref_point_counter--; 1401 1404 if(chip->ref_point_counter == 0) ··· 1404 1407 printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */ 1405 1408 1406 1409 put_chip(map, chip, chip->start); 1407 - spin_unlock(chip->mutex); 1410 + mutex_unlock(&chip->mutex); 1408 1411 1409 1412 len -= thislen; 1410 1413 ofs = 0; ··· 1423 1426 /* Ensure cmd read/writes are aligned. 
*/ 1424 1427 cmd_addr = adr & ~(map_bankwidth(map)-1); 1425 1428 1426 - spin_lock(chip->mutex); 1429 + mutex_lock(&chip->mutex); 1427 1430 ret = get_chip(map, chip, cmd_addr, FL_READY); 1428 1431 if (ret) { 1429 - spin_unlock(chip->mutex); 1432 + mutex_unlock(&chip->mutex); 1430 1433 return ret; 1431 1434 } 1432 1435 ··· 1440 1443 1441 1444 put_chip(map, chip, cmd_addr); 1442 1445 1443 - spin_unlock(chip->mutex); 1446 + mutex_unlock(&chip->mutex); 1444 1447 return 0; 1445 1448 } 1446 1449 ··· 1503 1506 return -EINVAL; 1504 1507 } 1505 1508 1506 - spin_lock(chip->mutex); 1509 + mutex_lock(&chip->mutex); 1507 1510 ret = get_chip(map, chip, adr, mode); 1508 1511 if (ret) { 1509 - spin_unlock(chip->mutex); 1512 + mutex_unlock(&chip->mutex); 1510 1513 return ret; 1511 1514 } 1512 1515 ··· 1552 1555 1553 1556 xip_enable(map, chip, adr); 1554 1557 out: put_chip(map, chip, adr); 1555 - spin_unlock(chip->mutex); 1558 + mutex_unlock(&chip->mutex); 1556 1559 return ret; 1557 1560 } 1558 1561 ··· 1661 1664 /* Let's determine this according to the interleave only once */ 1662 1665 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? 
CMD(0xe8) : CMD(0xe9); 1663 1666 1664 - spin_lock(chip->mutex); 1667 + mutex_lock(&chip->mutex); 1665 1668 ret = get_chip(map, chip, cmd_adr, FL_WRITING); 1666 1669 if (ret) { 1667 - spin_unlock(chip->mutex); 1670 + mutex_unlock(&chip->mutex); 1668 1671 return ret; 1669 1672 } 1670 1673 ··· 1795 1798 1796 1799 xip_enable(map, chip, cmd_adr); 1797 1800 out: put_chip(map, chip, cmd_adr); 1798 - spin_unlock(chip->mutex); 1801 + mutex_unlock(&chip->mutex); 1799 1802 return ret; 1800 1803 } 1801 1804 ··· 1874 1877 adr += chip->start; 1875 1878 1876 1879 retry: 1877 - spin_lock(chip->mutex); 1880 + mutex_lock(&chip->mutex); 1878 1881 ret = get_chip(map, chip, adr, FL_ERASING); 1879 1882 if (ret) { 1880 - spin_unlock(chip->mutex); 1883 + mutex_unlock(&chip->mutex); 1881 1884 return ret; 1882 1885 } 1883 1886 ··· 1933 1936 } else if (chipstatus & 0x20 && retries--) { 1934 1937 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus); 1935 1938 put_chip(map, chip, adr); 1936 - spin_unlock(chip->mutex); 1939 + mutex_unlock(&chip->mutex); 1937 1940 goto retry; 1938 1941 } else { 1939 1942 printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus); ··· 1945 1948 1946 1949 xip_enable(map, chip, adr); 1947 1950 out: put_chip(map, chip, adr); 1948 - spin_unlock(chip->mutex); 1951 + mutex_unlock(&chip->mutex); 1949 1952 return ret; 1950 1953 } 1951 1954 ··· 1978 1981 for (i=0; !ret && i<cfi->numchips; i++) { 1979 1982 chip = &cfi->chips[i]; 1980 1983 1981 - spin_lock(chip->mutex); 1984 + mutex_lock(&chip->mutex); 1982 1985 ret = get_chip(map, chip, chip->start, FL_SYNCING); 1983 1986 1984 1987 if (!ret) { ··· 1989 1992 * with the chip now anyway. 
1990 1993 */ 1991 1994 } 1992 - spin_unlock(chip->mutex); 1995 + mutex_unlock(&chip->mutex); 1993 1996 } 1994 1997 1995 1998 /* Unlock the chips again */ ··· 1997 2000 for (i--; i >=0; i--) { 1998 2001 chip = &cfi->chips[i]; 1999 2002 2000 - spin_lock(chip->mutex); 2003 + mutex_lock(&chip->mutex); 2001 2004 2002 2005 if (chip->state == FL_SYNCING) { 2003 2006 chip->state = chip->oldstate; 2004 2007 chip->oldstate = FL_READY; 2005 2008 wake_up(&chip->wq); 2006 2009 } 2007 - spin_unlock(chip->mutex); 2010 + mutex_unlock(&chip->mutex); 2008 2011 } 2009 2012 } 2010 2013 ··· 2050 2053 2051 2054 adr += chip->start; 2052 2055 2053 - spin_lock(chip->mutex); 2056 + mutex_lock(&chip->mutex); 2054 2057 ret = get_chip(map, chip, adr, FL_LOCKING); 2055 2058 if (ret) { 2056 - spin_unlock(chip->mutex); 2059 + mutex_unlock(&chip->mutex); 2057 2060 return ret; 2058 2061 } 2059 2062 ··· 2087 2090 2088 2091 xip_enable(map, chip, adr); 2089 2092 out: put_chip(map, chip, adr); 2090 - spin_unlock(chip->mutex); 2093 + mutex_unlock(&chip->mutex); 2091 2094 return ret; 2092 2095 } 2093 2096 ··· 2152 2155 struct cfi_private *cfi = map->fldrv_priv; 2153 2156 int ret; 2154 2157 2155 - spin_lock(chip->mutex); 2158 + mutex_lock(&chip->mutex); 2156 2159 ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY); 2157 2160 if (ret) { 2158 - spin_unlock(chip->mutex); 2161 + mutex_unlock(&chip->mutex); 2159 2162 return ret; 2160 2163 } 2161 2164 ··· 2174 2177 INVALIDATE_CACHED_RANGE(map, chip->start + offset, size); 2175 2178 2176 2179 put_chip(map, chip, chip->start); 2177 - spin_unlock(chip->mutex); 2180 + mutex_unlock(&chip->mutex); 2178 2181 return 0; 2179 2182 } 2180 2183 ··· 2449 2452 for (i=0; !ret && i<cfi->numchips; i++) { 2450 2453 chip = &cfi->chips[i]; 2451 2454 2452 - spin_lock(chip->mutex); 2455 + mutex_lock(&chip->mutex); 2453 2456 2454 2457 switch (chip->state) { 2455 2458 case FL_READY: ··· 2481 2484 case FL_PM_SUSPENDED: 2482 2485 break; 2483 2486 } 2484 - spin_unlock(chip->mutex); 
2487 + mutex_unlock(&chip->mutex); 2485 2488 } 2486 2489 2487 2490 /* Unlock the chips again */ ··· 2490 2493 for (i--; i >=0; i--) { 2491 2494 chip = &cfi->chips[i]; 2492 2495 2493 - spin_lock(chip->mutex); 2496 + mutex_lock(&chip->mutex); 2494 2497 2495 2498 if (chip->state == FL_PM_SUSPENDED) { 2496 2499 /* No need to force it into a known state here, ··· 2500 2503 chip->oldstate = FL_READY; 2501 2504 wake_up(&chip->wq); 2502 2505 } 2503 - spin_unlock(chip->mutex); 2506 + mutex_unlock(&chip->mutex); 2504 2507 } 2505 2508 } 2506 2509 ··· 2541 2544 2542 2545 chip = &cfi->chips[i]; 2543 2546 2544 - spin_lock(chip->mutex); 2547 + mutex_lock(&chip->mutex); 2545 2548 2546 2549 /* Go to known state. Chip may have been power cycled */ 2547 2550 if (chip->state == FL_PM_SUSPENDED) { ··· 2550 2553 wake_up(&chip->wq); 2551 2554 } 2552 2555 2553 - spin_unlock(chip->mutex); 2556 + mutex_unlock(&chip->mutex); 2554 2557 } 2555 2558 2556 2559 if ((mtd->flags & MTD_POWERUP_LOCK) ··· 2570 2573 /* force the completion of any ongoing operation 2571 2574 and switch to array mode so any bootloader in 2572 2575 flash is accessible for soft reboot. */ 2573 - spin_lock(chip->mutex); 2576 + mutex_lock(&chip->mutex); 2574 2577 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN); 2575 2578 if (!ret) { 2576 2579 map_write(map, CMD(0xff), chip->start); 2577 2580 chip->state = FL_SHUTDOWN; 2578 2581 put_chip(map, chip, chip->start); 2579 2582 } 2580 - spin_unlock(chip->mutex); 2583 + mutex_unlock(&chip->mutex); 2581 2584 } 2582 2585 2583 2586 return 0;
+228 -120
drivers/mtd/chips/cfi_cmdset_0002.c
··· 32 32 #include <linux/slab.h> 33 33 #include <linux/delay.h> 34 34 #include <linux/interrupt.h> 35 + #include <linux/reboot.h> 35 36 #include <linux/mtd/compatmac.h> 36 37 #include <linux/mtd/map.h> 37 38 #include <linux/mtd/mtd.h> ··· 44 43 45 44 #define MAX_WORD_RETRIES 3 46 45 47 - #define MANUFACTURER_AMD 0x0001 48 - #define MANUFACTURER_ATMEL 0x001F 49 - #define MANUFACTURER_MACRONIX 0x00C2 50 - #define MANUFACTURER_SST 0x00BF 51 46 #define SST49LF004B 0x0060 52 47 #define SST49LF040B 0x0050 53 48 #define SST49LF008A 0x005a ··· 57 60 static void cfi_amdstd_sync (struct mtd_info *); 58 61 static int cfi_amdstd_suspend (struct mtd_info *); 59 62 static void cfi_amdstd_resume (struct mtd_info *); 63 + static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *); 60 64 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 61 65 62 66 static void cfi_amdstd_destroy(struct mtd_info *); ··· 166 168 * This reduces the risk of false detection due to 167 169 * the 8-bit device ID. 168 170 */ 169 - (cfi->mfr == MANUFACTURER_MACRONIX)) { 171 + (cfi->mfr == CFI_MFR_MACRONIX)) { 170 172 DEBUG(MTD_DEBUG_LEVEL1, 171 173 "%s: Macronix MX29LV400C with bottom boot block" 172 174 " detected\n", map->name); ··· 258 260 mtd->flags |= MTD_POWERUP_LOCK; 259 261 } 260 262 263 + static void fixup_old_sst_eraseregion(struct mtd_info *mtd) 264 + { 265 + struct map_info *map = mtd->priv; 266 + struct cfi_private *cfi = map->fldrv_priv; 267 + 268 + /* 269 + * These flashes report two seperate eraseblock regions based on the 270 + * sector_erase-size and block_erase-size, although they both operate on the 271 + * same memory. This is not allowed according to CFI, so we just pick the 272 + * sector_erase-size. 
273 + */ 274 + cfi->cfiq->NumEraseRegions = 1; 275 + } 276 + 277 + static void fixup_sst39vf(struct mtd_info *mtd, void *param) 278 + { 279 + struct map_info *map = mtd->priv; 280 + struct cfi_private *cfi = map->fldrv_priv; 281 + 282 + fixup_old_sst_eraseregion(mtd); 283 + 284 + cfi->addr_unlock1 = 0x5555; 285 + cfi->addr_unlock2 = 0x2AAA; 286 + } 287 + 288 + static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param) 289 + { 290 + struct map_info *map = mtd->priv; 291 + struct cfi_private *cfi = map->fldrv_priv; 292 + 293 + fixup_old_sst_eraseregion(mtd); 294 + 295 + cfi->addr_unlock1 = 0x555; 296 + cfi->addr_unlock2 = 0x2AA; 297 + } 298 + 261 299 static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param) 262 300 { 263 301 struct map_info *map = mtd->priv; ··· 316 282 } 317 283 } 318 284 285 + /* Used to fix CFI-Tables of chips without Extended Query Tables */ 286 + static struct cfi_fixup cfi_nopri_fixup_table[] = { 287 + { CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, // SST39VF1602 288 + { CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, // SST39VF1601 289 + { CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, // SST39VF3202 290 + { CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, // SST39VF3201 291 + { CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, // SST39VF3202B 292 + { CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, // SST39VF3201B 293 + { CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, // SST39VF6402B 294 + { CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, // SST39VF6401B 295 + { 0, 0, NULL, NULL } 296 + }; 297 + 319 298 static struct cfi_fixup cfi_fixup_table[] = { 320 299 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL }, 321 300 #ifdef AMD_BOOTLOC_BUG 322 301 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL }, 323 - { MANUFACTURER_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL }, 302 + { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL }, 324 303 #endif 325 304 { CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, }, 326 
305 { CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, }, ··· 351 304 { 0, 0, NULL, NULL } 352 305 }; 353 306 static struct cfi_fixup jedec_fixup_table[] = { 354 - { MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, }, 355 - { MANUFACTURER_SST, SST49LF040B, fixup_use_fwh_lock, NULL, }, 356 - { MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, }, 307 + { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, }, 308 + { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, }, 309 + { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, }, 357 310 { 0, 0, NULL, NULL } 358 311 }; 359 312 ··· 402 355 mtd->name = map->name; 403 356 mtd->writesize = 1; 404 357 358 + mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot; 359 + 405 360 if (cfi->cfi_mode==CFI_MODE_CFI){ 406 361 unsigned char bootloc; 407 - /* 408 - * It's a real CFI chip, not one for which the probe 409 - * routine faked a CFI structure. So we read the feature 410 - * table from it. 411 - */ 412 362 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR; 413 363 struct cfi_pri_amdstd *extp; 414 364 415 365 extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu"); 416 - if (!extp) { 417 - kfree(mtd); 418 - return NULL; 419 - } 366 + if (extp) { 367 + /* 368 + * It's a real CFI chip, not one for which the probe 369 + * routine faked a CFI structure. 
370 + */ 371 + cfi_fixup_major_minor(cfi, extp); 420 372 421 - cfi_fixup_major_minor(cfi, extp); 373 + if (extp->MajorVersion != '1' || 374 + (extp->MinorVersion < '0' || extp->MinorVersion > '4')) { 375 + printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query " 376 + "version %c.%c.\n", extp->MajorVersion, 377 + extp->MinorVersion); 378 + kfree(extp); 379 + kfree(mtd); 380 + return NULL; 381 + } 422 382 423 - if (extp->MajorVersion != '1' || 424 - (extp->MinorVersion < '0' || extp->MinorVersion > '4')) { 425 - printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query " 426 - "version %c.%c.\n", extp->MajorVersion, 427 - extp->MinorVersion); 428 - kfree(extp); 429 - kfree(mtd); 430 - return NULL; 431 - } 383 + /* Install our own private info structure */ 384 + cfi->cmdset_priv = extp; 432 385 433 - /* Install our own private info structure */ 434 - cfi->cmdset_priv = extp; 435 - 436 - /* Apply cfi device specific fixups */ 437 - cfi_fixup(mtd, cfi_fixup_table); 386 + /* Apply cfi device specific fixups */ 387 + cfi_fixup(mtd, cfi_fixup_table); 438 388 439 389 #ifdef DEBUG_CFI_FEATURES 440 - /* Tell the user about it in lots of lovely detail */ 441 - cfi_tell_features(extp); 390 + /* Tell the user about it in lots of lovely detail */ 391 + cfi_tell_features(extp); 442 392 #endif 443 393 444 - bootloc = extp->TopBottom; 445 - if ((bootloc != 2) && (bootloc != 3)) { 446 - printk(KERN_WARNING "%s: CFI does not contain boot " 447 - "bank location. 
Assuming top.\n", map->name); 448 - bootloc = 2; 449 - } 450 - 451 - if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) { 452 - printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name); 453 - 454 - for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) { 455 - int j = (cfi->cfiq->NumEraseRegions-1)-i; 456 - __u32 swap; 457 - 458 - swap = cfi->cfiq->EraseRegionInfo[i]; 459 - cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j]; 460 - cfi->cfiq->EraseRegionInfo[j] = swap; 394 + bootloc = extp->TopBottom; 395 + if ((bootloc < 2) || (bootloc > 5)) { 396 + printk(KERN_WARNING "%s: CFI contains unrecognised boot " 397 + "bank location (%d). Assuming bottom.\n", 398 + map->name, bootloc); 399 + bootloc = 2; 461 400 } 401 + 402 + if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) { 403 + printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name); 404 + 405 + for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) { 406 + int j = (cfi->cfiq->NumEraseRegions-1)-i; 407 + __u32 swap; 408 + 409 + swap = cfi->cfiq->EraseRegionInfo[i]; 410 + cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j]; 411 + cfi->cfiq->EraseRegionInfo[j] = swap; 412 + } 413 + } 414 + /* Set the default CFI lock/unlock addresses */ 415 + cfi->addr_unlock1 = 0x555; 416 + cfi->addr_unlock2 = 0x2aa; 462 417 } 463 - /* Set the default CFI lock/unlock addresses */ 464 - cfi->addr_unlock1 = 0x555; 465 - cfi->addr_unlock2 = 0x2aa; 418 + cfi_fixup(mtd, cfi_nopri_fixup_table); 419 + 420 + if (!cfi->addr_unlock1 || !cfi->addr_unlock2) { 421 + kfree(mtd); 422 + return NULL; 423 + } 466 424 467 425 } /* CFI mode */ 468 426 else if (cfi->cfi_mode == CFI_MODE_JEDEC) { ··· 489 437 490 438 return cfi_amdstd_setup(mtd); 491 439 } 440 + struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002"))); 441 + struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002"))); 
492 442 EXPORT_SYMBOL_GPL(cfi_cmdset_0002); 443 + EXPORT_SYMBOL_GPL(cfi_cmdset_0006); 444 + EXPORT_SYMBOL_GPL(cfi_cmdset_0701); 493 445 494 446 static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) 495 447 { ··· 547 491 #endif 548 492 549 493 __module_get(THIS_MODULE); 494 + register_reboot_notifier(&mtd->reboot_notifier); 550 495 return mtd; 551 496 552 497 setup_err: 553 - if(mtd) { 554 - kfree(mtd->eraseregions); 555 - kfree(mtd); 556 - } 498 + kfree(mtd->eraseregions); 499 + kfree(mtd); 557 500 kfree(cfi->cmdset_priv); 558 501 kfree(cfi->cfiq); 559 502 return NULL; ··· 626 571 printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); 627 572 return -EIO; 628 573 } 629 - spin_unlock(chip->mutex); 574 + mutex_unlock(&chip->mutex); 630 575 cfi_udelay(1); 631 - spin_lock(chip->mutex); 576 + mutex_lock(&chip->mutex); 632 577 /* Someone else might have been playing with it. */ 633 578 goto retry; 634 579 } ··· 672 617 return -EIO; 673 618 } 674 619 675 - spin_unlock(chip->mutex); 620 + mutex_unlock(&chip->mutex); 676 621 cfi_udelay(1); 677 - spin_lock(chip->mutex); 622 + mutex_lock(&chip->mutex); 678 623 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING. 679 624 So we can just loop here. */ 680 625 } ··· 689 634 chip->state = FL_READY; 690 635 return 0; 691 636 637 + case FL_SHUTDOWN: 638 + /* The machine is rebooting */ 639 + return -EIO; 640 + 692 641 case FL_POINT: 693 642 /* Only if there's no operation suspended... 
*/ 694 643 if (mode == FL_READY && chip->oldstate == FL_READY) ··· 702 643 sleep: 703 644 set_current_state(TASK_UNINTERRUPTIBLE); 704 645 add_wait_queue(&chip->wq, &wait); 705 - spin_unlock(chip->mutex); 646 + mutex_unlock(&chip->mutex); 706 647 schedule(); 707 648 remove_wait_queue(&chip->wq, &wait); 708 - spin_lock(chip->mutex); 649 + mutex_lock(&chip->mutex); 709 650 goto resettime; 710 651 } 711 652 } ··· 837 778 (void) map_read(map, adr); 838 779 xip_iprefetch(); 839 780 local_irq_enable(); 840 - spin_unlock(chip->mutex); 781 + mutex_unlock(&chip->mutex); 841 782 xip_iprefetch(); 842 783 cond_resched(); 843 784 ··· 847 788 * a suspended erase state. If so let's wait 848 789 * until it's done. 849 790 */ 850 - spin_lock(chip->mutex); 791 + mutex_lock(&chip->mutex); 851 792 while (chip->state != FL_XIP_WHILE_ERASING) { 852 793 DECLARE_WAITQUEUE(wait, current); 853 794 set_current_state(TASK_UNINTERRUPTIBLE); 854 795 add_wait_queue(&chip->wq, &wait); 855 - spin_unlock(chip->mutex); 796 + mutex_unlock(&chip->mutex); 856 797 schedule(); 857 798 remove_wait_queue(&chip->wq, &wait); 858 - spin_lock(chip->mutex); 799 + mutex_lock(&chip->mutex); 859 800 } 860 801 /* Disallow XIP again */ 861 802 local_irq_disable(); ··· 917 858 918 859 #define UDELAY(map, chip, adr, usec) \ 919 860 do { \ 920 - spin_unlock(chip->mutex); \ 861 + mutex_unlock(&chip->mutex); \ 921 862 cfi_udelay(usec); \ 922 - spin_lock(chip->mutex); \ 863 + mutex_lock(&chip->mutex); \ 923 864 } while (0) 924 865 925 866 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ 926 867 do { \ 927 - spin_unlock(chip->mutex); \ 868 + mutex_unlock(&chip->mutex); \ 928 869 INVALIDATE_CACHED_RANGE(map, adr, len); \ 929 870 cfi_udelay(usec); \ 930 - spin_lock(chip->mutex); \ 871 + mutex_lock(&chip->mutex); \ 931 872 } while (0) 932 873 933 874 #endif ··· 943 884 /* Ensure cmd read/writes are aligned. 
*/ 944 885 cmd_addr = adr & ~(map_bankwidth(map)-1); 945 886 946 - spin_lock(chip->mutex); 887 + mutex_lock(&chip->mutex); 947 888 ret = get_chip(map, chip, cmd_addr, FL_READY); 948 889 if (ret) { 949 - spin_unlock(chip->mutex); 890 + mutex_unlock(&chip->mutex); 950 891 return ret; 951 892 } 952 893 ··· 959 900 960 901 put_chip(map, chip, cmd_addr); 961 902 962 - spin_unlock(chip->mutex); 903 + mutex_unlock(&chip->mutex); 963 904 return 0; 964 905 } 965 906 ··· 1013 954 struct cfi_private *cfi = map->fldrv_priv; 1014 955 1015 956 retry: 1016 - spin_lock(chip->mutex); 957 + mutex_lock(&chip->mutex); 1017 958 1018 959 if (chip->state != FL_READY){ 1019 960 #if 0 ··· 1022 963 set_current_state(TASK_UNINTERRUPTIBLE); 1023 964 add_wait_queue(&chip->wq, &wait); 1024 965 1025 - spin_unlock(chip->mutex); 966 + mutex_unlock(&chip->mutex); 1026 967 1027 968 schedule(); 1028 969 remove_wait_queue(&chip->wq, &wait); ··· 1051 992 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1052 993 1053 994 wake_up(&chip->wq); 1054 - spin_unlock(chip->mutex); 995 + mutex_unlock(&chip->mutex); 1055 996 1056 997 return 0; 1057 998 } ··· 1120 1061 1121 1062 adr += chip->start; 1122 1063 1123 - spin_lock(chip->mutex); 1064 + mutex_lock(&chip->mutex); 1124 1065 ret = get_chip(map, chip, adr, FL_WRITING); 1125 1066 if (ret) { 1126 - spin_unlock(chip->mutex); 1067 + mutex_unlock(&chip->mutex); 1127 1068 return ret; 1128 1069 } 1129 1070 ··· 1166 1107 1167 1108 set_current_state(TASK_UNINTERRUPTIBLE); 1168 1109 add_wait_queue(&chip->wq, &wait); 1169 - spin_unlock(chip->mutex); 1110 + mutex_unlock(&chip->mutex); 1170 1111 schedule(); 1171 1112 remove_wait_queue(&chip->wq, &wait); 1172 1113 timeo = jiffies + (HZ / 2); /* FIXME */ 1173 - spin_lock(chip->mutex); 1114 + mutex_lock(&chip->mutex); 1174 1115 continue; 1175 1116 } 1176 1117 ··· 1202 1143 op_done: 1203 1144 chip->state = FL_READY; 1204 1145 put_chip(map, chip, adr); 1205 - spin_unlock(chip->mutex); 
1146 + mutex_unlock(&chip->mutex); 1206 1147 1207 1148 return ret; 1208 1149 } ··· 1234 1175 map_word tmp_buf; 1235 1176 1236 1177 retry: 1237 - spin_lock(cfi->chips[chipnum].mutex); 1178 + mutex_lock(&cfi->chips[chipnum].mutex); 1238 1179 1239 1180 if (cfi->chips[chipnum].state != FL_READY) { 1240 1181 #if 0 ··· 1243 1184 set_current_state(TASK_UNINTERRUPTIBLE); 1244 1185 add_wait_queue(&cfi->chips[chipnum].wq, &wait); 1245 1186 1246 - spin_unlock(cfi->chips[chipnum].mutex); 1187 + mutex_unlock(&cfi->chips[chipnum].mutex); 1247 1188 1248 1189 schedule(); 1249 1190 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); ··· 1257 1198 /* Load 'tmp_buf' with old contents of flash */ 1258 1199 tmp_buf = map_read(map, bus_ofs+chipstart); 1259 1200 1260 - spin_unlock(cfi->chips[chipnum].mutex); 1201 + mutex_unlock(&cfi->chips[chipnum].mutex); 1261 1202 1262 1203 /* Number of bytes to copy from buffer */ 1263 1204 n = min_t(int, len, map_bankwidth(map)-i); ··· 1312 1253 map_word tmp_buf; 1313 1254 1314 1255 retry1: 1315 - spin_lock(cfi->chips[chipnum].mutex); 1256 + mutex_lock(&cfi->chips[chipnum].mutex); 1316 1257 1317 1258 if (cfi->chips[chipnum].state != FL_READY) { 1318 1259 #if 0 ··· 1321 1262 set_current_state(TASK_UNINTERRUPTIBLE); 1322 1263 add_wait_queue(&cfi->chips[chipnum].wq, &wait); 1323 1264 1324 - spin_unlock(cfi->chips[chipnum].mutex); 1265 + mutex_unlock(&cfi->chips[chipnum].mutex); 1325 1266 1326 1267 schedule(); 1327 1268 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); ··· 1334 1275 1335 1276 tmp_buf = map_read(map, ofs + chipstart); 1336 1277 1337 - spin_unlock(cfi->chips[chipnum].mutex); 1278 + mutex_unlock(&cfi->chips[chipnum].mutex); 1338 1279 1339 1280 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); 1340 1281 ··· 1369 1310 adr += chip->start; 1370 1311 cmd_adr = adr; 1371 1312 1372 - spin_lock(chip->mutex); 1313 + mutex_lock(&chip->mutex); 1373 1314 ret = get_chip(map, chip, adr, FL_WRITING); 1374 1315 if (ret) { 1375 - 
spin_unlock(chip->mutex); 1316 + mutex_unlock(&chip->mutex); 1376 1317 return ret; 1377 1318 } 1378 1319 ··· 1427 1368 1428 1369 set_current_state(TASK_UNINTERRUPTIBLE); 1429 1370 add_wait_queue(&chip->wq, &wait); 1430 - spin_unlock(chip->mutex); 1371 + mutex_unlock(&chip->mutex); 1431 1372 schedule(); 1432 1373 remove_wait_queue(&chip->wq, &wait); 1433 1374 timeo = jiffies + (HZ / 2); /* FIXME */ 1434 - spin_lock(chip->mutex); 1375 + mutex_lock(&chip->mutex); 1435 1376 continue; 1436 1377 } 1437 1378 ··· 1459 1400 op_done: 1460 1401 chip->state = FL_READY; 1461 1402 put_chip(map, chip, adr); 1462 - spin_unlock(chip->mutex); 1403 + mutex_unlock(&chip->mutex); 1463 1404 1464 1405 return ret; 1465 1406 } ··· 1559 1500 1560 1501 adr = cfi->addr_unlock1; 1561 1502 1562 - spin_lock(chip->mutex); 1503 + mutex_lock(&chip->mutex); 1563 1504 ret = get_chip(map, chip, adr, FL_WRITING); 1564 1505 if (ret) { 1565 - spin_unlock(chip->mutex); 1506 + mutex_unlock(&chip->mutex); 1566 1507 return ret; 1567 1508 } 1568 1509 ··· 1595 1536 /* Someone's suspended the erase. 
Sleep */ 1596 1537 set_current_state(TASK_UNINTERRUPTIBLE); 1597 1538 add_wait_queue(&chip->wq, &wait); 1598 - spin_unlock(chip->mutex); 1539 + mutex_unlock(&chip->mutex); 1599 1540 schedule(); 1600 1541 remove_wait_queue(&chip->wq, &wait); 1601 - spin_lock(chip->mutex); 1542 + mutex_lock(&chip->mutex); 1602 1543 continue; 1603 1544 } 1604 1545 if (chip->erase_suspended) { ··· 1632 1573 chip->state = FL_READY; 1633 1574 xip_enable(map, chip, adr); 1634 1575 put_chip(map, chip, adr); 1635 - spin_unlock(chip->mutex); 1576 + mutex_unlock(&chip->mutex); 1636 1577 1637 1578 return ret; 1638 1579 } ··· 1647 1588 1648 1589 adr += chip->start; 1649 1590 1650 - spin_lock(chip->mutex); 1591 + mutex_lock(&chip->mutex); 1651 1592 ret = get_chip(map, chip, adr, FL_ERASING); 1652 1593 if (ret) { 1653 - spin_unlock(chip->mutex); 1594 + mutex_unlock(&chip->mutex); 1654 1595 return ret; 1655 1596 } 1656 1597 ··· 1683 1624 /* Someone's suspended the erase. Sleep */ 1684 1625 set_current_state(TASK_UNINTERRUPTIBLE); 1685 1626 add_wait_queue(&chip->wq, &wait); 1686 - spin_unlock(chip->mutex); 1627 + mutex_unlock(&chip->mutex); 1687 1628 schedule(); 1688 1629 remove_wait_queue(&chip->wq, &wait); 1689 - spin_lock(chip->mutex); 1630 + mutex_lock(&chip->mutex); 1690 1631 continue; 1691 1632 } 1692 1633 if (chip->erase_suspended) { ··· 1722 1663 1723 1664 chip->state = FL_READY; 1724 1665 put_chip(map, chip, adr); 1725 - spin_unlock(chip->mutex); 1666 + mutex_unlock(&chip->mutex); 1726 1667 return ret; 1727 1668 } 1728 1669 ··· 1774 1715 struct cfi_private *cfi = map->fldrv_priv; 1775 1716 int ret; 1776 1717 1777 - spin_lock(chip->mutex); 1718 + mutex_lock(&chip->mutex); 1778 1719 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); 1779 1720 if (ret) 1780 1721 goto out_unlock; ··· 1800 1741 ret = 0; 1801 1742 1802 1743 out_unlock: 1803 - spin_unlock(chip->mutex); 1744 + mutex_unlock(&chip->mutex); 1804 1745 return ret; 1805 1746 } 1806 1747 ··· 1810 1751 struct cfi_private *cfi = 
map->fldrv_priv; 1811 1752 int ret; 1812 1753 1813 - spin_lock(chip->mutex); 1754 + mutex_lock(&chip->mutex); 1814 1755 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING); 1815 1756 if (ret) 1816 1757 goto out_unlock; ··· 1828 1769 ret = 0; 1829 1770 1830 1771 out_unlock: 1831 - spin_unlock(chip->mutex); 1772 + mutex_unlock(&chip->mutex); 1832 1773 return ret; 1833 1774 } 1834 1775 ··· 1856 1797 chip = &cfi->chips[i]; 1857 1798 1858 1799 retry: 1859 - spin_lock(chip->mutex); 1800 + mutex_lock(&chip->mutex); 1860 1801 1861 1802 switch(chip->state) { 1862 1803 case FL_READY: ··· 1870 1811 * with the chip now anyway. 1871 1812 */ 1872 1813 case FL_SYNCING: 1873 - spin_unlock(chip->mutex); 1814 + mutex_unlock(&chip->mutex); 1874 1815 break; 1875 1816 1876 1817 default: ··· 1878 1819 set_current_state(TASK_UNINTERRUPTIBLE); 1879 1820 add_wait_queue(&chip->wq, &wait); 1880 1821 1881 - spin_unlock(chip->mutex); 1822 + mutex_unlock(&chip->mutex); 1882 1823 1883 1824 schedule(); 1884 1825 ··· 1893 1834 for (i--; i >=0; i--) { 1894 1835 chip = &cfi->chips[i]; 1895 1836 1896 - spin_lock(chip->mutex); 1837 + mutex_lock(&chip->mutex); 1897 1838 1898 1839 if (chip->state == FL_SYNCING) { 1899 1840 chip->state = chip->oldstate; 1900 1841 wake_up(&chip->wq); 1901 1842 } 1902 - spin_unlock(chip->mutex); 1843 + mutex_unlock(&chip->mutex); 1903 1844 } 1904 1845 } 1905 1846 ··· 1915 1856 for (i=0; !ret && i<cfi->numchips; i++) { 1916 1857 chip = &cfi->chips[i]; 1917 1858 1918 - spin_lock(chip->mutex); 1859 + mutex_lock(&chip->mutex); 1919 1860 1920 1861 switch(chip->state) { 1921 1862 case FL_READY: ··· 1935 1876 ret = -EAGAIN; 1936 1877 break; 1937 1878 } 1938 - spin_unlock(chip->mutex); 1879 + mutex_unlock(&chip->mutex); 1939 1880 } 1940 1881 1941 1882 /* Unlock the chips again */ ··· 1944 1885 for (i--; i >=0; i--) { 1945 1886 chip = &cfi->chips[i]; 1946 1887 1947 - spin_lock(chip->mutex); 1888 + mutex_lock(&chip->mutex); 1948 1889 1949 1890 if (chip->state == 
FL_PM_SUSPENDED) { 1950 1891 chip->state = chip->oldstate; 1951 1892 wake_up(&chip->wq); 1952 1893 } 1953 - spin_unlock(chip->mutex); 1894 + mutex_unlock(&chip->mutex); 1954 1895 } 1955 1896 } 1956 1897 ··· 1969 1910 1970 1911 chip = &cfi->chips[i]; 1971 1912 1972 - spin_lock(chip->mutex); 1913 + mutex_lock(&chip->mutex); 1973 1914 1974 1915 if (chip->state == FL_PM_SUSPENDED) { 1975 1916 chip->state = FL_READY; ··· 1979 1920 else 1980 1921 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n"); 1981 1922 1982 - spin_unlock(chip->mutex); 1923 + mutex_unlock(&chip->mutex); 1983 1924 } 1984 1925 } 1926 + 1927 + 1928 + /* 1929 + * Ensure that the flash device is put back into read array mode before 1930 + * unloading the driver or rebooting. On some systems, rebooting while 1931 + * the flash is in query/program/erase mode will prevent the CPU from 1932 + * fetching the bootloader code, requiring a hard reset or power cycle. 1933 + */ 1934 + static int cfi_amdstd_reset(struct mtd_info *mtd) 1935 + { 1936 + struct map_info *map = mtd->priv; 1937 + struct cfi_private *cfi = map->fldrv_priv; 1938 + int i, ret; 1939 + struct flchip *chip; 1940 + 1941 + for (i = 0; i < cfi->numchips; i++) { 1942 + 1943 + chip = &cfi->chips[i]; 1944 + 1945 + mutex_lock(&chip->mutex); 1946 + 1947 + ret = get_chip(map, chip, chip->start, FL_SHUTDOWN); 1948 + if (!ret) { 1949 + map_write(map, CMD(0xF0), chip->start); 1950 + chip->state = FL_SHUTDOWN; 1951 + put_chip(map, chip, chip->start); 1952 + } 1953 + 1954 + mutex_unlock(&chip->mutex); 1955 + } 1956 + 1957 + return 0; 1958 + } 1959 + 1960 + 1961 + static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val, 1962 + void *v) 1963 + { 1964 + struct mtd_info *mtd; 1965 + 1966 + mtd = container_of(nb, struct mtd_info, reboot_notifier); 1967 + cfi_amdstd_reset(mtd); 1968 + return NOTIFY_DONE; 1969 + } 1970 + 1985 1971 1986 1972 static void cfi_amdstd_destroy(struct mtd_info *mtd) 1987 1973 { 1988 1974 struct 
map_info *map = mtd->priv; 1989 1975 struct cfi_private *cfi = map->fldrv_priv; 1990 1976 1977 + cfi_amdstd_reset(mtd); 1978 + unregister_reboot_notifier(&mtd->reboot_notifier); 1991 1979 kfree(cfi->cmdset_priv); 1992 1980 kfree(cfi->cfiq); 1993 1981 kfree(cfi); ··· 2044 1938 MODULE_LICENSE("GPL"); 2045 1939 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al."); 2046 1940 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips"); 1941 + MODULE_ALIAS("cfi_cmdset_0006"); 1942 + MODULE_ALIAS("cfi_cmdset_0701");
+68 -68
drivers/mtd/chips/cfi_cmdset_0020.c
··· 265 265 266 266 timeo = jiffies + HZ; 267 267 retry: 268 - spin_lock_bh(chip->mutex); 268 + mutex_lock(&chip->mutex); 269 269 270 270 /* Check that the chip's ready to talk to us. 271 271 * If it's in FL_ERASING state, suspend it and make it talk now. ··· 296 296 /* make sure we're in 'read status' mode */ 297 297 map_write(map, CMD(0x70), cmd_addr); 298 298 chip->state = FL_ERASING; 299 - spin_unlock_bh(chip->mutex); 299 + mutex_unlock(&chip->mutex); 300 300 printk(KERN_ERR "Chip not ready after erase " 301 301 "suspended: status = 0x%lx\n", status.x[0]); 302 302 return -EIO; 303 303 } 304 304 305 - spin_unlock_bh(chip->mutex); 305 + mutex_unlock(&chip->mutex); 306 306 cfi_udelay(1); 307 - spin_lock_bh(chip->mutex); 307 + mutex_lock(&chip->mutex); 308 308 } 309 309 310 310 suspended = 1; ··· 335 335 336 336 /* Urgh. Chip not yet ready to talk to us. */ 337 337 if (time_after(jiffies, timeo)) { 338 - spin_unlock_bh(chip->mutex); 338 + mutex_unlock(&chip->mutex); 339 339 printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]); 340 340 return -EIO; 341 341 } 342 342 343 343 /* Latency issues. 
Drop the lock, wait a while and retry */ 344 - spin_unlock_bh(chip->mutex); 344 + mutex_unlock(&chip->mutex); 345 345 cfi_udelay(1); 346 346 goto retry; 347 347 ··· 351 351 someone changes the status */ 352 352 set_current_state(TASK_UNINTERRUPTIBLE); 353 353 add_wait_queue(&chip->wq, &wait); 354 - spin_unlock_bh(chip->mutex); 354 + mutex_unlock(&chip->mutex); 355 355 schedule(); 356 356 remove_wait_queue(&chip->wq, &wait); 357 357 timeo = jiffies + HZ; ··· 376 376 } 377 377 378 378 wake_up(&chip->wq); 379 - spin_unlock_bh(chip->mutex); 379 + mutex_unlock(&chip->mutex); 380 380 return 0; 381 381 } 382 382 ··· 445 445 #ifdef DEBUG_CFI_FEATURES 446 446 printk("%s: chip->state[%d]\n", __func__, chip->state); 447 447 #endif 448 - spin_lock_bh(chip->mutex); 448 + mutex_lock(&chip->mutex); 449 449 450 450 /* Check that the chip's ready to talk to us. 451 451 * Later, we can actually think about interrupting it ··· 470 470 break; 471 471 /* Urgh. Chip not yet ready to talk to us. */ 472 472 if (time_after(jiffies, timeo)) { 473 - spin_unlock_bh(chip->mutex); 473 + mutex_unlock(&chip->mutex); 474 474 printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n", 475 475 status.x[0], map_read(map, cmd_adr).x[0]); 476 476 return -EIO; 477 477 } 478 478 479 479 /* Latency issues. 
Drop the lock, wait a while and retry */ 480 - spin_unlock_bh(chip->mutex); 480 + mutex_unlock(&chip->mutex); 481 481 cfi_udelay(1); 482 482 goto retry; 483 483 ··· 486 486 someone changes the status */ 487 487 set_current_state(TASK_UNINTERRUPTIBLE); 488 488 add_wait_queue(&chip->wq, &wait); 489 - spin_unlock_bh(chip->mutex); 489 + mutex_unlock(&chip->mutex); 490 490 schedule(); 491 491 remove_wait_queue(&chip->wq, &wait); 492 492 timeo = jiffies + HZ; ··· 503 503 if (map_word_andequal(map, status, status_OK, status_OK)) 504 504 break; 505 505 506 - spin_unlock_bh(chip->mutex); 506 + mutex_unlock(&chip->mutex); 507 507 cfi_udelay(1); 508 - spin_lock_bh(chip->mutex); 508 + mutex_lock(&chip->mutex); 509 509 510 510 if (++z > 100) { 511 511 /* Argh. Not ready for write to buffer */ 512 512 DISABLE_VPP(map); 513 513 map_write(map, CMD(0x70), cmd_adr); 514 514 chip->state = FL_STATUS; 515 - spin_unlock_bh(chip->mutex); 515 + mutex_unlock(&chip->mutex); 516 516 printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]); 517 517 return -EIO; 518 518 } ··· 532 532 map_write(map, CMD(0xd0), cmd_adr); 533 533 chip->state = FL_WRITING; 534 534 535 - spin_unlock_bh(chip->mutex); 535 + mutex_unlock(&chip->mutex); 536 536 cfi_udelay(chip->buffer_write_time); 537 - spin_lock_bh(chip->mutex); 537 + mutex_lock(&chip->mutex); 538 538 539 539 timeo = jiffies + (HZ/2); 540 540 z = 0; ··· 543 543 /* Someone's suspended the write. 
Sleep */ 544 544 set_current_state(TASK_UNINTERRUPTIBLE); 545 545 add_wait_queue(&chip->wq, &wait); 546 - spin_unlock_bh(chip->mutex); 546 + mutex_unlock(&chip->mutex); 547 547 schedule(); 548 548 remove_wait_queue(&chip->wq, &wait); 549 549 timeo = jiffies + (HZ / 2); /* FIXME */ 550 - spin_lock_bh(chip->mutex); 550 + mutex_lock(&chip->mutex); 551 551 continue; 552 552 } 553 553 ··· 563 563 map_write(map, CMD(0x70), adr); 564 564 chip->state = FL_STATUS; 565 565 DISABLE_VPP(map); 566 - spin_unlock_bh(chip->mutex); 566 + mutex_unlock(&chip->mutex); 567 567 printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n"); 568 568 return -EIO; 569 569 } 570 570 571 571 /* Latency issues. Drop the lock, wait a while and retry */ 572 - spin_unlock_bh(chip->mutex); 572 + mutex_unlock(&chip->mutex); 573 573 cfi_udelay(1); 574 574 z++; 575 - spin_lock_bh(chip->mutex); 575 + mutex_lock(&chip->mutex); 576 576 } 577 577 if (!z) { 578 578 chip->buffer_write_time--; ··· 596 596 /* put back into read status register mode */ 597 597 map_write(map, CMD(0x70), adr); 598 598 wake_up(&chip->wq); 599 - spin_unlock_bh(chip->mutex); 599 + mutex_unlock(&chip->mutex); 600 600 return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO; 601 601 } 602 602 wake_up(&chip->wq); 603 - spin_unlock_bh(chip->mutex); 603 + mutex_unlock(&chip->mutex); 604 604 605 605 return 0; 606 606 } ··· 749 749 750 750 timeo = jiffies + HZ; 751 751 retry: 752 - spin_lock_bh(chip->mutex); 752 + mutex_lock(&chip->mutex); 753 753 754 754 /* Check that the chip's ready to talk to us. */ 755 755 switch (chip->state) { ··· 766 766 767 767 /* Urgh. Chip not yet ready to talk to us. */ 768 768 if (time_after(jiffies, timeo)) { 769 - spin_unlock_bh(chip->mutex); 769 + mutex_unlock(&chip->mutex); 770 770 printk(KERN_ERR "waiting for chip to be ready timed out in erase\n"); 771 771 return -EIO; 772 772 } 773 773 774 774 /* Latency issues. 
Drop the lock, wait a while and retry */ 775 - spin_unlock_bh(chip->mutex); 775 + mutex_unlock(&chip->mutex); 776 776 cfi_udelay(1); 777 777 goto retry; 778 778 ··· 781 781 someone changes the status */ 782 782 set_current_state(TASK_UNINTERRUPTIBLE); 783 783 add_wait_queue(&chip->wq, &wait); 784 - spin_unlock_bh(chip->mutex); 784 + mutex_unlock(&chip->mutex); 785 785 schedule(); 786 786 remove_wait_queue(&chip->wq, &wait); 787 787 timeo = jiffies + HZ; ··· 797 797 map_write(map, CMD(0xD0), adr); 798 798 chip->state = FL_ERASING; 799 799 800 - spin_unlock_bh(chip->mutex); 800 + mutex_unlock(&chip->mutex); 801 801 msleep(1000); 802 - spin_lock_bh(chip->mutex); 802 + mutex_lock(&chip->mutex); 803 803 804 804 /* FIXME. Use a timer to check this, and return immediately. */ 805 805 /* Once the state machine's known to be working I'll do that */ ··· 810 810 /* Someone's suspended the erase. Sleep */ 811 811 set_current_state(TASK_UNINTERRUPTIBLE); 812 812 add_wait_queue(&chip->wq, &wait); 813 - spin_unlock_bh(chip->mutex); 813 + mutex_unlock(&chip->mutex); 814 814 schedule(); 815 815 remove_wait_queue(&chip->wq, &wait); 816 816 timeo = jiffies + (HZ*20); /* FIXME */ 817 - spin_lock_bh(chip->mutex); 817 + mutex_lock(&chip->mutex); 818 818 continue; 819 819 } 820 820 ··· 828 828 chip->state = FL_STATUS; 829 829 printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]); 830 830 DISABLE_VPP(map); 831 - spin_unlock_bh(chip->mutex); 831 + mutex_unlock(&chip->mutex); 832 832 return -EIO; 833 833 } 834 834 835 835 /* Latency issues. Drop the lock, wait a while and retry */ 836 - spin_unlock_bh(chip->mutex); 836 + mutex_unlock(&chip->mutex); 837 837 cfi_udelay(1); 838 - spin_lock_bh(chip->mutex); 838 + mutex_lock(&chip->mutex); 839 839 } 840 840 841 841 DISABLE_VPP(map); ··· 878 878 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. 
Retrying...\n", adr, chipstatus); 879 879 timeo = jiffies + HZ; 880 880 chip->state = FL_STATUS; 881 - spin_unlock_bh(chip->mutex); 881 + mutex_unlock(&chip->mutex); 882 882 goto retry; 883 883 } 884 884 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus); ··· 887 887 } 888 888 889 889 wake_up(&chip->wq); 890 - spin_unlock_bh(chip->mutex); 890 + mutex_unlock(&chip->mutex); 891 891 return ret; 892 892 } 893 893 ··· 995 995 chip = &cfi->chips[i]; 996 996 997 997 retry: 998 - spin_lock_bh(chip->mutex); 998 + mutex_lock(&chip->mutex); 999 999 1000 1000 switch(chip->state) { 1001 1001 case FL_READY: ··· 1009 1009 * with the chip now anyway. 1010 1010 */ 1011 1011 case FL_SYNCING: 1012 - spin_unlock_bh(chip->mutex); 1012 + mutex_unlock(&chip->mutex); 1013 1013 break; 1014 1014 1015 1015 default: ··· 1017 1017 set_current_state(TASK_UNINTERRUPTIBLE); 1018 1018 add_wait_queue(&chip->wq, &wait); 1019 1019 1020 - spin_unlock_bh(chip->mutex); 1020 + mutex_unlock(&chip->mutex); 1021 1021 schedule(); 1022 1022 remove_wait_queue(&chip->wq, &wait); 1023 1023 ··· 1030 1030 for (i--; i >=0; i--) { 1031 1031 chip = &cfi->chips[i]; 1032 1032 1033 - spin_lock_bh(chip->mutex); 1033 + mutex_lock(&chip->mutex); 1034 1034 1035 1035 if (chip->state == FL_SYNCING) { 1036 1036 chip->state = chip->oldstate; 1037 1037 wake_up(&chip->wq); 1038 1038 } 1039 - spin_unlock_bh(chip->mutex); 1039 + mutex_unlock(&chip->mutex); 1040 1040 } 1041 1041 } 1042 1042 ··· 1054 1054 1055 1055 timeo = jiffies + HZ; 1056 1056 retry: 1057 - spin_lock_bh(chip->mutex); 1057 + mutex_lock(&chip->mutex); 1058 1058 1059 1059 /* Check that the chip's ready to talk to us. */ 1060 1060 switch (chip->state) { ··· 1071 1071 1072 1072 /* Urgh. Chip not yet ready to talk to us. 
*/ 1073 1073 if (time_after(jiffies, timeo)) { 1074 - spin_unlock_bh(chip->mutex); 1074 + mutex_unlock(&chip->mutex); 1075 1075 printk(KERN_ERR "waiting for chip to be ready timed out in lock\n"); 1076 1076 return -EIO; 1077 1077 } 1078 1078 1079 1079 /* Latency issues. Drop the lock, wait a while and retry */ 1080 - spin_unlock_bh(chip->mutex); 1080 + mutex_unlock(&chip->mutex); 1081 1081 cfi_udelay(1); 1082 1082 goto retry; 1083 1083 ··· 1086 1086 someone changes the status */ 1087 1087 set_current_state(TASK_UNINTERRUPTIBLE); 1088 1088 add_wait_queue(&chip->wq, &wait); 1089 - spin_unlock_bh(chip->mutex); 1089 + mutex_unlock(&chip->mutex); 1090 1090 schedule(); 1091 1091 remove_wait_queue(&chip->wq, &wait); 1092 1092 timeo = jiffies + HZ; ··· 1098 1098 map_write(map, CMD(0x01), adr); 1099 1099 chip->state = FL_LOCKING; 1100 1100 1101 - spin_unlock_bh(chip->mutex); 1101 + mutex_unlock(&chip->mutex); 1102 1102 msleep(1000); 1103 - spin_lock_bh(chip->mutex); 1103 + mutex_lock(&chip->mutex); 1104 1104 1105 1105 /* FIXME. Use a timer to check this, and return immediately. */ 1106 1106 /* Once the state machine's known to be working I'll do that */ ··· 1118 1118 chip->state = FL_STATUS; 1119 1119 printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]); 1120 1120 DISABLE_VPP(map); 1121 - spin_unlock_bh(chip->mutex); 1121 + mutex_unlock(&chip->mutex); 1122 1122 return -EIO; 1123 1123 } 1124 1124 1125 1125 /* Latency issues. Drop the lock, wait a while and retry */ 1126 - spin_unlock_bh(chip->mutex); 1126 + mutex_unlock(&chip->mutex); 1127 1127 cfi_udelay(1); 1128 - spin_lock_bh(chip->mutex); 1128 + mutex_lock(&chip->mutex); 1129 1129 } 1130 1130 1131 1131 /* Done and happy. 
*/ 1132 1132 chip->state = FL_STATUS; 1133 1133 DISABLE_VPP(map); 1134 1134 wake_up(&chip->wq); 1135 - spin_unlock_bh(chip->mutex); 1135 + mutex_unlock(&chip->mutex); 1136 1136 return 0; 1137 1137 } 1138 1138 static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) ··· 1203 1203 1204 1204 timeo = jiffies + HZ; 1205 1205 retry: 1206 - spin_lock_bh(chip->mutex); 1206 + mutex_lock(&chip->mutex); 1207 1207 1208 1208 /* Check that the chip's ready to talk to us. */ 1209 1209 switch (chip->state) { ··· 1220 1220 1221 1221 /* Urgh. Chip not yet ready to talk to us. */ 1222 1222 if (time_after(jiffies, timeo)) { 1223 - spin_unlock_bh(chip->mutex); 1223 + mutex_unlock(&chip->mutex); 1224 1224 printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n"); 1225 1225 return -EIO; 1226 1226 } 1227 1227 1228 1228 /* Latency issues. Drop the lock, wait a while and retry */ 1229 - spin_unlock_bh(chip->mutex); 1229 + mutex_unlock(&chip->mutex); 1230 1230 cfi_udelay(1); 1231 1231 goto retry; 1232 1232 ··· 1235 1235 someone changes the status */ 1236 1236 set_current_state(TASK_UNINTERRUPTIBLE); 1237 1237 add_wait_queue(&chip->wq, &wait); 1238 - spin_unlock_bh(chip->mutex); 1238 + mutex_unlock(&chip->mutex); 1239 1239 schedule(); 1240 1240 remove_wait_queue(&chip->wq, &wait); 1241 1241 timeo = jiffies + HZ; ··· 1247 1247 map_write(map, CMD(0xD0), adr); 1248 1248 chip->state = FL_UNLOCKING; 1249 1249 1250 - spin_unlock_bh(chip->mutex); 1250 + mutex_unlock(&chip->mutex); 1251 1251 msleep(1000); 1252 - spin_lock_bh(chip->mutex); 1252 + mutex_lock(&chip->mutex); 1253 1253 1254 1254 /* FIXME. Use a timer to check this, and return immediately. */ 1255 1255 /* Once the state machine's known to be working I'll do that */ ··· 1267 1267 chip->state = FL_STATUS; 1268 1268 printk(KERN_ERR "waiting for unlock to complete timed out. 
Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]); 1269 1269 DISABLE_VPP(map); 1270 - spin_unlock_bh(chip->mutex); 1270 + mutex_unlock(&chip->mutex); 1271 1271 return -EIO; 1272 1272 } 1273 1273 1274 1274 /* Latency issues. Drop the unlock, wait a while and retry */ 1275 - spin_unlock_bh(chip->mutex); 1275 + mutex_unlock(&chip->mutex); 1276 1276 cfi_udelay(1); 1277 - spin_lock_bh(chip->mutex); 1277 + mutex_lock(&chip->mutex); 1278 1278 } 1279 1279 1280 1280 /* Done and happy. */ 1281 1281 chip->state = FL_STATUS; 1282 1282 DISABLE_VPP(map); 1283 1283 wake_up(&chip->wq); 1284 - spin_unlock_bh(chip->mutex); 1284 + mutex_unlock(&chip->mutex); 1285 1285 return 0; 1286 1286 } 1287 1287 static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) ··· 1334 1334 for (i=0; !ret && i<cfi->numchips; i++) { 1335 1335 chip = &cfi->chips[i]; 1336 1336 1337 - spin_lock_bh(chip->mutex); 1337 + mutex_lock(&chip->mutex); 1338 1338 1339 1339 switch(chip->state) { 1340 1340 case FL_READY: ··· 1354 1354 ret = -EAGAIN; 1355 1355 break; 1356 1356 } 1357 - spin_unlock_bh(chip->mutex); 1357 + mutex_unlock(&chip->mutex); 1358 1358 } 1359 1359 1360 1360 /* Unlock the chips again */ ··· 1363 1363 for (i--; i >=0; i--) { 1364 1364 chip = &cfi->chips[i]; 1365 1365 1366 - spin_lock_bh(chip->mutex); 1366 + mutex_lock(&chip->mutex); 1367 1367 1368 1368 if (chip->state == FL_PM_SUSPENDED) { 1369 1369 /* No need to force it into a known state here, ··· 1372 1372 chip->state = chip->oldstate; 1373 1373 wake_up(&chip->wq); 1374 1374 } 1375 - spin_unlock_bh(chip->mutex); 1375 + mutex_unlock(&chip->mutex); 1376 1376 } 1377 1377 } 1378 1378 ··· 1390 1390 1391 1391 chip = &cfi->chips[i]; 1392 1392 1393 - spin_lock_bh(chip->mutex); 1393 + mutex_lock(&chip->mutex); 1394 1394 1395 1395 /* Go to known state. 
Chip may have been power cycled */ 1396 1396 if (chip->state == FL_PM_SUSPENDED) { ··· 1399 1399 wake_up(&chip->wq); 1400 1400 } 1401 1401 1402 - spin_unlock_bh(chip->mutex); 1402 + mutex_unlock(&chip->mutex); 1403 1403 } 1404 1404 } 1405 1405
+33 -23
drivers/mtd/chips/cfi_probe.c
··· 158 158 __u32 base = 0; 159 159 int num_erase_regions = cfi_read_query(map, base + (0x10 + 28)*ofs_factor); 160 160 int i; 161 + int addr_unlock1 = 0x555, addr_unlock2 = 0x2AA; 161 162 162 163 xip_enable(base, map, cfi); 163 164 #ifdef DEBUG_CFI ··· 181 180 xip_disable_qry(base, map, cfi); 182 181 for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++) 183 182 ((unsigned char *)cfi->cfiq)[i] = cfi_read_query(map,base + (0x10 + i)*ofs_factor); 184 - 185 - /* Note we put the device back into Read Mode BEFORE going into Auto 186 - * Select Mode, as some devices support nesting of modes, others 187 - * don't. This way should always work. 188 - * On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and 189 - * so should be treated as nops or illegal (and so put the device 190 - * back into Read Mode, which is a nop in this case). 191 - */ 192 - cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL); 193 - cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL); 194 - cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL); 195 - cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL); 196 - cfi->mfr = cfi_read_query16(map, base); 197 - cfi->id = cfi_read_query16(map, base + ofs_factor); 198 - 199 - /* Get AMD/Spansion extended JEDEC ID */ 200 - if (cfi->mfr == CFI_MFR_AMD && (cfi->id & 0xff) == 0x7e) 201 - cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 | 202 - cfi_read_query(map, base + 0xf * ofs_factor); 203 - 204 - /* Put it back into Read Mode */ 205 - cfi_qry_mode_off(base, map, cfi); 206 - xip_allowed(base, map); 207 183 208 184 /* Do any necessary byteswapping */ 209 185 cfi->cfiq->P_ID = le16_to_cpu(cfi->cfiq->P_ID); ··· 205 227 (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1); 206 228 #endif 207 229 } 230 + 231 + if (cfi->cfiq->P_ID == P_ID_SST_OLD) { 232 + addr_unlock1 = 0x5555; 233 + addr_unlock2 = 0x2AAA; 234 + } 235 + 236 + /* 237 + * Note we put the device back into Read 
Mode BEFORE going into Auto 238 + * Select Mode, as some devices support nesting of modes, others 239 + * don't. This way should always work. 240 + * On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and 241 + * so should be treated as nops or illegal (and so put the device 242 + * back into Read Mode, which is a nop in this case). 243 + */ 244 + cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL); 245 + cfi_send_gen_cmd(0xaa, addr_unlock1, base, map, cfi, cfi->device_type, NULL); 246 + cfi_send_gen_cmd(0x55, addr_unlock2, base, map, cfi, cfi->device_type, NULL); 247 + cfi_send_gen_cmd(0x90, addr_unlock1, base, map, cfi, cfi->device_type, NULL); 248 + cfi->mfr = cfi_read_query16(map, base); 249 + cfi->id = cfi_read_query16(map, base + ofs_factor); 250 + 251 + /* Get AMD/Spansion extended JEDEC ID */ 252 + if (cfi->mfr == CFI_MFR_AMD && (cfi->id & 0xff) == 0x7e) 253 + cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 | 254 + cfi_read_query(map, base + 0xf * ofs_factor); 255 + 256 + /* Put it back into Read Mode */ 257 + cfi_qry_mode_off(base, map, cfi); 258 + xip_allowed(base, map); 208 259 209 260 printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n", 210 261 map->name, cfi->interleave, cfi->device_type*8, base, ··· 275 268 276 269 case P_ID_SST_PAGE: 277 270 return "SST Page Write"; 271 + 272 + case P_ID_SST_OLD: 273 + return "SST 39VF160x/39VF320x"; 278 274 279 275 case P_ID_INTEL_PERFORMANCE: 280 276 return "Intel Performance Code";
+2 -1
drivers/mtd/chips/cfi_util.c
··· 104 104 int i; 105 105 struct cfi_extquery *extp = NULL; 106 106 107 - printk(" %s Extended Query Table at 0x%4.4X\n", name, adr); 108 107 if (!adr) 109 108 goto out; 109 + 110 + printk(KERN_INFO "%s Extended Query Table at 0x%4.4X\n", name, adr); 110 111 111 112 extp = kmalloc(size, GFP_KERNEL); 112 113 if (!extp) {
+3 -3
drivers/mtd/chips/fwh_lock.h
··· 58 58 * to flash memory - that means that we don't have to check status 59 59 * and timeout. 60 60 */ 61 - spin_lock(chip->mutex); 61 + mutex_lock(&chip->mutex); 62 62 ret = get_chip(map, chip, adr, FL_LOCKING); 63 63 if (ret) { 64 - spin_unlock(chip->mutex); 64 + mutex_unlock(&chip->mutex); 65 65 return ret; 66 66 } 67 67 ··· 72 72 /* Done and happy. */ 73 73 chip->state = chip->oldstate; 74 74 put_chip(map, chip, adr); 75 - spin_unlock(chip->mutex); 75 + mutex_unlock(&chip->mutex); 76 76 return 0; 77 77 } 78 78
+8 -7
drivers/mtd/chips/gen_probe.c
··· 155 155 pchip->start = (i << cfi.chipshift); 156 156 pchip->state = FL_READY; 157 157 init_waitqueue_head(&pchip->wq); 158 - spin_lock_init(&pchip->_spinlock); 159 - pchip->mutex = &pchip->_spinlock; 158 + mutex_init(&pchip->mutex); 160 159 } 161 160 } 162 161 ··· 241 242 /* We need these for the !CONFIG_MODULES case, 242 243 because symbol_get() doesn't work there */ 243 244 #ifdef CONFIG_MTD_CFI_INTELEXT 244 - case 0x0001: 245 - case 0x0003: 246 - case 0x0200: 245 + case P_ID_INTEL_EXT: 246 + case P_ID_INTEL_STD: 247 + case P_ID_INTEL_PERFORMANCE: 247 248 return cfi_cmdset_0001(map, primary); 248 249 #endif 249 250 #ifdef CONFIG_MTD_CFI_AMDSTD 250 - case 0x0002: 251 + case P_ID_AMD_STD: 252 + case P_ID_SST_OLD: 253 + case P_ID_WINBOND: 251 254 return cfi_cmdset_0002(map, primary); 252 255 #endif 253 256 #ifdef CONFIG_MTD_CFI_STAA 254 - case 0x0020: 257 + case P_ID_ST_ADV: 255 258 return cfi_cmdset_0020(map, primary); 256 259 #endif 257 260 default:
+150 -138
drivers/mtd/chips/jedec_probe.c
··· 22 22 #include <linux/mtd/cfi.h> 23 23 #include <linux/mtd/gen_probe.h> 24 24 25 - /* Manufacturers */ 26 - #define MANUFACTURER_AMD 0x0001 27 - #define MANUFACTURER_ATMEL 0x001f 28 - #define MANUFACTURER_EON 0x001c 29 - #define MANUFACTURER_FUJITSU 0x0004 30 - #define MANUFACTURER_HYUNDAI 0x00AD 31 - #define MANUFACTURER_INTEL 0x0089 32 - #define MANUFACTURER_MACRONIX 0x00C2 33 - #define MANUFACTURER_NEC 0x0010 34 - #define MANUFACTURER_PMC 0x009D 35 - #define MANUFACTURER_SHARP 0x00b0 36 - #define MANUFACTURER_SST 0x00BF 37 - #define MANUFACTURER_ST 0x0020 38 - #define MANUFACTURER_TOSHIBA 0x0098 39 - #define MANUFACTURER_WINBOND 0x00da 40 - #define CONTINUATION_CODE 0x007f 41 - 42 - 43 25 /* AMD */ 44 26 #define AM29DL800BB 0x22CB 45 27 #define AM29DL800BT 0x224A ··· 148 166 #define SST39LF160 0x2782 149 167 #define SST39VF1601 0x234b 150 168 #define SST39VF3201 0x235b 169 + #define SST39WF1601 0x274b 170 + #define SST39WF1602 0x274a 151 171 #define SST39LF512 0x00D4 152 172 #define SST39LF010 0x00D5 153 173 #define SST39LF020 0x00D6 ··· 293 309 */ 294 310 static const struct amd_flash_info jedec_table[] = { 295 311 { 296 - .mfr_id = MANUFACTURER_AMD, 312 + .mfr_id = CFI_MFR_AMD, 297 313 .dev_id = AM29F032B, 298 314 .name = "AMD AM29F032B", 299 315 .uaddr = MTD_UADDR_0x0555_0x02AA, ··· 305 321 ERASEINFO(0x10000,64) 306 322 } 307 323 }, { 308 - .mfr_id = MANUFACTURER_AMD, 324 + .mfr_id = CFI_MFR_AMD, 309 325 .dev_id = AM29LV160DT, 310 326 .name = "AMD AM29LV160DT", 311 327 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 320 336 ERASEINFO(0x04000,1) 321 337 } 322 338 }, { 323 - .mfr_id = MANUFACTURER_AMD, 339 + .mfr_id = CFI_MFR_AMD, 324 340 .dev_id = AM29LV160DB, 325 341 .name = "AMD AM29LV160DB", 326 342 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 335 351 ERASEINFO(0x10000,31) 336 352 } 337 353 }, { 338 - .mfr_id = MANUFACTURER_AMD, 354 + .mfr_id = CFI_MFR_AMD, 339 355 .dev_id = AM29LV400BB, 340 356 .name = "AMD AM29LV400BB", 341 357 
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 350 366 ERASEINFO(0x10000,7) 351 367 } 352 368 }, { 353 - .mfr_id = MANUFACTURER_AMD, 369 + .mfr_id = CFI_MFR_AMD, 354 370 .dev_id = AM29LV400BT, 355 371 .name = "AMD AM29LV400BT", 356 372 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 365 381 ERASEINFO(0x04000,1) 366 382 } 367 383 }, { 368 - .mfr_id = MANUFACTURER_AMD, 384 + .mfr_id = CFI_MFR_AMD, 369 385 .dev_id = AM29LV800BB, 370 386 .name = "AMD AM29LV800BB", 371 387 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 381 397 } 382 398 }, { 383 399 /* add DL */ 384 - .mfr_id = MANUFACTURER_AMD, 400 + .mfr_id = CFI_MFR_AMD, 385 401 .dev_id = AM29DL800BB, 386 402 .name = "AMD AM29DL800BB", 387 403 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 398 414 ERASEINFO(0x10000,14) 399 415 } 400 416 }, { 401 - .mfr_id = MANUFACTURER_AMD, 417 + .mfr_id = CFI_MFR_AMD, 402 418 .dev_id = AM29DL800BT, 403 419 .name = "AMD AM29DL800BT", 404 420 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 415 431 ERASEINFO(0x04000,1) 416 432 } 417 433 }, { 418 - .mfr_id = MANUFACTURER_AMD, 434 + .mfr_id = CFI_MFR_AMD, 419 435 .dev_id = AM29F800BB, 420 436 .name = "AMD AM29F800BB", 421 437 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 430 446 ERASEINFO(0x10000,15), 431 447 } 432 448 }, { 433 - .mfr_id = MANUFACTURER_AMD, 449 + .mfr_id = CFI_MFR_AMD, 434 450 .dev_id = AM29LV800BT, 435 451 .name = "AMD AM29LV800BT", 436 452 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 445 461 ERASEINFO(0x04000,1) 446 462 } 447 463 }, { 448 - .mfr_id = MANUFACTURER_AMD, 464 + .mfr_id = CFI_MFR_AMD, 449 465 .dev_id = AM29F800BT, 450 466 .name = "AMD AM29F800BT", 451 467 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 460 476 ERASEINFO(0x04000,1) 461 477 } 462 478 }, { 463 - .mfr_id = MANUFACTURER_AMD, 479 + .mfr_id = CFI_MFR_AMD, 464 480 .dev_id = AM29F017D, 465 481 .name = "AMD AM29F017D", 466 482 .devtypes = CFI_DEVICETYPE_X8, ··· 472 488 ERASEINFO(0x10000,32), 
473 489 } 474 490 }, { 475 - .mfr_id = MANUFACTURER_AMD, 491 + .mfr_id = CFI_MFR_AMD, 476 492 .dev_id = AM29F016D, 477 493 .name = "AMD AM29F016D", 478 494 .devtypes = CFI_DEVICETYPE_X8, ··· 484 500 ERASEINFO(0x10000,32), 485 501 } 486 502 }, { 487 - .mfr_id = MANUFACTURER_AMD, 503 + .mfr_id = CFI_MFR_AMD, 488 504 .dev_id = AM29F080, 489 505 .name = "AMD AM29F080", 490 506 .devtypes = CFI_DEVICETYPE_X8, ··· 496 512 ERASEINFO(0x10000,16), 497 513 } 498 514 }, { 499 - .mfr_id = MANUFACTURER_AMD, 515 + .mfr_id = CFI_MFR_AMD, 500 516 .dev_id = AM29F040, 501 517 .name = "AMD AM29F040", 502 518 .devtypes = CFI_DEVICETYPE_X8, ··· 508 524 ERASEINFO(0x10000,8), 509 525 } 510 526 }, { 511 - .mfr_id = MANUFACTURER_AMD, 527 + .mfr_id = CFI_MFR_AMD, 512 528 .dev_id = AM29LV040B, 513 529 .name = "AMD AM29LV040B", 514 530 .devtypes = CFI_DEVICETYPE_X8, ··· 520 536 ERASEINFO(0x10000,8), 521 537 } 522 538 }, { 523 - .mfr_id = MANUFACTURER_AMD, 539 + .mfr_id = CFI_MFR_AMD, 524 540 .dev_id = AM29F002T, 525 541 .name = "AMD AM29F002T", 526 542 .devtypes = CFI_DEVICETYPE_X8, ··· 535 551 ERASEINFO(0x04000,1), 536 552 } 537 553 }, { 538 - .mfr_id = MANUFACTURER_AMD, 554 + .mfr_id = CFI_MFR_AMD, 539 555 .dev_id = AM29SL800DT, 540 556 .name = "AMD AM29SL800DT", 541 557 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 550 566 ERASEINFO(0x04000,1), 551 567 } 552 568 }, { 553 - .mfr_id = MANUFACTURER_AMD, 569 + .mfr_id = CFI_MFR_AMD, 554 570 .dev_id = AM29SL800DB, 555 571 .name = "AMD AM29SL800DB", 556 572 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 565 581 ERASEINFO(0x10000,15), 566 582 } 567 583 }, { 568 - .mfr_id = MANUFACTURER_ATMEL, 584 + .mfr_id = CFI_MFR_ATMEL, 569 585 .dev_id = AT49BV512, 570 586 .name = "Atmel AT49BV512", 571 587 .devtypes = CFI_DEVICETYPE_X8, ··· 577 593 ERASEINFO(0x10000,1) 578 594 } 579 595 }, { 580 - .mfr_id = MANUFACTURER_ATMEL, 596 + .mfr_id = CFI_MFR_ATMEL, 581 597 .dev_id = AT29LV512, 582 598 .name = "Atmel AT29LV512", 583 599 .devtypes = 
CFI_DEVICETYPE_X8, ··· 590 606 ERASEINFO(0x80,256) 591 607 } 592 608 }, { 593 - .mfr_id = MANUFACTURER_ATMEL, 609 + .mfr_id = CFI_MFR_ATMEL, 594 610 .dev_id = AT49BV16X, 595 611 .name = "Atmel AT49BV16X", 596 612 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 603 619 ERASEINFO(0x10000,31) 604 620 } 605 621 }, { 606 - .mfr_id = MANUFACTURER_ATMEL, 622 + .mfr_id = CFI_MFR_ATMEL, 607 623 .dev_id = AT49BV16XT, 608 624 .name = "Atmel AT49BV16XT", 609 625 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 616 632 ERASEINFO(0x02000,8) 617 633 } 618 634 }, { 619 - .mfr_id = MANUFACTURER_ATMEL, 635 + .mfr_id = CFI_MFR_ATMEL, 620 636 .dev_id = AT49BV32X, 621 637 .name = "Atmel AT49BV32X", 622 638 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 629 645 ERASEINFO(0x10000,63) 630 646 } 631 647 }, { 632 - .mfr_id = MANUFACTURER_ATMEL, 648 + .mfr_id = CFI_MFR_ATMEL, 633 649 .dev_id = AT49BV32XT, 634 650 .name = "Atmel AT49BV32XT", 635 651 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 642 658 ERASEINFO(0x02000,8) 643 659 } 644 660 }, { 645 - .mfr_id = MANUFACTURER_EON, 661 + .mfr_id = CFI_MFR_EON, 646 662 .dev_id = EN29SL800BT, 647 663 .name = "Eon EN29SL800BT", 648 664 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 657 673 ERASEINFO(0x04000,1), 658 674 } 659 675 }, { 660 - .mfr_id = MANUFACTURER_EON, 676 + .mfr_id = CFI_MFR_EON, 661 677 .dev_id = EN29SL800BB, 662 678 .name = "Eon EN29SL800BB", 663 679 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 672 688 ERASEINFO(0x10000,15), 673 689 } 674 690 }, { 675 - .mfr_id = MANUFACTURER_FUJITSU, 691 + .mfr_id = CFI_MFR_FUJITSU, 676 692 .dev_id = MBM29F040C, 677 693 .name = "Fujitsu MBM29F040C", 678 694 .devtypes = CFI_DEVICETYPE_X8, ··· 684 700 ERASEINFO(0x10000,8) 685 701 } 686 702 }, { 687 - .mfr_id = MANUFACTURER_FUJITSU, 703 + .mfr_id = CFI_MFR_FUJITSU, 688 704 .dev_id = MBM29F800BA, 689 705 .name = "Fujitsu MBM29F800BA", 690 706 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 699 715 
ERASEINFO(0x10000,15), 700 716 } 701 717 }, { 702 - .mfr_id = MANUFACTURER_FUJITSU, 718 + .mfr_id = CFI_MFR_FUJITSU, 703 719 .dev_id = MBM29LV650UE, 704 720 .name = "Fujitsu MBM29LV650UE", 705 721 .devtypes = CFI_DEVICETYPE_X8, ··· 711 727 ERASEINFO(0x10000,128) 712 728 } 713 729 }, { 714 - .mfr_id = MANUFACTURER_FUJITSU, 730 + .mfr_id = CFI_MFR_FUJITSU, 715 731 .dev_id = MBM29LV320TE, 716 732 .name = "Fujitsu MBM29LV320TE", 717 733 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 724 740 ERASEINFO(0x02000,8) 725 741 } 726 742 }, { 727 - .mfr_id = MANUFACTURER_FUJITSU, 743 + .mfr_id = CFI_MFR_FUJITSU, 728 744 .dev_id = MBM29LV320BE, 729 745 .name = "Fujitsu MBM29LV320BE", 730 746 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 737 753 ERASEINFO(0x10000,63) 738 754 } 739 755 }, { 740 - .mfr_id = MANUFACTURER_FUJITSU, 756 + .mfr_id = CFI_MFR_FUJITSU, 741 757 .dev_id = MBM29LV160TE, 742 758 .name = "Fujitsu MBM29LV160TE", 743 759 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 752 768 ERASEINFO(0x04000,1) 753 769 } 754 770 }, { 755 - .mfr_id = MANUFACTURER_FUJITSU, 771 + .mfr_id = CFI_MFR_FUJITSU, 756 772 .dev_id = MBM29LV160BE, 757 773 .name = "Fujitsu MBM29LV160BE", 758 774 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 767 783 ERASEINFO(0x10000,31) 768 784 } 769 785 }, { 770 - .mfr_id = MANUFACTURER_FUJITSU, 786 + .mfr_id = CFI_MFR_FUJITSU, 771 787 .dev_id = MBM29LV800BA, 772 788 .name = "Fujitsu MBM29LV800BA", 773 789 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 782 798 ERASEINFO(0x10000,15) 783 799 } 784 800 }, { 785 - .mfr_id = MANUFACTURER_FUJITSU, 801 + .mfr_id = CFI_MFR_FUJITSU, 786 802 .dev_id = MBM29LV800TA, 787 803 .name = "Fujitsu MBM29LV800TA", 788 804 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 797 813 ERASEINFO(0x04000,1) 798 814 } 799 815 }, { 800 - .mfr_id = MANUFACTURER_FUJITSU, 816 + .mfr_id = CFI_MFR_FUJITSU, 801 817 .dev_id = MBM29LV400BC, 802 818 .name = "Fujitsu MBM29LV400BC", 803 819 .devtypes = 
CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 812 828 ERASEINFO(0x10000,7) 813 829 } 814 830 }, { 815 - .mfr_id = MANUFACTURER_FUJITSU, 831 + .mfr_id = CFI_MFR_FUJITSU, 816 832 .dev_id = MBM29LV400TC, 817 833 .name = "Fujitsu MBM29LV400TC", 818 834 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 827 843 ERASEINFO(0x04000,1) 828 844 } 829 845 }, { 830 - .mfr_id = MANUFACTURER_HYUNDAI, 846 + .mfr_id = CFI_MFR_HYUNDAI, 831 847 .dev_id = HY29F002T, 832 848 .name = "Hyundai HY29F002T", 833 849 .devtypes = CFI_DEVICETYPE_X8, ··· 842 858 ERASEINFO(0x04000,1), 843 859 } 844 860 }, { 845 - .mfr_id = MANUFACTURER_INTEL, 861 + .mfr_id = CFI_MFR_INTEL, 846 862 .dev_id = I28F004B3B, 847 863 .name = "Intel 28F004B3B", 848 864 .devtypes = CFI_DEVICETYPE_X8, ··· 855 871 ERASEINFO(0x10000, 7), 856 872 } 857 873 }, { 858 - .mfr_id = MANUFACTURER_INTEL, 874 + .mfr_id = CFI_MFR_INTEL, 859 875 .dev_id = I28F004B3T, 860 876 .name = "Intel 28F004B3T", 861 877 .devtypes = CFI_DEVICETYPE_X8, ··· 868 884 ERASEINFO(0x02000, 8), 869 885 } 870 886 }, { 871 - .mfr_id = MANUFACTURER_INTEL, 887 + .mfr_id = CFI_MFR_INTEL, 872 888 .dev_id = I28F400B3B, 873 889 .name = "Intel 28F400B3B", 874 890 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 881 897 ERASEINFO(0x10000, 7), 882 898 } 883 899 }, { 884 - .mfr_id = MANUFACTURER_INTEL, 900 + .mfr_id = CFI_MFR_INTEL, 885 901 .dev_id = I28F400B3T, 886 902 .name = "Intel 28F400B3T", 887 903 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 894 910 ERASEINFO(0x02000, 8), 895 911 } 896 912 }, { 897 - .mfr_id = MANUFACTURER_INTEL, 913 + .mfr_id = CFI_MFR_INTEL, 898 914 .dev_id = I28F008B3B, 899 915 .name = "Intel 28F008B3B", 900 916 .devtypes = CFI_DEVICETYPE_X8, ··· 907 923 ERASEINFO(0x10000, 15), 908 924 } 909 925 }, { 910 - .mfr_id = MANUFACTURER_INTEL, 926 + .mfr_id = CFI_MFR_INTEL, 911 927 .dev_id = I28F008B3T, 912 928 .name = "Intel 28F008B3T", 913 929 .devtypes = CFI_DEVICETYPE_X8, ··· 920 936 ERASEINFO(0x02000, 8), 921 937 } 922 938 }, { 
923 - .mfr_id = MANUFACTURER_INTEL, 939 + .mfr_id = CFI_MFR_INTEL, 924 940 .dev_id = I28F008S5, 925 941 .name = "Intel 28F008S5", 926 942 .devtypes = CFI_DEVICETYPE_X8, ··· 932 948 ERASEINFO(0x10000,16), 933 949 } 934 950 }, { 935 - .mfr_id = MANUFACTURER_INTEL, 951 + .mfr_id = CFI_MFR_INTEL, 936 952 .dev_id = I28F016S5, 937 953 .name = "Intel 28F016S5", 938 954 .devtypes = CFI_DEVICETYPE_X8, ··· 944 960 ERASEINFO(0x10000,32), 945 961 } 946 962 }, { 947 - .mfr_id = MANUFACTURER_INTEL, 963 + .mfr_id = CFI_MFR_INTEL, 948 964 .dev_id = I28F008SA, 949 965 .name = "Intel 28F008SA", 950 966 .devtypes = CFI_DEVICETYPE_X8, ··· 956 972 ERASEINFO(0x10000, 16), 957 973 } 958 974 }, { 959 - .mfr_id = MANUFACTURER_INTEL, 975 + .mfr_id = CFI_MFR_INTEL, 960 976 .dev_id = I28F800B3B, 961 977 .name = "Intel 28F800B3B", 962 978 .devtypes = CFI_DEVICETYPE_X16, ··· 969 985 ERASEINFO(0x10000, 15), 970 986 } 971 987 }, { 972 - .mfr_id = MANUFACTURER_INTEL, 988 + .mfr_id = CFI_MFR_INTEL, 973 989 .dev_id = I28F800B3T, 974 990 .name = "Intel 28F800B3T", 975 991 .devtypes = CFI_DEVICETYPE_X16, ··· 982 998 ERASEINFO(0x02000, 8), 983 999 } 984 1000 }, { 985 - .mfr_id = MANUFACTURER_INTEL, 1001 + .mfr_id = CFI_MFR_INTEL, 986 1002 .dev_id = I28F016B3B, 987 1003 .name = "Intel 28F016B3B", 988 1004 .devtypes = CFI_DEVICETYPE_X8, ··· 995 1011 ERASEINFO(0x10000, 31), 996 1012 } 997 1013 }, { 998 - .mfr_id = MANUFACTURER_INTEL, 1014 + .mfr_id = CFI_MFR_INTEL, 999 1015 .dev_id = I28F016S3, 1000 1016 .name = "Intel I28F016S3", 1001 1017 .devtypes = CFI_DEVICETYPE_X8, ··· 1007 1023 ERASEINFO(0x10000, 32), 1008 1024 } 1009 1025 }, { 1010 - .mfr_id = MANUFACTURER_INTEL, 1026 + .mfr_id = CFI_MFR_INTEL, 1011 1027 .dev_id = I28F016B3T, 1012 1028 .name = "Intel 28F016B3T", 1013 1029 .devtypes = CFI_DEVICETYPE_X8, ··· 1020 1036 ERASEINFO(0x02000, 8), 1021 1037 } 1022 1038 }, { 1023 - .mfr_id = MANUFACTURER_INTEL, 1039 + .mfr_id = CFI_MFR_INTEL, 1024 1040 .dev_id = I28F160B3B, 1025 1041 .name = "Intel 
28F160B3B", 1026 1042 .devtypes = CFI_DEVICETYPE_X16, ··· 1033 1049 ERASEINFO(0x10000, 31), 1034 1050 } 1035 1051 }, { 1036 - .mfr_id = MANUFACTURER_INTEL, 1052 + .mfr_id = CFI_MFR_INTEL, 1037 1053 .dev_id = I28F160B3T, 1038 1054 .name = "Intel 28F160B3T", 1039 1055 .devtypes = CFI_DEVICETYPE_X16, ··· 1046 1062 ERASEINFO(0x02000, 8), 1047 1063 } 1048 1064 }, { 1049 - .mfr_id = MANUFACTURER_INTEL, 1065 + .mfr_id = CFI_MFR_INTEL, 1050 1066 .dev_id = I28F320B3B, 1051 1067 .name = "Intel 28F320B3B", 1052 1068 .devtypes = CFI_DEVICETYPE_X16, ··· 1059 1075 ERASEINFO(0x10000, 63), 1060 1076 } 1061 1077 }, { 1062 - .mfr_id = MANUFACTURER_INTEL, 1078 + .mfr_id = CFI_MFR_INTEL, 1063 1079 .dev_id = I28F320B3T, 1064 1080 .name = "Intel 28F320B3T", 1065 1081 .devtypes = CFI_DEVICETYPE_X16, ··· 1072 1088 ERASEINFO(0x02000, 8), 1073 1089 } 1074 1090 }, { 1075 - .mfr_id = MANUFACTURER_INTEL, 1091 + .mfr_id = CFI_MFR_INTEL, 1076 1092 .dev_id = I28F640B3B, 1077 1093 .name = "Intel 28F640B3B", 1078 1094 .devtypes = CFI_DEVICETYPE_X16, ··· 1085 1101 ERASEINFO(0x10000, 127), 1086 1102 } 1087 1103 }, { 1088 - .mfr_id = MANUFACTURER_INTEL, 1104 + .mfr_id = CFI_MFR_INTEL, 1089 1105 .dev_id = I28F640B3T, 1090 1106 .name = "Intel 28F640B3T", 1091 1107 .devtypes = CFI_DEVICETYPE_X16, ··· 1098 1114 ERASEINFO(0x02000, 8), 1099 1115 } 1100 1116 }, { 1101 - .mfr_id = MANUFACTURER_INTEL, 1117 + .mfr_id = CFI_MFR_INTEL, 1102 1118 .dev_id = I28F640C3B, 1103 1119 .name = "Intel 28F640C3B", 1104 1120 .devtypes = CFI_DEVICETYPE_X16, ··· 1111 1127 ERASEINFO(0x10000, 127), 1112 1128 } 1113 1129 }, { 1114 - .mfr_id = MANUFACTURER_INTEL, 1130 + .mfr_id = CFI_MFR_INTEL, 1115 1131 .dev_id = I82802AB, 1116 1132 .name = "Intel 82802AB", 1117 1133 .devtypes = CFI_DEVICETYPE_X8, ··· 1123 1139 ERASEINFO(0x10000,8), 1124 1140 } 1125 1141 }, { 1126 - .mfr_id = MANUFACTURER_INTEL, 1142 + .mfr_id = CFI_MFR_INTEL, 1127 1143 .dev_id = I82802AC, 1128 1144 .name = "Intel 82802AC", 1129 1145 .devtypes = 
CFI_DEVICETYPE_X8, ··· 1135 1151 ERASEINFO(0x10000,16), 1136 1152 } 1137 1153 }, { 1138 - .mfr_id = MANUFACTURER_MACRONIX, 1154 + .mfr_id = CFI_MFR_MACRONIX, 1139 1155 .dev_id = MX29LV040C, 1140 1156 .name = "Macronix MX29LV040C", 1141 1157 .devtypes = CFI_DEVICETYPE_X8, ··· 1147 1163 ERASEINFO(0x10000,8), 1148 1164 } 1149 1165 }, { 1150 - .mfr_id = MANUFACTURER_MACRONIX, 1166 + .mfr_id = CFI_MFR_MACRONIX, 1151 1167 .dev_id = MX29LV160T, 1152 1168 .name = "MXIC MX29LV160T", 1153 1169 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 1162 1178 ERASEINFO(0x04000,1) 1163 1179 } 1164 1180 }, { 1165 - .mfr_id = MANUFACTURER_NEC, 1181 + .mfr_id = CFI_MFR_NEC, 1166 1182 .dev_id = UPD29F064115, 1167 1183 .name = "NEC uPD29F064115", 1168 1184 .devtypes = CFI_DEVICETYPE_X16, ··· 1176 1192 ERASEINFO(0x2000,8), 1177 1193 } 1178 1194 }, { 1179 - .mfr_id = MANUFACTURER_MACRONIX, 1195 + .mfr_id = CFI_MFR_MACRONIX, 1180 1196 .dev_id = MX29LV160B, 1181 1197 .name = "MXIC MX29LV160B", 1182 1198 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 1191 1207 ERASEINFO(0x10000,31) 1192 1208 } 1193 1209 }, { 1194 - .mfr_id = MANUFACTURER_MACRONIX, 1210 + .mfr_id = CFI_MFR_MACRONIX, 1195 1211 .dev_id = MX29F040, 1196 1212 .name = "Macronix MX29F040", 1197 1213 .devtypes = CFI_DEVICETYPE_X8, ··· 1203 1219 ERASEINFO(0x10000,8), 1204 1220 } 1205 1221 }, { 1206 - .mfr_id = MANUFACTURER_MACRONIX, 1222 + .mfr_id = CFI_MFR_MACRONIX, 1207 1223 .dev_id = MX29F016, 1208 1224 .name = "Macronix MX29F016", 1209 1225 .devtypes = CFI_DEVICETYPE_X8, ··· 1215 1231 ERASEINFO(0x10000,32), 1216 1232 } 1217 1233 }, { 1218 - .mfr_id = MANUFACTURER_MACRONIX, 1234 + .mfr_id = CFI_MFR_MACRONIX, 1219 1235 .dev_id = MX29F004T, 1220 1236 .name = "Macronix MX29F004T", 1221 1237 .devtypes = CFI_DEVICETYPE_X8, ··· 1230 1246 ERASEINFO(0x04000,1), 1231 1247 } 1232 1248 }, { 1233 - .mfr_id = MANUFACTURER_MACRONIX, 1249 + .mfr_id = CFI_MFR_MACRONIX, 1234 1250 .dev_id = MX29F004B, 1235 1251 .name = "Macronix 
MX29F004B", 1236 1252 .devtypes = CFI_DEVICETYPE_X8, ··· 1245 1261 ERASEINFO(0x10000,7), 1246 1262 } 1247 1263 }, { 1248 - .mfr_id = MANUFACTURER_MACRONIX, 1264 + .mfr_id = CFI_MFR_MACRONIX, 1249 1265 .dev_id = MX29F002T, 1250 1266 .name = "Macronix MX29F002T", 1251 1267 .devtypes = CFI_DEVICETYPE_X8, ··· 1260 1276 ERASEINFO(0x04000,1), 1261 1277 } 1262 1278 }, { 1263 - .mfr_id = MANUFACTURER_PMC, 1279 + .mfr_id = CFI_MFR_PMC, 1264 1280 .dev_id = PM49FL002, 1265 1281 .name = "PMC Pm49FL002", 1266 1282 .devtypes = CFI_DEVICETYPE_X8, ··· 1272 1288 ERASEINFO( 0x01000, 64 ) 1273 1289 } 1274 1290 }, { 1275 - .mfr_id = MANUFACTURER_PMC, 1291 + .mfr_id = CFI_MFR_PMC, 1276 1292 .dev_id = PM49FL004, 1277 1293 .name = "PMC Pm49FL004", 1278 1294 .devtypes = CFI_DEVICETYPE_X8, ··· 1284 1300 ERASEINFO( 0x01000, 128 ) 1285 1301 } 1286 1302 }, { 1287 - .mfr_id = MANUFACTURER_PMC, 1303 + .mfr_id = CFI_MFR_PMC, 1288 1304 .dev_id = PM49FL008, 1289 1305 .name = "PMC Pm49FL008", 1290 1306 .devtypes = CFI_DEVICETYPE_X8, ··· 1296 1312 ERASEINFO( 0x01000, 256 ) 1297 1313 } 1298 1314 }, { 1299 - .mfr_id = MANUFACTURER_SHARP, 1315 + .mfr_id = CFI_MFR_SHARP, 1300 1316 .dev_id = LH28F640BF, 1301 1317 .name = "LH28F640BF", 1302 1318 .devtypes = CFI_DEVICETYPE_X8, ··· 1308 1324 ERASEINFO(0x40000,16), 1309 1325 } 1310 1326 }, { 1311 - .mfr_id = MANUFACTURER_SST, 1327 + .mfr_id = CFI_MFR_SST, 1312 1328 .dev_id = SST39LF512, 1313 1329 .name = "SST 39LF512", 1314 1330 .devtypes = CFI_DEVICETYPE_X8, ··· 1320 1336 ERASEINFO(0x01000,16), 1321 1337 } 1322 1338 }, { 1323 - .mfr_id = MANUFACTURER_SST, 1339 + .mfr_id = CFI_MFR_SST, 1324 1340 .dev_id = SST39LF010, 1325 1341 .name = "SST 39LF010", 1326 1342 .devtypes = CFI_DEVICETYPE_X8, ··· 1332 1348 ERASEINFO(0x01000,32), 1333 1349 } 1334 1350 }, { 1335 - .mfr_id = MANUFACTURER_SST, 1336 - .dev_id = SST29EE020, 1351 + .mfr_id = CFI_MFR_SST, 1352 + .dev_id = SST29EE020, 1337 1353 .name = "SST 29EE020", 1338 1354 .devtypes = CFI_DEVICETYPE_X8, 1339 1355 
.uaddr = MTD_UADDR_0x5555_0x2AAA, ··· 1343 1359 .regions = {ERASEINFO(0x01000,64), 1344 1360 } 1345 1361 }, { 1346 - .mfr_id = MANUFACTURER_SST, 1362 + .mfr_id = CFI_MFR_SST, 1347 1363 .dev_id = SST29LE020, 1348 - .name = "SST 29LE020", 1364 + .name = "SST 29LE020", 1349 1365 .devtypes = CFI_DEVICETYPE_X8, 1350 1366 .uaddr = MTD_UADDR_0x5555_0x2AAA, 1351 1367 .dev_size = SIZE_256KiB, ··· 1354 1370 .regions = {ERASEINFO(0x01000,64), 1355 1371 } 1356 1372 }, { 1357 - .mfr_id = MANUFACTURER_SST, 1373 + .mfr_id = CFI_MFR_SST, 1358 1374 .dev_id = SST39LF020, 1359 1375 .name = "SST 39LF020", 1360 1376 .devtypes = CFI_DEVICETYPE_X8, ··· 1366 1382 ERASEINFO(0x01000,64), 1367 1383 } 1368 1384 }, { 1369 - .mfr_id = MANUFACTURER_SST, 1385 + .mfr_id = CFI_MFR_SST, 1370 1386 .dev_id = SST39LF040, 1371 1387 .name = "SST 39LF040", 1372 1388 .devtypes = CFI_DEVICETYPE_X8, ··· 1378 1394 ERASEINFO(0x01000,128), 1379 1395 } 1380 1396 }, { 1381 - .mfr_id = MANUFACTURER_SST, 1397 + .mfr_id = CFI_MFR_SST, 1382 1398 .dev_id = SST39SF010A, 1383 1399 .name = "SST 39SF010A", 1384 1400 .devtypes = CFI_DEVICETYPE_X8, ··· 1390 1406 ERASEINFO(0x01000,32), 1391 1407 } 1392 1408 }, { 1393 - .mfr_id = MANUFACTURER_SST, 1409 + .mfr_id = CFI_MFR_SST, 1394 1410 .dev_id = SST39SF020A, 1395 1411 .name = "SST 39SF020A", 1396 1412 .devtypes = CFI_DEVICETYPE_X8, ··· 1402 1418 ERASEINFO(0x01000,64), 1403 1419 } 1404 1420 }, { 1405 - .mfr_id = MANUFACTURER_SST, 1421 + .mfr_id = CFI_MFR_SST, 1406 1422 .dev_id = SST39SF040, 1407 1423 .name = "SST 39SF040", 1408 1424 .devtypes = CFI_DEVICETYPE_X8, ··· 1414 1430 ERASEINFO(0x01000,128), 1415 1431 } 1416 1432 }, { 1417 - .mfr_id = MANUFACTURER_SST, 1433 + .mfr_id = CFI_MFR_SST, 1418 1434 .dev_id = SST49LF040B, 1419 1435 .name = "SST 49LF040B", 1420 1436 .devtypes = CFI_DEVICETYPE_X8, ··· 1427 1443 } 1428 1444 }, { 1429 1445 1430 - .mfr_id = MANUFACTURER_SST, 1446 + .mfr_id = CFI_MFR_SST, 1431 1447 .dev_id = SST49LF004B, 1432 1448 .name = "SST 49LF004B", 1433 1449 
.devtypes = CFI_DEVICETYPE_X8, ··· 1439 1455 ERASEINFO(0x01000,128), 1440 1456 } 1441 1457 }, { 1442 - .mfr_id = MANUFACTURER_SST, 1458 + .mfr_id = CFI_MFR_SST, 1443 1459 .dev_id = SST49LF008A, 1444 1460 .name = "SST 49LF008A", 1445 1461 .devtypes = CFI_DEVICETYPE_X8, ··· 1451 1467 ERASEINFO(0x01000,256), 1452 1468 } 1453 1469 }, { 1454 - .mfr_id = MANUFACTURER_SST, 1470 + .mfr_id = CFI_MFR_SST, 1455 1471 .dev_id = SST49LF030A, 1456 1472 .name = "SST 49LF030A", 1457 1473 .devtypes = CFI_DEVICETYPE_X8, ··· 1463 1479 ERASEINFO(0x01000,96), 1464 1480 } 1465 1481 }, { 1466 - .mfr_id = MANUFACTURER_SST, 1482 + .mfr_id = CFI_MFR_SST, 1467 1483 .dev_id = SST49LF040A, 1468 1484 .name = "SST 49LF040A", 1469 1485 .devtypes = CFI_DEVICETYPE_X8, ··· 1475 1491 ERASEINFO(0x01000,128), 1476 1492 } 1477 1493 }, { 1478 - .mfr_id = MANUFACTURER_SST, 1494 + .mfr_id = CFI_MFR_SST, 1479 1495 .dev_id = SST49LF080A, 1480 1496 .name = "SST 49LF080A", 1481 1497 .devtypes = CFI_DEVICETYPE_X8, ··· 1487 1503 ERASEINFO(0x01000,256), 1488 1504 } 1489 1505 }, { 1490 - .mfr_id = MANUFACTURER_SST, /* should be CFI */ 1506 + .mfr_id = CFI_MFR_SST, /* should be CFI */ 1491 1507 .dev_id = SST39LF160, 1492 1508 .name = "SST 39LF160", 1493 1509 .devtypes = CFI_DEVICETYPE_X16, ··· 1500 1516 ERASEINFO(0x1000,256) 1501 1517 } 1502 1518 }, { 1503 - .mfr_id = MANUFACTURER_SST, /* should be CFI */ 1519 + .mfr_id = CFI_MFR_SST, /* should be CFI */ 1504 1520 .dev_id = SST39VF1601, 1505 1521 .name = "SST 39VF1601", 1506 1522 .devtypes = CFI_DEVICETYPE_X16, ··· 1513 1529 ERASEINFO(0x1000,256) 1514 1530 } 1515 1531 }, { 1516 - .mfr_id = MANUFACTURER_SST, /* should be CFI */ 1532 + /* CFI is broken: reports AMD_STD, but needs custom uaddr */ 1533 + .mfr_id = CFI_MFR_SST, 1534 + .dev_id = SST39WF1601, 1535 + .name = "SST 39WF1601", 1536 + .devtypes = CFI_DEVICETYPE_X16, 1537 + .uaddr = MTD_UADDR_0xAAAA_0x5555, 1538 + .dev_size = SIZE_2MiB, 1539 + .cmd_set = P_ID_AMD_STD, 1540 + .nr_regions = 2, 1541 + .regions = { 
1542 + ERASEINFO(0x1000,256), 1543 + ERASEINFO(0x1000,256) 1544 + } 1545 + }, { 1546 + /* CFI is broken: reports AMD_STD, but needs custom uaddr */ 1547 + .mfr_id = CFI_MFR_SST, 1548 + .dev_id = SST39WF1602, 1549 + .name = "SST 39WF1602", 1550 + .devtypes = CFI_DEVICETYPE_X16, 1551 + .uaddr = MTD_UADDR_0xAAAA_0x5555, 1552 + .dev_size = SIZE_2MiB, 1553 + .cmd_set = P_ID_AMD_STD, 1554 + .nr_regions = 2, 1555 + .regions = { 1556 + ERASEINFO(0x1000,256), 1557 + ERASEINFO(0x1000,256) 1558 + } 1559 + }, { 1560 + .mfr_id = CFI_MFR_SST, /* should be CFI */ 1517 1561 .dev_id = SST39VF3201, 1518 1562 .name = "SST 39VF3201", 1519 1563 .devtypes = CFI_DEVICETYPE_X16, ··· 1556 1544 ERASEINFO(0x1000,256) 1557 1545 } 1558 1546 }, { 1559 - .mfr_id = MANUFACTURER_SST, 1547 + .mfr_id = CFI_MFR_SST, 1560 1548 .dev_id = SST36VF3203, 1561 1549 .name = "SST 36VF3203", 1562 1550 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 1568 1556 ERASEINFO(0x10000,64), 1569 1557 } 1570 1558 }, { 1571 - .mfr_id = MANUFACTURER_ST, 1559 + .mfr_id = CFI_MFR_ST, 1572 1560 .dev_id = M29F800AB, 1573 1561 .name = "ST M29F800AB", 1574 1562 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 1583 1571 ERASEINFO(0x10000,15), 1584 1572 } 1585 1573 }, { 1586 - .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ 1574 + .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */ 1587 1575 .dev_id = M29W800DT, 1588 1576 .name = "ST M29W800DT", 1589 1577 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 1598 1586 ERASEINFO(0x04000,1) 1599 1587 } 1600 1588 }, { 1601 - .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ 1589 + .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? 
*/ 1602 1590 .dev_id = M29W800DB, 1603 1591 .name = "ST M29W800DB", 1604 1592 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 1613 1601 ERASEINFO(0x10000,15) 1614 1602 } 1615 1603 }, { 1616 - .mfr_id = MANUFACTURER_ST, 1604 + .mfr_id = CFI_MFR_ST, 1617 1605 .dev_id = M29W400DT, 1618 1606 .name = "ST M29W400DT", 1619 1607 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 1628 1616 ERASEINFO(0x10000,1) 1629 1617 } 1630 1618 }, { 1631 - .mfr_id = MANUFACTURER_ST, 1619 + .mfr_id = CFI_MFR_ST, 1632 1620 .dev_id = M29W400DB, 1633 1621 .name = "ST M29W400DB", 1634 1622 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 1643 1631 ERASEINFO(0x10000,7) 1644 1632 } 1645 1633 }, { 1646 - .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ 1634 + .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */ 1647 1635 .dev_id = M29W160DT, 1648 1636 .name = "ST M29W160DT", 1649 1637 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 1658 1646 ERASEINFO(0x04000,1) 1659 1647 } 1660 1648 }, { 1661 - .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ 1649 + .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? 
*/ 1662 1650 .dev_id = M29W160DB, 1663 1651 .name = "ST M29W160DB", 1664 1652 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 1673 1661 ERASEINFO(0x10000,31) 1674 1662 } 1675 1663 }, { 1676 - .mfr_id = MANUFACTURER_ST, 1664 + .mfr_id = CFI_MFR_ST, 1677 1665 .dev_id = M29W040B, 1678 1666 .name = "ST M29W040B", 1679 1667 .devtypes = CFI_DEVICETYPE_X8, ··· 1685 1673 ERASEINFO(0x10000,8), 1686 1674 } 1687 1675 }, { 1688 - .mfr_id = MANUFACTURER_ST, 1676 + .mfr_id = CFI_MFR_ST, 1689 1677 .dev_id = M50FW040, 1690 1678 .name = "ST M50FW040", 1691 1679 .devtypes = CFI_DEVICETYPE_X8, ··· 1697 1685 ERASEINFO(0x10000,8), 1698 1686 } 1699 1687 }, { 1700 - .mfr_id = MANUFACTURER_ST, 1688 + .mfr_id = CFI_MFR_ST, 1701 1689 .dev_id = M50FW080, 1702 1690 .name = "ST M50FW080", 1703 1691 .devtypes = CFI_DEVICETYPE_X8, ··· 1709 1697 ERASEINFO(0x10000,16), 1710 1698 } 1711 1699 }, { 1712 - .mfr_id = MANUFACTURER_ST, 1700 + .mfr_id = CFI_MFR_ST, 1713 1701 .dev_id = M50FW016, 1714 1702 .name = "ST M50FW016", 1715 1703 .devtypes = CFI_DEVICETYPE_X8, ··· 1721 1709 ERASEINFO(0x10000,32), 1722 1710 } 1723 1711 }, { 1724 - .mfr_id = MANUFACTURER_ST, 1712 + .mfr_id = CFI_MFR_ST, 1725 1713 .dev_id = M50LPW080, 1726 1714 .name = "ST M50LPW080", 1727 1715 .devtypes = CFI_DEVICETYPE_X8, ··· 1733 1721 ERASEINFO(0x10000,16), 1734 1722 }, 1735 1723 }, { 1736 - .mfr_id = MANUFACTURER_ST, 1724 + .mfr_id = CFI_MFR_ST, 1737 1725 .dev_id = M50FLW080A, 1738 1726 .name = "ST M50FLW080A", 1739 1727 .devtypes = CFI_DEVICETYPE_X8, ··· 1748 1736 ERASEINFO(0x1000,16), 1749 1737 } 1750 1738 }, { 1751 - .mfr_id = MANUFACTURER_ST, 1739 + .mfr_id = CFI_MFR_ST, 1752 1740 .dev_id = M50FLW080B, 1753 1741 .name = "ST M50FLW080B", 1754 1742 .devtypes = CFI_DEVICETYPE_X8, ··· 1763 1751 ERASEINFO(0x1000,16), 1764 1752 } 1765 1753 }, { 1766 - .mfr_id = 0xff00 | MANUFACTURER_ST, 1754 + .mfr_id = 0xff00 | CFI_MFR_ST, 1767 1755 .dev_id = 0xff00 | PSD4256G6V, 1768 1756 .name = "ST PSD4256G6V", 1769 1757 .devtypes = 
CFI_DEVICETYPE_X16, ··· 1775 1763 ERASEINFO(0x10000,16), 1776 1764 } 1777 1765 }, { 1778 - .mfr_id = MANUFACTURER_TOSHIBA, 1766 + .mfr_id = CFI_MFR_TOSHIBA, 1779 1767 .dev_id = TC58FVT160, 1780 1768 .name = "Toshiba TC58FVT160", 1781 1769 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 1790 1778 ERASEINFO(0x04000,1) 1791 1779 } 1792 1780 }, { 1793 - .mfr_id = MANUFACTURER_TOSHIBA, 1781 + .mfr_id = CFI_MFR_TOSHIBA, 1794 1782 .dev_id = TC58FVB160, 1795 1783 .name = "Toshiba TC58FVB160", 1796 1784 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 1805 1793 ERASEINFO(0x10000,31) 1806 1794 } 1807 1795 }, { 1808 - .mfr_id = MANUFACTURER_TOSHIBA, 1796 + .mfr_id = CFI_MFR_TOSHIBA, 1809 1797 .dev_id = TC58FVB321, 1810 1798 .name = "Toshiba TC58FVB321", 1811 1799 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 1818 1806 ERASEINFO(0x10000,63) 1819 1807 } 1820 1808 }, { 1821 - .mfr_id = MANUFACTURER_TOSHIBA, 1809 + .mfr_id = CFI_MFR_TOSHIBA, 1822 1810 .dev_id = TC58FVT321, 1823 1811 .name = "Toshiba TC58FVT321", 1824 1812 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 1831 1819 ERASEINFO(0x02000,8) 1832 1820 } 1833 1821 }, { 1834 - .mfr_id = MANUFACTURER_TOSHIBA, 1822 + .mfr_id = CFI_MFR_TOSHIBA, 1835 1823 .dev_id = TC58FVB641, 1836 1824 .name = "Toshiba TC58FVB641", 1837 1825 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 1844 1832 ERASEINFO(0x10000,127) 1845 1833 } 1846 1834 }, { 1847 - .mfr_id = MANUFACTURER_TOSHIBA, 1835 + .mfr_id = CFI_MFR_TOSHIBA, 1848 1836 .dev_id = TC58FVT641, 1849 1837 .name = "Toshiba TC58FVT641", 1850 1838 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, ··· 1857 1845 ERASEINFO(0x02000,8) 1858 1846 } 1859 1847 }, { 1860 - .mfr_id = MANUFACTURER_WINBOND, 1848 + .mfr_id = CFI_MFR_WINBOND, 1861 1849 .dev_id = W49V002A, 1862 1850 .name = "Winbond W49V002A", 1863 1851 .devtypes = CFI_DEVICETYPE_X8, ··· 1890 1878 mask = (1 << (cfi->device_type * 8)) - 1; 1891 1879 result = map_read(map, base + ofs); 1892 1880 bank++; 1893 - 
} while ((result.x[0] & mask) == CONTINUATION_CODE); 1881 + } while ((result.x[0] & mask) == CFI_MFR_CONTINUATION); 1894 1882 1895 1883 return result.x[0] & mask; 1896 1884 } ··· 1981 1969 p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / p_cfi->device_type; 1982 1970 p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / p_cfi->device_type; 1983 1971 1984 - return 1; /* ok */ 1972 + return 1; /* ok */ 1985 1973 } 1986 1974 1987 1975
+1 -1
drivers/mtd/devices/Makefile
··· 1 1 # 2 - # linux/drivers/devices/Makefile 2 + # linux/drivers/mtd/devices/Makefile 3 3 # 4 4 5 5 obj-$(CONFIG_MTD_DOC2000) += doc2000.o
+1 -3
drivers/mtd/devices/block2mtd.c
··· 276 276 277 277 /* Setup the MTD structure */ 278 278 /* make the name contain the block device in */ 279 - name = kmalloc(sizeof("block2mtd: ") + strlen(devname) + 1, 280 - GFP_KERNEL); 279 + name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname); 281 280 if (!name) 282 281 goto devinit_err; 283 282 284 - sprintf(name, "block2mtd: %s", devname); 285 283 dev->mtd.name = name; 286 284 287 285 dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
+2 -2
drivers/mtd/devices/pmc551.c
··· 668 668 { 669 669 struct pci_dev *PCI_Device = NULL; 670 670 struct mypriv *priv; 671 - int count, found = 0; 671 + int found = 0; 672 672 struct mtd_info *mtd; 673 673 u32 length = 0; 674 674 ··· 695 695 /* 696 696 * PCU-bus chipset probe. 697 697 */ 698 - for (count = 0; count < MAX_MTD_DEVICES; count++) { 698 + for (;;) { 699 699 700 700 if ((PCI_Device = pci_get_device(PCI_VENDOR_ID_V3_SEMI, 701 701 PCI_DEVICE_ID_V3_SEMI_V370PDC,
+33 -35
drivers/mtd/devices/sst25l.c
··· 73 73 74 74 static int sst25l_status(struct sst25l_flash *flash, int *status) 75 75 { 76 - unsigned char command, response; 76 + struct spi_message m; 77 + struct spi_transfer t; 78 + unsigned char cmd_resp[2]; 77 79 int err; 78 80 79 - command = SST25L_CMD_RDSR; 80 - err = spi_write_then_read(flash->spi, &command, 1, &response, 1); 81 + spi_message_init(&m); 82 + memset(&t, 0, sizeof(struct spi_transfer)); 83 + 84 + cmd_resp[0] = SST25L_CMD_RDSR; 85 + cmd_resp[1] = 0xff; 86 + t.tx_buf = cmd_resp; 87 + t.rx_buf = cmd_resp; 88 + t.len = sizeof(cmd_resp); 89 + spi_message_add_tail(&t, &m); 90 + err = spi_sync(flash->spi, &m); 81 91 if (err < 0) 82 92 return err; 83 93 84 - *status = response; 94 + *status = cmd_resp[1]; 85 95 return 0; 86 96 } 87 97 ··· 338 328 static struct flash_info *__init sst25l_match_device(struct spi_device *spi) 339 329 { 340 330 struct flash_info *flash_info = NULL; 341 - unsigned char command[4], response; 331 + struct spi_message m; 332 + struct spi_transfer t; 333 + unsigned char cmd_resp[6]; 342 334 int i, err; 343 335 uint16_t id; 344 336 345 - command[0] = SST25L_CMD_READ_ID; 346 - command[1] = 0; 347 - command[2] = 0; 348 - command[3] = 0; 349 - err = spi_write_then_read(spi, command, sizeof(command), &response, 1); 337 + spi_message_init(&m); 338 + memset(&t, 0, sizeof(struct spi_transfer)); 339 + 340 + cmd_resp[0] = SST25L_CMD_READ_ID; 341 + cmd_resp[1] = 0; 342 + cmd_resp[2] = 0; 343 + cmd_resp[3] = 0; 344 + cmd_resp[4] = 0xff; 345 + cmd_resp[5] = 0xff; 346 + t.tx_buf = cmd_resp; 347 + t.rx_buf = cmd_resp; 348 + t.len = sizeof(cmd_resp); 349 + spi_message_add_tail(&t, &m); 350 + err = spi_sync(spi, &m); 350 351 if (err < 0) { 351 - dev_err(&spi->dev, "error reading device id msb\n"); 352 + dev_err(&spi->dev, "error reading device id\n"); 352 353 return NULL; 353 354 } 354 355 355 - id = response << 8; 356 - 357 - command[0] = SST25L_CMD_READ_ID; 358 - command[1] = 0; 359 - command[2] = 0; 360 - command[3] = 1; 361 - err = 
spi_write_then_read(spi, command, sizeof(command), &response, 1); 362 - if (err < 0) { 363 - dev_err(&spi->dev, "error reading device id lsb\n"); 364 - return NULL; 365 - } 366 - 367 - id |= response; 356 + id = (cmd_resp[4] << 8) | cmd_resp[5]; 368 357 369 358 for (i = 0; i < ARRAY_SIZE(sst25l_flash_info); i++) 370 359 if (sst25l_flash_info[i].device_id == id) ··· 419 410 (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20), 420 411 flash->mtd.erasesize, flash->mtd.erasesize / 1024, 421 412 flash->mtd.numeraseregions); 422 - 423 - if (flash->mtd.numeraseregions) 424 - for (i = 0; i < flash->mtd.numeraseregions; i++) 425 - DEBUG(MTD_DEBUG_LEVEL2, 426 - "mtd.eraseregions[%d] = { .offset = 0x%llx, " 427 - ".erasesize = 0x%.8x (%uKiB), " 428 - ".numblocks = %d }\n", 429 - i, (long long)flash->mtd.eraseregions[i].offset, 430 - flash->mtd.eraseregions[i].erasesize, 431 - flash->mtd.eraseregions[i].erasesize / 1024, 432 - flash->mtd.eraseregions[i].numblocks); 433 413 434 414 if (mtd_has_partitions()) { 435 415 struct mtd_partition *parts = NULL;
-1
drivers/mtd/ftl.c
··· 1082 1082 { 1083 1083 del_mtd_blktrans_dev(dev); 1084 1084 ftl_freepart((partition_t *)dev); 1085 - kfree(dev); 1086 1085 } 1087 1086 1088 1087 static struct mtd_blktrans_ops ftl_tr = {
-1
drivers/mtd/inftlcore.c
··· 139 139 140 140 kfree(inftl->PUtable); 141 141 kfree(inftl->VUtable); 142 - kfree(inftl); 143 142 } 144 143 145 144 /*
+4 -3
drivers/mtd/inftlmount.c
··· 100 100 } 101 101 102 102 /* To be safer with BIOS, also use erase mark as discriminant */ 103 - if ((ret = inftl_read_oob(mtd, block * inftl->EraseSize + 104 - SECTORSIZE + 8, 8, &retlen, 105 - (char *)&h1) < 0)) { 103 + ret = inftl_read_oob(mtd, 104 + block * inftl->EraseSize + SECTORSIZE + 8, 105 + 8, &retlen,(char *)&h1); 106 + if (ret < 0) { 106 107 printk(KERN_WARNING "INFTL: ANAND header found at " 107 108 "0x%x in mtd%d, but OOB data read failed " 108 109 "(err %d)\n", block * inftl->EraseSize,
+39 -40
drivers/mtd/lpddr/lpddr_cmds.c
··· 107 107 /* those should be reset too since 108 108 they create memory references. */ 109 109 init_waitqueue_head(&chip->wq); 110 - spin_lock_init(&chip->_spinlock); 111 - chip->mutex = &chip->_spinlock; 110 + mutex_init(&chip->mutex); 112 111 chip++; 113 112 } 114 113 } ··· 143 144 } 144 145 145 146 /* OK Still waiting. Drop the lock, wait a while and retry. */ 146 - spin_unlock(chip->mutex); 147 + mutex_unlock(&chip->mutex); 147 148 if (sleep_time >= 1000000/HZ) { 148 149 /* 149 150 * Half of the normal delay still remaining ··· 158 159 cond_resched(); 159 160 timeo--; 160 161 } 161 - spin_lock(chip->mutex); 162 + mutex_lock(&chip->mutex); 162 163 163 164 while (chip->state != chip_state) { 164 165 /* Someone's suspended the operation: sleep */ 165 166 DECLARE_WAITQUEUE(wait, current); 166 167 set_current_state(TASK_UNINTERRUPTIBLE); 167 168 add_wait_queue(&chip->wq, &wait); 168 - spin_unlock(chip->mutex); 169 + mutex_unlock(&chip->mutex); 169 170 schedule(); 170 171 remove_wait_queue(&chip->wq, &wait); 171 - spin_lock(chip->mutex); 172 + mutex_lock(&chip->mutex); 172 173 } 173 174 if (chip->erase_suspended || chip->write_suspended) { 174 175 /* Suspend has occured while sleep: reset timeout */ ··· 229 230 * it'll happily send us to sleep. In any case, when 230 231 * get_chip returns success we're clear to go ahead. 
231 232 */ 232 - ret = spin_trylock(contender->mutex); 233 + ret = mutex_trylock(&contender->mutex); 233 234 spin_unlock(&shared->lock); 234 235 if (!ret) 235 236 goto retry; 236 - spin_unlock(chip->mutex); 237 + mutex_unlock(&chip->mutex); 237 238 ret = chip_ready(map, contender, mode); 238 - spin_lock(chip->mutex); 239 + mutex_lock(&chip->mutex); 239 240 240 241 if (ret == -EAGAIN) { 241 - spin_unlock(contender->mutex); 242 + mutex_unlock(&contender->mutex); 242 243 goto retry; 243 244 } 244 245 if (ret) { 245 - spin_unlock(contender->mutex); 246 + mutex_unlock(&contender->mutex); 246 247 return ret; 247 248 } 248 249 spin_lock(&shared->lock); ··· 251 252 * state. Put contender and retry. */ 252 253 if (chip->state == FL_SYNCING) { 253 254 put_chip(map, contender); 254 - spin_unlock(contender->mutex); 255 + mutex_unlock(&contender->mutex); 255 256 goto retry; 256 257 } 257 - spin_unlock(contender->mutex); 258 + mutex_unlock(&contender->mutex); 258 259 } 259 260 260 261 /* Check if we have suspended erase on this chip. 
··· 264 265 spin_unlock(&shared->lock); 265 266 set_current_state(TASK_UNINTERRUPTIBLE); 266 267 add_wait_queue(&chip->wq, &wait); 267 - spin_unlock(chip->mutex); 268 + mutex_unlock(&chip->mutex); 268 269 schedule(); 269 270 remove_wait_queue(&chip->wq, &wait); 270 - spin_lock(chip->mutex); 271 + mutex_lock(&chip->mutex); 271 272 goto retry; 272 273 } 273 274 ··· 336 337 sleep: 337 338 set_current_state(TASK_UNINTERRUPTIBLE); 338 339 add_wait_queue(&chip->wq, &wait); 339 - spin_unlock(chip->mutex); 340 + mutex_unlock(&chip->mutex); 340 341 schedule(); 341 342 remove_wait_queue(&chip->wq, &wait); 342 - spin_lock(chip->mutex); 343 + mutex_lock(&chip->mutex); 343 344 return -EAGAIN; 344 345 } 345 346 } ··· 355 356 if (shared->writing && shared->writing != chip) { 356 357 /* give back the ownership */ 357 358 struct flchip *loaner = shared->writing; 358 - spin_lock(loaner->mutex); 359 + mutex_lock(&loaner->mutex); 359 360 spin_unlock(&shared->lock); 360 - spin_unlock(chip->mutex); 361 + mutex_unlock(&chip->mutex); 361 362 put_chip(map, loaner); 362 - spin_lock(chip->mutex); 363 - spin_unlock(loaner->mutex); 363 + mutex_lock(&chip->mutex); 364 + mutex_unlock(&loaner->mutex); 364 365 wake_up(&chip->wq); 365 366 return; 366 367 } ··· 413 414 414 415 wbufsize = 1 << lpddr->qinfo->BufSizeShift; 415 416 416 - spin_lock(chip->mutex); 417 + mutex_lock(&chip->mutex); 417 418 ret = get_chip(map, chip, FL_WRITING); 418 419 if (ret) { 419 - spin_unlock(chip->mutex); 420 + mutex_unlock(&chip->mutex); 420 421 return ret; 421 422 } 422 423 /* Figure out the number of words to write */ ··· 477 478 } 478 479 479 480 out: put_chip(map, chip); 480 - spin_unlock(chip->mutex); 481 + mutex_unlock(&chip->mutex); 481 482 return ret; 482 483 } 483 484 ··· 489 490 struct flchip *chip = &lpddr->chips[chipnum]; 490 491 int ret; 491 492 492 - spin_lock(chip->mutex); 493 + mutex_lock(&chip->mutex); 493 494 ret = get_chip(map, chip, FL_ERASING); 494 495 if (ret) { 495 - spin_unlock(chip->mutex); 496 
+ mutex_unlock(&chip->mutex); 496 497 return ret; 497 498 } 498 499 send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL); ··· 504 505 goto out; 505 506 } 506 507 out: put_chip(map, chip); 507 - spin_unlock(chip->mutex); 508 + mutex_unlock(&chip->mutex); 508 509 return ret; 509 510 } 510 511 ··· 517 518 struct flchip *chip = &lpddr->chips[chipnum]; 518 519 int ret = 0; 519 520 520 - spin_lock(chip->mutex); 521 + mutex_lock(&chip->mutex); 521 522 ret = get_chip(map, chip, FL_READY); 522 523 if (ret) { 523 - spin_unlock(chip->mutex); 524 + mutex_unlock(&chip->mutex); 524 525 return ret; 525 526 } 526 527 ··· 528 529 *retlen = len; 529 530 530 531 put_chip(map, chip); 531 - spin_unlock(chip->mutex); 532 + mutex_unlock(&chip->mutex); 532 533 return ret; 533 534 } 534 535 ··· 568 569 else 569 570 thislen = len; 570 571 /* get the chip */ 571 - spin_lock(chip->mutex); 572 + mutex_lock(&chip->mutex); 572 573 ret = get_chip(map, chip, FL_POINT); 573 - spin_unlock(chip->mutex); 574 + mutex_unlock(&chip->mutex); 574 575 if (ret) 575 576 break; 576 577 ··· 610 611 else 611 612 thislen = len; 612 613 613 - spin_lock(chip->mutex); 614 + mutex_lock(&chip->mutex); 614 615 if (chip->state == FL_POINT) { 615 616 chip->ref_point_counter--; 616 617 if (chip->ref_point_counter == 0) ··· 620 621 "pointed region\n", map->name); 621 622 622 623 put_chip(map, chip); 623 - spin_unlock(chip->mutex); 624 + mutex_unlock(&chip->mutex); 624 625 625 626 len -= thislen; 626 627 ofs = 0; ··· 726 727 int chipnum = adr >> lpddr->chipshift; 727 728 struct flchip *chip = &lpddr->chips[chipnum]; 728 729 729 - spin_lock(chip->mutex); 730 + mutex_lock(&chip->mutex); 730 731 ret = get_chip(map, chip, FL_LOCKING); 731 732 if (ret) { 732 - spin_unlock(chip->mutex); 733 + mutex_unlock(&chip->mutex); 733 734 return ret; 734 735 } 735 736 ··· 749 750 goto out; 750 751 } 751 752 out: put_chip(map, chip); 752 - spin_unlock(chip->mutex); 753 + mutex_unlock(&chip->mutex); 753 754 return ret; 754 755 } 755 756 
··· 770 771 int chipnum = adr >> lpddr->chipshift; 771 772 struct flchip *chip = &lpddr->chips[chipnum]; 772 773 773 - spin_lock(chip->mutex); 774 + mutex_lock(&chip->mutex); 774 775 ret = get_chip(map, chip, FL_WRITING); 775 776 if (ret) { 776 - spin_unlock(chip->mutex); 777 + mutex_unlock(&chip->mutex); 777 778 return ret; 778 779 } 779 780 ··· 787 788 } 788 789 789 790 out: put_chip(map, chip); 790 - spin_unlock(chip->mutex); 791 + mutex_unlock(&chip->mutex); 791 792 return ret; 792 793 } 793 794
+2 -5
drivers/mtd/lpddr/qinfo_probe.c
··· 134 134 static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr) 135 135 { 136 136 137 - lpddr->qinfo = kmalloc(sizeof(struct qinfo_chip), GFP_KERNEL); 137 + lpddr->qinfo = kzalloc(sizeof(struct qinfo_chip), GFP_KERNEL); 138 138 if (!lpddr->qinfo) { 139 139 printk(KERN_WARNING "%s: no memory for LPDDR qinfo structure\n", 140 140 map->name); 141 141 return 0; 142 142 } 143 - memset(lpddr->qinfo, 0, sizeof(struct qinfo_chip)); 144 143 145 144 /* Get the ManuID */ 146 145 lpddr->ManufactId = CMDVAL(map_read(map, map->pfow_base + PFOW_MANUFACTURER_ID)); ··· 184 185 lpddr.numchips = 1; 185 186 186 187 numvirtchips = lpddr.numchips * lpddr.qinfo->HWPartsNum; 187 - retlpddr = kmalloc(sizeof(struct lpddr_private) + 188 + retlpddr = kzalloc(sizeof(struct lpddr_private) + 188 189 numvirtchips * sizeof(struct flchip), GFP_KERNEL); 189 190 if (!retlpddr) 190 191 return NULL; 191 192 192 - memset(retlpddr, 0, sizeof(struct lpddr_private) + 193 - numvirtchips * sizeof(struct flchip)); 194 193 memcpy(retlpddr, &lpddr, sizeof(struct lpddr_private)); 195 194 196 195 retlpddr->numchips = numvirtchips;
+1 -1
drivers/mtd/maps/Kconfig
··· 435 435 436 436 config MTD_PCMCIA 437 437 tristate "PCMCIA MTD driver" 438 - depends on PCMCIA && MTD_COMPLEX_MAPPINGS && BROKEN 438 + depends on PCMCIA && MTD_COMPLEX_MAPPINGS 439 439 help 440 440 Map driver for accessing PCMCIA linear flash memory cards. These 441 441 cards are usually around 4-16MiB in size. This does not include
+8 -8
drivers/mtd/maps/bfin-async-flash.c
··· 70 70 local_irq_restore(state->irq_flags); 71 71 } 72 72 73 - static map_word bfin_read(struct map_info *map, unsigned long ofs) 73 + static map_word bfin_flash_read(struct map_info *map, unsigned long ofs) 74 74 { 75 75 struct async_state *state = (struct async_state *)map->map_priv_1; 76 76 uint16_t word; ··· 86 86 return test; 87 87 } 88 88 89 - static void bfin_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) 89 + static void bfin_flash_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) 90 90 { 91 91 struct async_state *state = (struct async_state *)map->map_priv_1; 92 92 ··· 97 97 switch_back(state); 98 98 } 99 99 100 - static void bfin_write(struct map_info *map, map_word d1, unsigned long ofs) 100 + static void bfin_flash_write(struct map_info *map, map_word d1, unsigned long ofs) 101 101 { 102 102 struct async_state *state = (struct async_state *)map->map_priv_1; 103 103 uint16_t d; ··· 112 112 switch_back(state); 113 113 } 114 114 115 - static void bfin_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) 115 + static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) 116 116 { 117 117 struct async_state *state = (struct async_state *)map->map_priv_1; 118 118 ··· 141 141 return -ENOMEM; 142 142 143 143 state->map.name = DRIVER_NAME; 144 - state->map.read = bfin_read; 145 - state->map.copy_from = bfin_copy_from; 146 - state->map.write = bfin_write; 147 - state->map.copy_to = bfin_copy_to; 144 + state->map.read = bfin_flash_read; 145 + state->map.copy_from = bfin_flash_copy_from; 146 + state->map.write = bfin_flash_write; 147 + state->map.copy_to = bfin_flash_copy_to; 148 148 state->map.bankwidth = pdata->width; 149 149 state->map.size = memory->end - memory->start + 1; 150 150 state->map.virt = (void __iomem *)memory->start;
+1 -1
drivers/mtd/maps/ceiva.c
··· 253 253 254 254 static int __init clps_setup_flash(void) 255 255 { 256 - int nr; 256 + int nr = 0; 257 257 258 258 #ifdef CONFIG_ARCH_CEIVA 259 259 if (machine_is_ceiva()) {
+1 -2
drivers/mtd/maps/ixp2000.c
··· 165 165 return -EIO; 166 166 } 167 167 168 - info = kmalloc(sizeof(struct ixp2000_flash_info), GFP_KERNEL); 168 + info = kzalloc(sizeof(struct ixp2000_flash_info), GFP_KERNEL); 169 169 if(!info) { 170 170 err = -ENOMEM; 171 171 goto Error; 172 172 } 173 - memset(info, 0, sizeof(struct ixp2000_flash_info)); 174 173 175 174 platform_set_drvdata(dev, info); 176 175
+3 -4
drivers/mtd/maps/ixp4xx.c
··· 107 107 return; 108 108 109 109 if (from & 1) { 110 - *dest++ = BYTE1(flash_read16(src)); 111 - src++; 110 + *dest++ = BYTE1(flash_read16(src-1)); 111 + src++; 112 112 --len; 113 113 } 114 114 ··· 196 196 return err; 197 197 } 198 198 199 - info = kmalloc(sizeof(struct ixp4xx_flash_info), GFP_KERNEL); 199 + info = kzalloc(sizeof(struct ixp4xx_flash_info), GFP_KERNEL); 200 200 if(!info) { 201 201 err = -ENOMEM; 202 202 goto Error; 203 203 } 204 - memset(info, 0, sizeof(struct ixp4xx_flash_info)); 205 204 206 205 platform_set_drvdata(dev, info); 207 206
+50 -38
drivers/mtd/maps/pcmciamtd.c
··· 40 40 static const int debug = 0; 41 41 #endif 42 42 43 - #define err(format, arg...) printk(KERN_ERR "pcmciamtd: " format "\n" , ## arg) 44 43 #define info(format, arg...) printk(KERN_INFO "pcmciamtd: " format "\n" , ## arg) 45 - #define warn(format, arg...) printk(KERN_WARNING "pcmciamtd: " format "\n" , ## arg) 46 - 47 44 48 45 #define DRIVER_DESC "PCMCIA Flash memory card driver" 49 46 ··· 96 99 MODULE_PARM_DESC(mem_type, "Set Memory type (0=Flash, 1=RAM, 2=ROM, default=0)"); 97 100 98 101 99 - /* read/write{8,16} copy_{from,to} routines with window remapping to access whole card */ 102 + /* read/write{8,16} copy_{from,to} routines with window remapping 103 + * to access whole card 104 + */ 100 105 static caddr_t remap_window(struct map_info *map, unsigned long to) 101 106 { 102 107 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; ··· 135 136 return d; 136 137 137 138 d.x[0] = readb(addr); 138 - DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02x", ofs, addr, d.x[0]); 139 + DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx", ofs, addr, d.x[0]); 139 140 return d; 140 141 } 141 142 ··· 150 151 return d; 151 152 152 153 d.x[0] = readw(addr); 153 - DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04x", ofs, addr, d.x[0]); 154 + DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx", ofs, addr, d.x[0]); 154 155 return d; 155 156 } 156 157 ··· 160 161 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; 161 162 unsigned long win_size = dev->win_size; 162 163 163 - DEBUG(3, "to = %p from = %lu len = %u", to, from, len); 164 + DEBUG(3, "to = %p from = %lu len = %zd", to, from, len); 164 165 while(len) { 165 166 int toread = win_size - (from & (win_size-1)); 166 167 caddr_t addr; ··· 188 189 if(!addr) 189 190 return; 190 191 191 - DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02x", adr, addr, d.x[0]); 192 + DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02lx", adr, addr, d.x[0]); 192 193 writeb(d.x[0], addr); 193 194 } 194 195 ··· 199 200 if(!addr) 200 201 return; 201 202 202 - 
DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04x", adr, addr, d.x[0]); 203 + DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04lx", adr, addr, d.x[0]); 203 204 writew(d.x[0], addr); 204 205 } 205 206 ··· 209 210 struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; 210 211 unsigned long win_size = dev->win_size; 211 212 212 - DEBUG(3, "to = %lu from = %p len = %u", to, from, len); 213 + DEBUG(3, "to = %lu from = %p len = %zd", to, from, len); 213 214 while(len) { 214 215 int towrite = win_size - (to & (win_size-1)); 215 216 caddr_t addr; ··· 243 244 return d; 244 245 245 246 d.x[0] = readb(win_base + ofs); 246 - DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02x", ofs, win_base + ofs, d.x[0]); 247 + DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx", 248 + ofs, win_base + ofs, d.x[0]); 247 249 return d; 248 250 } 249 251 ··· 258 258 return d; 259 259 260 260 d.x[0] = readw(win_base + ofs); 261 - DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04x", ofs, win_base + ofs, d.x[0]); 261 + DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx", 262 + ofs, win_base + ofs, d.x[0]); 262 263 return d; 263 264 } 264 265 ··· 271 270 if(DEV_REMOVED(map)) 272 271 return; 273 272 274 - DEBUG(3, "to = %p from = %lu len = %u", to, from, len); 273 + DEBUG(3, "to = %p from = %lu len = %zd", to, from, len); 275 274 memcpy_fromio(to, win_base + from, len); 276 275 } 277 276 278 277 279 - static void pcmcia_write8(struct map_info *map, u8 d, unsigned long adr) 278 + static void pcmcia_write8(struct map_info *map, map_word d, unsigned long adr) 280 279 { 281 280 caddr_t win_base = (caddr_t)map->map_priv_2; 282 281 283 282 if(DEV_REMOVED(map)) 284 283 return; 285 284 286 - DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02x", adr, win_base + adr, d); 287 - writeb(d, win_base + adr); 285 + DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02lx", 286 + adr, win_base + adr, d.x[0]); 287 + writeb(d.x[0], win_base + adr); 288 288 } 289 289 290 290 291 - static void pcmcia_write16(struct map_info *map, u16 d, unsigned long adr) 291 + static void 
pcmcia_write16(struct map_info *map, map_word d, unsigned long adr) 292 292 { 293 293 caddr_t win_base = (caddr_t)map->map_priv_2; 294 294 295 295 if(DEV_REMOVED(map)) 296 296 return; 297 297 298 - DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04x", adr, win_base + adr, d); 299 - writew(d, win_base + adr); 298 + DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04lx", 299 + adr, win_base + adr, d.x[0]); 300 + writew(d.x[0], win_base + adr); 300 301 } 301 302 302 303 ··· 309 306 if(DEV_REMOVED(map)) 310 307 return; 311 308 312 - DEBUG(3, "to = %lu from = %p len = %u", to, from, len); 309 + DEBUG(3, "to = %lu from = %p len = %zd", to, from, len); 313 310 memcpy_toio(win_base + to, from, len); 314 311 } 315 312 ··· 378 375 if (!pcmcia_parse_tuple(tuple, &parse)) { 379 376 cistpl_jedec_t *t = &parse.jedec; 380 377 for (i = 0; i < t->nid; i++) 381 - DEBUG(2, "JEDEC: 0x%02x 0x%02x", t->id[i].mfr, t->id[i].info); 378 + DEBUG(2, "JEDEC: 0x%02x 0x%02x", 379 + t->id[i].mfr, t->id[i].info); 382 380 } 383 381 return -ENOSPC; 384 382 } ··· 435 431 } 436 432 437 433 438 - static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *link, int *new_name) 434 + static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *p_dev, int *new_name) 439 435 { 440 436 int i; 441 437 ··· 480 476 } 481 477 482 478 DEBUG(1, "Device: Size: %lu Width:%d Name: %s", 483 - dev->pcmcia_map.size, dev->pcmcia_map.bankwidth << 3, dev->mtd_name); 479 + dev->pcmcia_map.size, 480 + dev->pcmcia_map.bankwidth << 3, dev->mtd_name); 484 481 } 485 482 486 483 ··· 494 489 { 495 490 struct pcmciamtd_dev *dev = link->priv; 496 491 struct mtd_info *mtd = NULL; 497 - cs_status_t status; 498 492 win_req_t req; 499 493 int ret; 500 494 int i; ··· 517 513 if(setvpp == 1) 518 514 dev->pcmcia_map.set_vpp = pcmciamtd_set_vpp; 519 515 520 - /* Request a memory window for PCMCIA. 
Some architeures can map windows upto the maximum 521 - that PCMCIA can support (64MiB) - this is ideal and we aim for a window the size of the 522 - whole card - otherwise we try smaller windows until we succeed */ 516 + /* Request a memory window for PCMCIA. Some architeures can map windows 517 + * upto the maximum that PCMCIA can support (64MiB) - this is ideal and 518 + * we aim for a window the size of the whole card - otherwise we try 519 + * smaller windows until we succeed 520 + */ 523 521 524 522 req.Attributes = WIN_MEMORY_TYPE_CM | WIN_ENABLE; 525 523 req.Attributes |= (dev->pcmcia_map.bankwidth == 1) ? WIN_DATA_WIDTH_8 : WIN_DATA_WIDTH_16; ··· 549 543 DEBUG(2, "dev->win_size = %d", dev->win_size); 550 544 551 545 if(!dev->win_size) { 552 - err("Cant allocate memory window"); 546 + dev_err(&dev->p_dev->dev, "Cannot allocate memory window\n"); 553 547 pcmciamtd_release(link); 554 548 return -ENODEV; 555 549 } ··· 559 553 DEBUG(2, "window handle = 0x%8.8lx", (unsigned long)link->win); 560 554 dev->win_base = ioremap(req.Base, req.Size); 561 555 if(!dev->win_base) { 562 - err("ioremap(%lu, %u) failed", req.Base, req.Size); 556 + dev_err(&dev->p_dev->dev, "ioremap(%lu, %u) failed\n", 557 + req.Base, req.Size); 563 558 pcmciamtd_release(link); 564 559 return -ENODEV; 565 560 } ··· 571 564 dev->pcmcia_map.map_priv_1 = (unsigned long)dev; 572 565 dev->pcmcia_map.map_priv_2 = (unsigned long)link->win; 573 566 574 - dev->vpp = (vpp) ? vpp : link->socket.socket.Vpp; 567 + dev->vpp = (vpp) ? 
vpp : link->socket->socket.Vpp; 575 568 link->conf.Attributes = 0; 576 569 if(setvpp == 2) { 577 570 link->conf.Vpp = dev->vpp; ··· 607 600 } 608 601 609 602 if(!mtd) { 610 - DEBUG(1, "Cant find an MTD"); 603 + DEBUG(1, "Can not find an MTD"); 611 604 pcmciamtd_release(link); 612 605 return -ENODEV; 613 606 } ··· 618 611 if(new_name) { 619 612 int size = 0; 620 613 char unit = ' '; 621 - /* Since we are using a default name, make it better by adding in the 622 - size */ 614 + /* Since we are using a default name, make it better by adding 615 + * in the size 616 + */ 623 617 if(mtd->size < 1048576) { /* <1MiB in size, show size in KiB */ 624 618 size = mtd->size >> 10; 625 619 unit = 'K'; ··· 650 642 if(add_mtd_device(mtd)) { 651 643 map_destroy(mtd); 652 644 dev->mtd_info = NULL; 653 - err("Couldnt register MTD device"); 645 + dev_err(&dev->p_dev->dev, 646 + "Could not register the MTD device\n"); 654 647 pcmciamtd_release(link); 655 648 return -ENODEV; 656 649 } 657 - info("mtd%d: %s", mtd->index, mtd->name); 650 + dev_info(&dev->p_dev->dev, "mtd%d: %s\n", mtd->index, mtd->name); 658 651 return 0; 659 652 660 - failed: 661 - err("CS Error, exiting"); 653 + dev_err(&dev->p_dev->dev, "CS Error, exiting\n"); 662 654 pcmciamtd_release(link); 663 655 return -ENODEV; 664 656 } ··· 697 689 698 690 if(dev->mtd_info) { 699 691 del_mtd_device(dev->mtd_info); 692 + dev_info(&dev->p_dev->dev, "mtd%d: Removing\n", 693 + dev->mtd_info->index); 700 694 map_destroy(dev->mtd_info); 701 - info("mtd%d: Removed", dev->mtd_info->index); 702 695 } 703 696 704 697 pcmciamtd_release(link); ··· 743 734 PCMCIA_DEVICE_PROD_ID12("intel", "VALUE SERIES 100 ", 0x40ade711, 0xdf8506d8), 744 735 PCMCIA_DEVICE_PROD_ID12("KINGMAX TECHNOLOGY INC.", "SRAM 256K Bytes", 0x54d0c69c, 0xad12c29c), 745 736 PCMCIA_DEVICE_PROD_ID12("Maxtor", "MAXFL MobileMax Flash Memory Card", 0xb68968c8, 0x2dfb47b0), 737 + PCMCIA_DEVICE_PROD_ID123("M-Systems", "M-SYS Flash Memory Card", "(c) M-Systems", 0x7ed2ad87, 
0x675dc3fb, 0x7aef3965), 738 + PCMCIA_DEVICE_PROD_ID12("PRETEC", " 2MB SRAM CARD", 0xebf91155, 0x805360ca), 746 739 PCMCIA_DEVICE_PROD_ID12("SEIKO EPSON", "WWB101EN20", 0xf9876baf, 0xad0b207b), 747 740 PCMCIA_DEVICE_PROD_ID12("SEIKO EPSON", "WWB513EN20", 0xf9876baf, 0xe8d884ad), 741 + PCMCIA_DEVICE_PROD_ID12("SMART Modular Technologies", " 4MB FLASH Card", 0x96fd8277, 0x737a5b05), 748 742 PCMCIA_DEVICE_PROD_ID12("Starfish, Inc.", "REX-3000", 0x05ddca47, 0xe7d67bca), 749 743 PCMCIA_DEVICE_PROD_ID12("Starfish, Inc.", "REX-4100", 0x05ddca47, 0x7bc32944), 750 744 /* the following was commented out in pcmcia-cs-3.2.7 */
+5 -2
drivers/mtd/maps/physmap.c
··· 264 264 265 265 err = platform_driver_register(&physmap_flash_driver); 266 266 #ifdef CONFIG_MTD_PHYSMAP_COMPAT 267 - if (err == 0) 268 - platform_device_register(&physmap_flash); 267 + if (err == 0) { 268 + err = platform_device_register(&physmap_flash); 269 + if (err) 270 + platform_driver_unregister(&physmap_flash_driver); 271 + } 269 272 #endif 270 273 271 274 return err;
+49 -6
drivers/mtd/maps/physmap_of.c
··· 173 173 } 174 174 } 175 175 176 + #ifdef CONFIG_MTD_PARTITIONS 177 + /* When partitions are set we look for a linux,part-probe property which 178 + specifies the list of partition probers to use. If none is given then the 179 + default is use. These take precedence over other device tree 180 + information. */ 181 + static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot", NULL }; 182 + static const char ** __devinit of_get_probes(struct device_node *dp) 183 + { 184 + const char *cp; 185 + int cplen; 186 + unsigned int l; 187 + unsigned int count; 188 + const char **res; 189 + 190 + cp = of_get_property(dp, "linux,part-probe", &cplen); 191 + if (cp == NULL) 192 + return part_probe_types_def; 193 + 194 + count = 0; 195 + for (l = 0; l != cplen; l++) 196 + if (cp[l] == 0) 197 + count++; 198 + 199 + res = kzalloc((count + 1)*sizeof(*res), GFP_KERNEL); 200 + count = 0; 201 + while (cplen > 0) { 202 + res[count] = cp; 203 + l = strlen(cp) + 1; 204 + cp += l; 205 + cplen -= l; 206 + count++; 207 + } 208 + return res; 209 + } 210 + 211 + static void __devinit of_free_probes(const char **probes) 212 + { 213 + if (probes != part_probe_types_def) 214 + kfree(probes); 215 + } 216 + #endif 217 + 176 218 static int __devinit of_flash_probe(struct of_device *dev, 177 219 const struct of_device_id *match) 178 220 { 179 221 #ifdef CONFIG_MTD_PARTITIONS 180 - static const char *part_probe_types[] 181 - = { "cmdlinepart", "RedBoot", NULL }; 222 + const char **part_probe_types; 182 223 #endif 183 224 struct device_node *dp = dev->node; 184 225 struct resource res; ··· 259 218 260 219 dev_set_drvdata(&dev->dev, info); 261 220 262 - mtd_list = kzalloc(sizeof(struct mtd_info) * count, GFP_KERNEL); 221 + mtd_list = kzalloc(sizeof(*mtd_list) * count, GFP_KERNEL); 263 222 if (!mtd_list) 264 223 goto err_flash_remove; 265 224 ··· 348 307 goto err_out; 349 308 350 309 #ifdef CONFIG_MTD_PARTITIONS 351 - /* First look for RedBoot table or partitions on the command 352 - * 
line, these take precedence over device tree information */ 310 + part_probe_types = of_get_probes(dp); 353 311 err = parse_mtd_partitions(info->cmtd, part_probe_types, 354 312 &info->parts, 0); 355 - if (err < 0) 313 + if (err < 0) { 314 + of_free_probes(part_probe_types); 356 315 return err; 316 + } 317 + of_free_probes(part_probe_types); 357 318 358 319 #ifdef CONFIG_MTD_OF_PARTS 359 320 if (err == 0) {
+7 -1
drivers/mtd/maps/pismo.c
··· 234 234 /* FIXME: set_vpp needs saner arguments */ 235 235 pismo_setvpp_remove_fix(pismo); 236 236 237 + i2c_set_clientdata(client, NULL); 237 238 kfree(pismo); 238 239 239 240 return 0; ··· 273 272 ret = pismo_eeprom_read(client, &eeprom, 0, sizeof(eeprom)); 274 273 if (ret < 0) { 275 274 dev_err(&client->dev, "error reading EEPROM: %d\n", ret); 276 - return ret; 275 + goto exit_free; 277 276 } 278 277 279 278 dev_info(&client->dev, "%.15s board found\n", eeprom.board); ··· 284 283 pdata->cs_addrs[i]); 285 284 286 285 return 0; 286 + 287 + exit_free: 288 + i2c_set_clientdata(client, NULL); 289 + kfree(pismo); 290 + return ret; 287 291 } 288 292 289 293 static const struct i2c_device_id pismo_id[] = {
+1 -2
drivers/mtd/maps/pxa2xx-flash.c
··· 63 63 if (!res) 64 64 return -ENODEV; 65 65 66 - info = kmalloc(sizeof(struct pxa2xx_flash_info), GFP_KERNEL); 66 + info = kzalloc(sizeof(struct pxa2xx_flash_info), GFP_KERNEL); 67 67 if (!info) 68 68 return -ENOMEM; 69 69 70 - memset(info, 0, sizeof(struct pxa2xx_flash_info)); 71 70 info->map.name = (char *) flash->name; 72 71 info->map.bankwidth = flash->width; 73 72 info->map.phys = res->start;
+214 -115
drivers/mtd/mtd_blkdevs.c
··· 14 14 #include <linux/mtd/mtd.h> 15 15 #include <linux/blkdev.h> 16 16 #include <linux/blkpg.h> 17 - #include <linux/freezer.h> 18 17 #include <linux/spinlock.h> 19 18 #include <linux/hdreg.h> 20 19 #include <linux/init.h> ··· 24 25 #include "mtdcore.h" 25 26 26 27 static LIST_HEAD(blktrans_majors); 28 + static DEFINE_MUTEX(blktrans_ref_mutex); 27 29 28 - struct mtd_blkcore_priv { 29 - struct task_struct *thread; 30 - struct request_queue *rq; 31 - spinlock_t queue_lock; 32 - }; 30 + void blktrans_dev_release(struct kref *kref) 31 + { 32 + struct mtd_blktrans_dev *dev = 33 + container_of(kref, struct mtd_blktrans_dev, ref); 34 + 35 + dev->disk->private_data = NULL; 36 + blk_cleanup_queue(dev->rq); 37 + put_disk(dev->disk); 38 + list_del(&dev->list); 39 + kfree(dev); 40 + } 41 + 42 + static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk) 43 + { 44 + struct mtd_blktrans_dev *dev; 45 + 46 + mutex_lock(&blktrans_ref_mutex); 47 + dev = disk->private_data; 48 + 49 + if (!dev) 50 + goto unlock; 51 + kref_get(&dev->ref); 52 + unlock: 53 + mutex_unlock(&blktrans_ref_mutex); 54 + return dev; 55 + } 56 + 57 + void blktrans_dev_put(struct mtd_blktrans_dev *dev) 58 + { 59 + mutex_lock(&blktrans_ref_mutex); 60 + kref_put(&dev->ref, blktrans_dev_release); 61 + mutex_unlock(&blktrans_ref_mutex); 62 + } 63 + 33 64 34 65 static int do_blktrans_request(struct mtd_blktrans_ops *tr, 35 66 struct mtd_blktrans_dev *dev, ··· 90 61 return -EIO; 91 62 rq_flush_dcache_pages(req); 92 63 return 0; 93 - 94 64 case WRITE: 95 65 if (!tr->writesect) 96 66 return -EIO; ··· 99 71 if (tr->writesect(dev, block, buf)) 100 72 return -EIO; 101 73 return 0; 102 - 103 74 default: 104 75 printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req)); 105 76 return -EIO; ··· 107 80 108 81 static int mtd_blktrans_thread(void *arg) 109 82 { 110 - struct mtd_blktrans_ops *tr = arg; 111 - struct request_queue *rq = tr->blkcore_priv->rq; 83 + struct mtd_blktrans_dev *dev = arg; 84 + struct 
request_queue *rq = dev->rq; 112 85 struct request *req = NULL; 113 86 114 87 spin_lock_irq(rq->queue_lock); 115 88 116 89 while (!kthread_should_stop()) { 117 - struct mtd_blktrans_dev *dev; 118 90 int res; 119 91 120 92 if (!req && !(req = blk_fetch_request(rq))) { ··· 124 98 continue; 125 99 } 126 100 127 - dev = req->rq_disk->private_data; 128 - tr = dev->tr; 129 - 130 101 spin_unlock_irq(rq->queue_lock); 131 102 132 103 mutex_lock(&dev->lock); 133 - res = do_blktrans_request(tr, dev, req); 104 + res = do_blktrans_request(dev->tr, dev, req); 134 105 mutex_unlock(&dev->lock); 135 106 136 107 spin_lock_irq(rq->queue_lock); ··· 146 123 147 124 static void mtd_blktrans_request(struct request_queue *rq) 148 125 { 149 - struct mtd_blktrans_ops *tr = rq->queuedata; 150 - wake_up_process(tr->blkcore_priv->thread); 151 - } 126 + struct mtd_blktrans_dev *dev; 127 + struct request *req = NULL; 152 128 129 + dev = rq->queuedata; 130 + 131 + if (!dev) 132 + while ((req = blk_fetch_request(rq)) != NULL) 133 + __blk_end_request_all(req, -ENODEV); 134 + else 135 + wake_up_process(dev->thread); 136 + } 153 137 154 138 static int blktrans_open(struct block_device *bdev, fmode_t mode) 155 139 { 156 - struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data; 157 - struct mtd_blktrans_ops *tr = dev->tr; 158 - int ret = -ENODEV; 140 + struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk); 141 + int ret; 159 142 160 - if (!get_mtd_device(NULL, dev->mtd->index)) 161 - goto out; 143 + if (!dev) 144 + return -ERESTARTSYS; 162 145 163 - if (!try_module_get(tr->owner)) 164 - goto out_tr; 146 + mutex_lock(&dev->lock); 165 147 166 - /* FIXME: Locking. A hot pluggable device can go away 167 - (del_mtd_device can be called for it) without its module 168 - being unloaded. 
*/ 169 - dev->mtd->usecount++; 170 - 171 - ret = 0; 172 - if (tr->open && (ret = tr->open(dev))) { 173 - dev->mtd->usecount--; 174 - put_mtd_device(dev->mtd); 175 - out_tr: 176 - module_put(tr->owner); 148 + if (!dev->mtd) { 149 + ret = -ENXIO; 150 + goto unlock; 177 151 } 178 - out: 152 + 153 + ret = !dev->open++ && dev->tr->open ? dev->tr->open(dev) : 0; 154 + 155 + /* Take another reference on the device so it won't go away till 156 + last release */ 157 + if (!ret) 158 + kref_get(&dev->ref); 159 + unlock: 160 + mutex_unlock(&dev->lock); 161 + blktrans_dev_put(dev); 179 162 return ret; 180 163 } 181 164 182 165 static int blktrans_release(struct gendisk *disk, fmode_t mode) 183 166 { 184 - struct mtd_blktrans_dev *dev = disk->private_data; 185 - struct mtd_blktrans_ops *tr = dev->tr; 186 - int ret = 0; 167 + struct mtd_blktrans_dev *dev = blktrans_dev_get(disk); 168 + int ret = -ENXIO; 187 169 188 - if (tr->release) 189 - ret = tr->release(dev); 170 + if (!dev) 171 + return ret; 190 172 191 - if (!ret) { 192 - dev->mtd->usecount--; 193 - put_mtd_device(dev->mtd); 194 - module_put(tr->owner); 195 - } 173 + mutex_lock(&dev->lock); 196 174 175 + /* Release one reference, we sure its not the last one here*/ 176 + kref_put(&dev->ref, blktrans_dev_release); 177 + 178 + if (!dev->mtd) 179 + goto unlock; 180 + 181 + ret = !--dev->open && dev->tr->release ? 
dev->tr->release(dev) : 0; 182 + unlock: 183 + mutex_unlock(&dev->lock); 184 + blktrans_dev_put(dev); 197 185 return ret; 198 186 } 199 187 200 188 static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo) 201 189 { 202 - struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data; 190 + struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk); 191 + int ret = -ENXIO; 203 192 204 - if (dev->tr->getgeo) 205 - return dev->tr->getgeo(dev, geo); 206 - return -ENOTTY; 193 + if (!dev) 194 + return ret; 195 + 196 + mutex_lock(&dev->lock); 197 + 198 + if (!dev->mtd) 199 + goto unlock; 200 + 201 + ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : 0; 202 + unlock: 203 + mutex_unlock(&dev->lock); 204 + blktrans_dev_put(dev); 205 + return ret; 207 206 } 208 207 209 208 static int blktrans_ioctl(struct block_device *bdev, fmode_t mode, 210 209 unsigned int cmd, unsigned long arg) 211 210 { 212 - struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data; 213 - struct mtd_blktrans_ops *tr = dev->tr; 211 + struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk); 212 + int ret = -ENXIO; 213 + 214 + if (!dev) 215 + return ret; 216 + 217 + mutex_lock(&dev->lock); 218 + 219 + if (!dev->mtd) 220 + goto unlock; 214 221 215 222 switch (cmd) { 216 223 case BLKFLSBUF: 217 - if (tr->flush) 218 - return tr->flush(dev); 219 - /* The core code did the work, we had nothing to do. */ 220 - return 0; 224 + ret = dev->tr->flush ? 
dev->tr->flush(dev) : 0; 221 225 default: 222 - return -ENOTTY; 226 + ret = -ENOTTY; 223 227 } 228 + unlock: 229 + mutex_unlock(&dev->lock); 230 + blktrans_dev_put(dev); 231 + return ret; 224 232 } 225 233 226 234 static const struct block_device_operations mtd_blktrans_ops = { ··· 268 214 struct mtd_blktrans_dev *d; 269 215 int last_devnum = -1; 270 216 struct gendisk *gd; 217 + int ret; 271 218 272 219 if (mutex_trylock(&mtd_table_mutex)) { 273 220 mutex_unlock(&mtd_table_mutex); 274 221 BUG(); 275 222 } 276 223 224 + mutex_lock(&blktrans_ref_mutex); 277 225 list_for_each_entry(d, &tr->devs, list) { 278 226 if (new->devnum == -1) { 279 227 /* Use first free number */ ··· 287 231 } 288 232 } else if (d->devnum == new->devnum) { 289 233 /* Required number taken */ 234 + mutex_unlock(&blktrans_ref_mutex); 290 235 return -EBUSY; 291 236 } else if (d->devnum > new->devnum) { 292 237 /* Required number was free */ ··· 296 239 } 297 240 last_devnum = d->devnum; 298 241 } 242 + 243 + ret = -EBUSY; 299 244 if (new->devnum == -1) 300 245 new->devnum = last_devnum+1; 301 246 302 - if ((new->devnum << tr->part_bits) > 256) { 303 - return -EBUSY; 247 + /* Check that the device and any partitions will get valid 248 + * minor numbers and that the disk naming code below can cope 249 + * with this number. 
*/ 250 + if (new->devnum > (MINORMASK >> tr->part_bits) || 251 + (tr->part_bits && new->devnum >= 27 * 26)) { 252 + mutex_unlock(&blktrans_ref_mutex); 253 + goto error1; 304 254 } 305 255 306 256 list_add_tail(&new->list, &tr->devs); 307 257 added: 258 + mutex_unlock(&blktrans_ref_mutex); 259 + 308 260 mutex_init(&new->lock); 261 + kref_init(&new->ref); 309 262 if (!tr->writesect) 310 263 new->readonly = 1; 311 264 265 + /* Create gendisk */ 266 + ret = -ENOMEM; 312 267 gd = alloc_disk(1 << tr->part_bits); 313 - if (!gd) { 314 - list_del(&new->list); 315 - return -ENOMEM; 316 - } 268 + 269 + if (!gd) 270 + goto error2; 271 + 272 + new->disk = gd; 273 + gd->private_data = new; 317 274 gd->major = tr->major; 318 275 gd->first_minor = (new->devnum) << tr->part_bits; 319 276 gd->fops = &mtd_blktrans_ops; ··· 345 274 snprintf(gd->disk_name, sizeof(gd->disk_name), 346 275 "%s%d", tr->name, new->devnum); 347 276 348 - /* 2.5 has capacity in units of 512 bytes while still 349 - having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */ 350 277 set_capacity(gd, (new->size * tr->blksize) >> 9); 351 278 352 - gd->private_data = new; 353 - new->blkcore_priv = gd; 354 - gd->queue = tr->blkcore_priv->rq; 279 + /* Create the request queue */ 280 + spin_lock_init(&new->queue_lock); 281 + new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock); 282 + 283 + if (!new->rq) 284 + goto error3; 285 + 286 + new->rq->queuedata = new; 287 + blk_queue_logical_block_size(new->rq, tr->blksize); 288 + 289 + if (tr->discard) 290 + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, 291 + new->rq); 292 + 293 + gd->queue = new->rq; 294 + 295 + __get_mtd_device(new->mtd); 296 + __module_get(tr->owner); 297 + 298 + /* Create processing thread */ 299 + /* TODO: workqueue ? 
*/ 300 + new->thread = kthread_run(mtd_blktrans_thread, new, 301 + "%s%d", tr->name, new->mtd->index); 302 + if (IS_ERR(new->thread)) { 303 + ret = PTR_ERR(new->thread); 304 + goto error4; 305 + } 355 306 gd->driverfs_dev = &new->mtd->dev; 356 307 357 308 if (new->readonly) ··· 381 288 382 289 add_disk(gd); 383 290 291 + if (new->disk_attributes) { 292 + ret = sysfs_create_group(&disk_to_dev(gd)->kobj, 293 + new->disk_attributes); 294 + WARN_ON(ret); 295 + } 384 296 return 0; 297 + error4: 298 + module_put(tr->owner); 299 + __put_mtd_device(new->mtd); 300 + blk_cleanup_queue(new->rq); 301 + error3: 302 + put_disk(new->disk); 303 + error2: 304 + list_del(&new->list); 305 + error1: 306 + kfree(new); 307 + return ret; 385 308 } 386 309 387 310 int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old) 388 311 { 312 + unsigned long flags; 313 + 389 314 if (mutex_trylock(&mtd_table_mutex)) { 390 315 mutex_unlock(&mtd_table_mutex); 391 316 BUG(); 392 317 } 393 318 394 - list_del(&old->list); 319 + /* Stop new requests to arrive */ 320 + del_gendisk(old->disk); 395 321 396 - del_gendisk(old->blkcore_priv); 397 - put_disk(old->blkcore_priv); 322 + if (old->disk_attributes) 323 + sysfs_remove_group(&disk_to_dev(old->disk)->kobj, 324 + old->disk_attributes); 398 325 326 + /* Stop the thread */ 327 + kthread_stop(old->thread); 328 + 329 + /* Kill current requests */ 330 + spin_lock_irqsave(&old->queue_lock, flags); 331 + old->rq->queuedata = NULL; 332 + blk_start_queue(old->rq); 333 + spin_unlock_irqrestore(&old->queue_lock, flags); 334 + 335 + /* Ask trans driver for release to the mtd device */ 336 + mutex_lock(&old->lock); 337 + if (old->open && old->tr->release) { 338 + old->tr->release(old); 339 + old->open = 0; 340 + } 341 + 342 + __put_mtd_device(old->mtd); 343 + module_put(old->tr->owner); 344 + 345 + /* At that point, we don't touch the mtd anymore */ 346 + old->mtd = NULL; 347 + 348 + mutex_unlock(&old->lock); 349 + blktrans_dev_put(old); 399 350 return 0; 400 351 } 401 
352 ··· 472 335 473 336 int register_mtd_blktrans(struct mtd_blktrans_ops *tr) 474 337 { 475 - int ret, i; 338 + struct mtd_info *mtd; 339 + int ret; 476 340 477 341 /* Register the notifier if/when the first device type is 478 342 registered, to prevent the link/init ordering from fucking ··· 481 343 if (!blktrans_notifier.list.next) 482 344 register_mtd_user(&blktrans_notifier); 483 345 484 - tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL); 485 - if (!tr->blkcore_priv) 486 - return -ENOMEM; 487 346 488 347 mutex_lock(&mtd_table_mutex); 489 348 ··· 488 353 if (ret) { 489 354 printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n", 490 355 tr->name, tr->major, ret); 491 - kfree(tr->blkcore_priv); 492 356 mutex_unlock(&mtd_table_mutex); 493 357 return ret; 494 358 } 495 - spin_lock_init(&tr->blkcore_priv->queue_lock); 496 - 497 - tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock); 498 - if (!tr->blkcore_priv->rq) { 499 - unregister_blkdev(tr->major, tr->name); 500 - kfree(tr->blkcore_priv); 501 - mutex_unlock(&mtd_table_mutex); 502 - return -ENOMEM; 503 - } 504 - 505 - tr->blkcore_priv->rq->queuedata = tr; 506 - blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize); 507 - if (tr->discard) 508 - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, 509 - tr->blkcore_priv->rq); 510 359 511 360 tr->blkshift = ffs(tr->blksize) - 1; 512 - 513 - tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr, 514 - "%sd", tr->name); 515 - if (IS_ERR(tr->blkcore_priv->thread)) { 516 - ret = PTR_ERR(tr->blkcore_priv->thread); 517 - blk_cleanup_queue(tr->blkcore_priv->rq); 518 - unregister_blkdev(tr->major, tr->name); 519 - kfree(tr->blkcore_priv); 520 - mutex_unlock(&mtd_table_mutex); 521 - return ret; 522 - } 523 361 524 362 INIT_LIST_HEAD(&tr->devs); 525 363 list_add(&tr->list, &blktrans_majors); 526 364 527 - for (i=0; i<MAX_MTD_DEVICES; i++) { 528 - if (mtd_table[i] && mtd_table[i]->type != 
MTD_ABSENT) 529 - tr->add_mtd(tr, mtd_table[i]); 530 - } 365 + mtd_for_each_device(mtd) 366 + if (mtd->type != MTD_ABSENT) 367 + tr->add_mtd(tr, mtd); 531 368 532 369 mutex_unlock(&mtd_table_mutex); 533 - 534 370 return 0; 535 371 } 536 372 ··· 511 405 512 406 mutex_lock(&mtd_table_mutex); 513 407 514 - /* Clean up the kernel thread */ 515 - kthread_stop(tr->blkcore_priv->thread); 516 - 517 408 /* Remove it from the list of active majors */ 518 409 list_del(&tr->list); 519 410 520 411 list_for_each_entry_safe(dev, next, &tr->devs, list) 521 412 tr->remove_dev(dev); 522 413 523 - blk_cleanup_queue(tr->blkcore_priv->rq); 524 414 unregister_blkdev(tr->major, tr->name); 525 - 526 415 mutex_unlock(&mtd_table_mutex); 527 - 528 - kfree(tr->blkcore_priv); 529 416 530 417 BUG_ON(!list_empty(&tr->devs)); 531 418 return 0;
+29 -43
drivers/mtd/mtdblock.c
··· 19 19 #include <linux/mutex.h> 20 20 21 21 22 - static struct mtdblk_dev { 23 - struct mtd_info *mtd; 22 + struct mtdblk_dev { 23 + struct mtd_blktrans_dev mbd; 24 24 int count; 25 25 struct mutex cache_mutex; 26 26 unsigned char *cache_data; 27 27 unsigned long cache_offset; 28 28 unsigned int cache_size; 29 29 enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state; 30 - } *mtdblks[MAX_MTD_DEVICES]; 30 + }; 31 31 32 32 static struct mutex mtdblks_lock; 33 33 ··· 98 98 99 99 static int write_cached_data (struct mtdblk_dev *mtdblk) 100 100 { 101 - struct mtd_info *mtd = mtdblk->mtd; 101 + struct mtd_info *mtd = mtdblk->mbd.mtd; 102 102 int ret; 103 103 104 104 if (mtdblk->cache_state != STATE_DIRTY) ··· 128 128 static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos, 129 129 int len, const char *buf) 130 130 { 131 - struct mtd_info *mtd = mtdblk->mtd; 131 + struct mtd_info *mtd = mtdblk->mbd.mtd; 132 132 unsigned int sect_size = mtdblk->cache_size; 133 133 size_t retlen; 134 134 int ret; ··· 198 198 static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos, 199 199 int len, char *buf) 200 200 { 201 - struct mtd_info *mtd = mtdblk->mtd; 201 + struct mtd_info *mtd = mtdblk->mbd.mtd; 202 202 unsigned int sect_size = mtdblk->cache_size; 203 203 size_t retlen; 204 204 int ret; ··· 244 244 static int mtdblock_readsect(struct mtd_blktrans_dev *dev, 245 245 unsigned long block, char *buf) 246 246 { 247 - struct mtdblk_dev *mtdblk = mtdblks[dev->devnum]; 247 + struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd); 248 248 return do_cached_read(mtdblk, block<<9, 512, buf); 249 249 } 250 250 251 251 static int mtdblock_writesect(struct mtd_blktrans_dev *dev, 252 252 unsigned long block, char *buf) 253 253 { 254 - struct mtdblk_dev *mtdblk = mtdblks[dev->devnum]; 254 + struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd); 255 255 if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) { 256 - 
mtdblk->cache_data = vmalloc(mtdblk->mtd->erasesize); 256 + mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize); 257 257 if (!mtdblk->cache_data) 258 258 return -EINTR; 259 259 /* -EINTR is not really correct, but it is the best match ··· 266 266 267 267 static int mtdblock_open(struct mtd_blktrans_dev *mbd) 268 268 { 269 - struct mtdblk_dev *mtdblk; 270 - struct mtd_info *mtd = mbd->mtd; 271 - int dev = mbd->devnum; 269 + struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd); 272 270 273 271 DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n"); 274 272 275 273 mutex_lock(&mtdblks_lock); 276 - if (mtdblks[dev]) { 277 - mtdblks[dev]->count++; 274 + if (mtdblk->count) { 275 + mtdblk->count++; 278 276 mutex_unlock(&mtdblks_lock); 279 277 return 0; 280 278 } 281 279 282 280 /* OK, it's not open. Create cache info for it */ 283 - mtdblk = kzalloc(sizeof(struct mtdblk_dev), GFP_KERNEL); 284 - if (!mtdblk) { 285 - mutex_unlock(&mtdblks_lock); 286 - return -ENOMEM; 287 - } 288 - 289 281 mtdblk->count = 1; 290 - mtdblk->mtd = mtd; 291 - 292 282 mutex_init(&mtdblk->cache_mutex); 293 283 mtdblk->cache_state = STATE_EMPTY; 294 - if ( !(mtdblk->mtd->flags & MTD_NO_ERASE) && mtdblk->mtd->erasesize) { 295 - mtdblk->cache_size = mtdblk->mtd->erasesize; 284 + if (!(mbd->mtd->flags & MTD_NO_ERASE) && mbd->mtd->erasesize) { 285 + mtdblk->cache_size = mbd->mtd->erasesize; 296 286 mtdblk->cache_data = NULL; 297 287 } 298 288 299 - mtdblks[dev] = mtdblk; 300 289 mutex_unlock(&mtdblks_lock); 301 290 302 291 DEBUG(MTD_DEBUG_LEVEL1, "ok\n"); ··· 295 306 296 307 static int mtdblock_release(struct mtd_blktrans_dev *mbd) 297 308 { 298 - int dev = mbd->devnum; 299 - struct mtdblk_dev *mtdblk = mtdblks[dev]; 309 + struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd); 300 310 301 311 DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n"); 302 312 ··· 306 318 mutex_unlock(&mtdblk->cache_mutex); 307 319 308 320 if (!--mtdblk->count) { 309 - /* It was the last usage. 
Free the device */ 310 - mtdblks[dev] = NULL; 311 - if (mtdblk->mtd->sync) 312 - mtdblk->mtd->sync(mtdblk->mtd); 321 + /* It was the last usage. Free the cache */ 322 + if (mbd->mtd->sync) 323 + mbd->mtd->sync(mbd->mtd); 313 324 vfree(mtdblk->cache_data); 314 - kfree(mtdblk); 315 325 } 316 326 317 327 mutex_unlock(&mtdblks_lock); ··· 321 335 322 336 static int mtdblock_flush(struct mtd_blktrans_dev *dev) 323 337 { 324 - struct mtdblk_dev *mtdblk = mtdblks[dev->devnum]; 338 + struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd); 325 339 326 340 mutex_lock(&mtdblk->cache_mutex); 327 341 write_cached_data(mtdblk); 328 342 mutex_unlock(&mtdblk->cache_mutex); 329 343 330 - if (mtdblk->mtd->sync) 331 - mtdblk->mtd->sync(mtdblk->mtd); 344 + if (dev->mtd->sync) 345 + dev->mtd->sync(dev->mtd); 332 346 return 0; 333 347 } 334 348 335 349 static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) 336 350 { 337 - struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL); 351 + struct mtdblk_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL); 338 352 339 353 if (!dev) 340 354 return; 341 355 342 - dev->mtd = mtd; 343 - dev->devnum = mtd->index; 356 + dev->mbd.mtd = mtd; 357 + dev->mbd.devnum = mtd->index; 344 358 345 - dev->size = mtd->size >> 9; 346 - dev->tr = tr; 359 + dev->mbd.size = mtd->size >> 9; 360 + dev->mbd.tr = tr; 347 361 348 362 if (!(mtd->flags & MTD_WRITEABLE)) 349 - dev->readonly = 1; 363 + dev->mbd.readonly = 1; 350 364 351 - add_mtd_blktrans_dev(dev); 365 + if (add_mtd_blktrans_dev(&dev->mbd)) 366 + kfree(dev); 352 367 } 353 368 354 369 static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev) 355 370 { 356 371 del_mtd_blktrans_dev(dev); 357 - kfree(dev); 358 372 } 359 373 360 374 static struct mtd_blktrans_ops mtdblock_tr = {
+2 -2
drivers/mtd/mtdblock_ro.c
··· 43 43 dev->tr = tr; 44 44 dev->readonly = 1; 45 45 46 - add_mtd_blktrans_dev(dev); 46 + if (add_mtd_blktrans_dev(dev)) 47 + kfree(dev); 47 48 } 48 49 49 50 static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev) 50 51 { 51 52 del_mtd_blktrans_dev(dev); 52 - kfree(dev); 53 53 } 54 54 55 55 static struct mtd_blktrans_ops mtdblock_tr = {
+91 -14
drivers/mtd/mtdchar.c
··· 15 15 #include <linux/smp_lock.h> 16 16 #include <linux/backing-dev.h> 17 17 #include <linux/compat.h> 18 + #include <linux/mount.h> 18 19 19 20 #include <linux/mtd/mtd.h> 20 21 #include <linux/mtd/compatmac.h> 21 22 22 23 #include <asm/uaccess.h> 23 24 25 + #define MTD_INODE_FS_MAGIC 0x11307854 26 + static struct vfsmount *mtd_inode_mnt __read_mostly; 24 27 25 28 /* 26 29 * Data structure to hold the pointer to the mtd device as well ··· 31 28 */ 32 29 struct mtd_file_info { 33 30 struct mtd_info *mtd; 31 + struct inode *ino; 34 32 enum mtd_file_modes mode; 35 33 }; 36 34 ··· 68 64 int ret = 0; 69 65 struct mtd_info *mtd; 70 66 struct mtd_file_info *mfi; 67 + struct inode *mtd_ino; 71 68 72 69 DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n"); 73 - 74 - if (devnum >= MAX_MTD_DEVICES) 75 - return -ENODEV; 76 70 77 71 /* You can't open the RO devices RW */ 78 72 if ((file->f_mode & FMODE_WRITE) && (minor & 1)) ··· 90 88 goto out; 91 89 } 92 90 93 - if (mtd->backing_dev_info) 94 - file->f_mapping->backing_dev_info = mtd->backing_dev_info; 91 + mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum); 92 + if (!mtd_ino) { 93 + put_mtd_device(mtd); 94 + ret = -ENOMEM; 95 + goto out; 96 + } 97 + if (mtd_ino->i_state & I_NEW) { 98 + mtd_ino->i_private = mtd; 99 + mtd_ino->i_mode = S_IFCHR; 100 + mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info; 101 + unlock_new_inode(mtd_ino); 102 + } 103 + file->f_mapping = mtd_ino->i_mapping; 95 104 96 105 /* You can't open it RW if it's not a writeable device */ 97 106 if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) { 107 + iput(mtd_ino); 98 108 put_mtd_device(mtd); 99 109 ret = -EACCES; 100 110 goto out; ··· 114 100 115 101 mfi = kzalloc(sizeof(*mfi), GFP_KERNEL); 116 102 if (!mfi) { 103 + iput(mtd_ino); 117 104 put_mtd_device(mtd); 118 105 ret = -ENOMEM; 119 106 goto out; 120 107 } 108 + mfi->ino = mtd_ino; 121 109 mfi->mtd = mtd; 122 110 file->private_data = mfi; 123 111 ··· 140 124 /* Only sync if opened RW */ 141 
125 if ((file->f_mode & FMODE_WRITE) && mtd->sync) 142 126 mtd->sync(mtd); 127 + 128 + iput(mfi->ino); 143 129 144 130 put_mtd_device(mtd); 145 131 file->private_data = NULL; ··· 391 373 if (!mtd->write_oob) 392 374 ret = -EOPNOTSUPP; 393 375 else 394 - ret = access_ok(VERIFY_READ, ptr, length) ? 0 : EFAULT; 376 + ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT; 395 377 396 378 if (ret) 397 379 return ret; ··· 500 482 { 501 483 uint32_t ur_idx; 502 484 struct mtd_erase_region_info *kr; 503 - struct region_info_user *ur = (struct region_info_user *) argp; 485 + struct region_info_user __user *ur = argp; 504 486 505 487 if (get_user(ur_idx, &(ur->regionindex))) 506 488 return -EFAULT; ··· 972 954 #endif 973 955 }; 974 956 957 + static int mtd_inodefs_get_sb(struct file_system_type *fs_type, int flags, 958 + const char *dev_name, void *data, 959 + struct vfsmount *mnt) 960 + { 961 + return get_sb_pseudo(fs_type, "mtd_inode:", NULL, MTD_INODE_FS_MAGIC, 962 + mnt); 963 + } 964 + 965 + static struct file_system_type mtd_inodefs_type = { 966 + .name = "mtd_inodefs", 967 + .get_sb = mtd_inodefs_get_sb, 968 + .kill_sb = kill_anon_super, 969 + }; 970 + 971 + static void mtdchar_notify_add(struct mtd_info *mtd) 972 + { 973 + } 974 + 975 + static void mtdchar_notify_remove(struct mtd_info *mtd) 976 + { 977 + struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index); 978 + 979 + if (mtd_ino) { 980 + /* Destroy the inode if it exists */ 981 + mtd_ino->i_nlink = 0; 982 + iput(mtd_ino); 983 + } 984 + } 985 + 986 + static struct mtd_notifier mtdchar_notifier = { 987 + .add = mtdchar_notify_add, 988 + .remove = mtdchar_notify_remove, 989 + }; 990 + 975 991 static int __init init_mtdchar(void) 976 992 { 977 - int status; 993 + int ret; 978 994 979 - status = register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops); 980 - if (status < 0) { 981 - printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n", 982 - MTD_CHAR_MAJOR); 995 + ret = 
__register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, 996 + "mtd", &mtd_fops); 997 + if (ret < 0) { 998 + pr_notice("Can't allocate major number %d for " 999 + "Memory Technology Devices.\n", MTD_CHAR_MAJOR); 1000 + return ret; 983 1001 } 984 1002 985 - return status; 1003 + ret = register_filesystem(&mtd_inodefs_type); 1004 + if (ret) { 1005 + pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret); 1006 + goto err_unregister_chdev; 1007 + } 1008 + 1009 + mtd_inode_mnt = kern_mount(&mtd_inodefs_type); 1010 + if (IS_ERR(mtd_inode_mnt)) { 1011 + ret = PTR_ERR(mtd_inode_mnt); 1012 + pr_notice("Error mounting mtd_inodefs filesystem: %d\n", ret); 1013 + goto err_unregister_filesystem; 1014 + } 1015 + register_mtd_user(&mtdchar_notifier); 1016 + 1017 + return ret; 1018 + 1019 + err_unregister_filesystem: 1020 + unregister_filesystem(&mtd_inodefs_type); 1021 + err_unregister_chdev: 1022 + __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd"); 1023 + return ret; 986 1024 } 987 1025 988 1026 static void __exit cleanup_mtdchar(void) 989 1027 { 990 - unregister_chrdev(MTD_CHAR_MAJOR, "mtd"); 1028 + unregister_mtd_user(&mtdchar_notifier); 1029 + mntput(mtd_inode_mnt); 1030 + unregister_filesystem(&mtd_inodefs_type); 1031 + __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd"); 991 1032 } 992 1033 993 1034 module_init(init_mtdchar);
+1 -2
drivers/mtd/mtdconcat.c
··· 183 183 } 184 184 185 185 /* make a copy of vecs */ 186 - vecs_copy = kmalloc(sizeof(struct kvec) * count, GFP_KERNEL); 186 + vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL); 187 187 if (!vecs_copy) 188 188 return -ENOMEM; 189 - memcpy(vecs_copy, vecs, sizeof(struct kvec) * count); 190 189 191 190 entry_low = 0; 192 191 for (i = 0; i < concat->num_subdev; i++) {
+149 -122
drivers/mtd/mtdcore.c
··· 19 19 #include <linux/init.h> 20 20 #include <linux/mtd/compatmac.h> 21 21 #include <linux/proc_fs.h> 22 + #include <linux/idr.h> 22 23 #include <linux/backing-dev.h> 24 + #include <linux/gfp.h> 23 25 24 26 #include <linux/mtd/mtd.h> 25 27 ··· 65 63 .resume = mtd_cls_resume, 66 64 }; 67 65 66 + static DEFINE_IDR(mtd_idr); 67 + 68 68 /* These are exported solely for the purpose of mtd_blkdevs.c. You 69 69 should not use them for _anything_ else */ 70 70 DEFINE_MUTEX(mtd_table_mutex); 71 - struct mtd_info *mtd_table[MAX_MTD_DEVICES]; 72 - 73 71 EXPORT_SYMBOL_GPL(mtd_table_mutex); 74 - EXPORT_SYMBOL_GPL(mtd_table); 72 + 73 + struct mtd_info *__mtd_next_device(int i) 74 + { 75 + return idr_get_next(&mtd_idr, &i); 76 + } 77 + EXPORT_SYMBOL_GPL(__mtd_next_device); 75 78 76 79 static LIST_HEAD(mtd_notifiers); 77 80 ··· 272 265 * Add a device to the list of MTD devices present in the system, and 273 266 * notify each currently active MTD 'user' of its arrival. Returns 274 267 * zero on success or 1 on failure, which currently will only happen 275 - * if the number of present devices exceeds MAX_MTD_DEVICES (i.e. 16) 276 - * or there's a sysfs error. 268 + * if there is insufficient memory or a sysfs error. 
277 269 */ 278 270 279 271 int add_mtd_device(struct mtd_info *mtd) 280 272 { 281 - int i; 273 + struct mtd_notifier *not; 274 + int i, error; 282 275 283 276 if (!mtd->backing_dev_info) { 284 277 switch (mtd->type) { ··· 297 290 BUG_ON(mtd->writesize == 0); 298 291 mutex_lock(&mtd_table_mutex); 299 292 300 - for (i=0; i < MAX_MTD_DEVICES; i++) 301 - if (!mtd_table[i]) { 302 - struct mtd_notifier *not; 293 + do { 294 + if (!idr_pre_get(&mtd_idr, GFP_KERNEL)) 295 + goto fail_locked; 296 + error = idr_get_new(&mtd_idr, mtd, &i); 297 + } while (error == -EAGAIN); 303 298 304 - mtd_table[i] = mtd; 305 - mtd->index = i; 306 - mtd->usecount = 0; 299 + if (error) 300 + goto fail_locked; 307 301 308 - if (is_power_of_2(mtd->erasesize)) 309 - mtd->erasesize_shift = ffs(mtd->erasesize) - 1; 310 - else 311 - mtd->erasesize_shift = 0; 302 + mtd->index = i; 303 + mtd->usecount = 0; 312 304 313 - if (is_power_of_2(mtd->writesize)) 314 - mtd->writesize_shift = ffs(mtd->writesize) - 1; 315 - else 316 - mtd->writesize_shift = 0; 305 + if (is_power_of_2(mtd->erasesize)) 306 + mtd->erasesize_shift = ffs(mtd->erasesize) - 1; 307 + else 308 + mtd->erasesize_shift = 0; 317 309 318 - mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1; 319 - mtd->writesize_mask = (1 << mtd->writesize_shift) - 1; 310 + if (is_power_of_2(mtd->writesize)) 311 + mtd->writesize_shift = ffs(mtd->writesize) - 1; 312 + else 313 + mtd->writesize_shift = 0; 320 314 321 - /* Some chips always power up locked. Unlock them now */ 322 - if ((mtd->flags & MTD_WRITEABLE) 323 - && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) { 324 - if (mtd->unlock(mtd, 0, mtd->size)) 325 - printk(KERN_WARNING 326 - "%s: unlock failed, " 327 - "writes may not work\n", 328 - mtd->name); 329 - } 315 + mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1; 316 + mtd->writesize_mask = (1 << mtd->writesize_shift) - 1; 330 317 331 - /* Caller should have set dev.parent to match the 332 - * physical device. 
333 - */ 334 - mtd->dev.type = &mtd_devtype; 335 - mtd->dev.class = &mtd_class; 336 - mtd->dev.devt = MTD_DEVT(i); 337 - dev_set_name(&mtd->dev, "mtd%d", i); 338 - dev_set_drvdata(&mtd->dev, mtd); 339 - if (device_register(&mtd->dev) != 0) { 340 - mtd_table[i] = NULL; 341 - break; 342 - } 318 + /* Some chips always power up locked. Unlock them now */ 319 + if ((mtd->flags & MTD_WRITEABLE) 320 + && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) { 321 + if (mtd->unlock(mtd, 0, mtd->size)) 322 + printk(KERN_WARNING 323 + "%s: unlock failed, writes may not work\n", 324 + mtd->name); 325 + } 343 326 344 - if (MTD_DEVT(i)) 345 - device_create(&mtd_class, mtd->dev.parent, 346 - MTD_DEVT(i) + 1, 347 - NULL, "mtd%dro", i); 327 + /* Caller should have set dev.parent to match the 328 + * physical device. 329 + */ 330 + mtd->dev.type = &mtd_devtype; 331 + mtd->dev.class = &mtd_class; 332 + mtd->dev.devt = MTD_DEVT(i); 333 + dev_set_name(&mtd->dev, "mtd%d", i); 334 + dev_set_drvdata(&mtd->dev, mtd); 335 + if (device_register(&mtd->dev) != 0) 336 + goto fail_added; 348 337 349 - DEBUG(0, "mtd: Giving out device %d to %s\n",i, mtd->name); 350 - /* No need to get a refcount on the module containing 351 - the notifier, since we hold the mtd_table_mutex */ 352 - list_for_each_entry(not, &mtd_notifiers, list) 353 - not->add(mtd); 338 + if (MTD_DEVT(i)) 339 + device_create(&mtd_class, mtd->dev.parent, 340 + MTD_DEVT(i) + 1, 341 + NULL, "mtd%dro", i); 354 342 355 - mutex_unlock(&mtd_table_mutex); 356 - /* We _know_ we aren't being removed, because 357 - our caller is still holding us here. So none 358 - of this try_ nonsense, and no bitching about it 359 - either. 
:) */ 360 - __module_get(THIS_MODULE); 361 - return 0; 362 - } 343 + DEBUG(0, "mtd: Giving out device %d to %s\n", i, mtd->name); 344 + /* No need to get a refcount on the module containing 345 + the notifier, since we hold the mtd_table_mutex */ 346 + list_for_each_entry(not, &mtd_notifiers, list) 347 + not->add(mtd); 363 348 349 + mutex_unlock(&mtd_table_mutex); 350 + /* We _know_ we aren't being removed, because 351 + our caller is still holding us here. So none 352 + of this try_ nonsense, and no bitching about it 353 + either. :) */ 354 + __module_get(THIS_MODULE); 355 + return 0; 356 + 357 + fail_added: 358 + idr_remove(&mtd_idr, i); 359 + fail_locked: 364 360 mutex_unlock(&mtd_table_mutex); 365 361 return 1; 366 362 } ··· 381 371 int del_mtd_device (struct mtd_info *mtd) 382 372 { 383 373 int ret; 374 + struct mtd_notifier *not; 384 375 385 376 mutex_lock(&mtd_table_mutex); 386 377 387 - if (mtd_table[mtd->index] != mtd) { 378 + if (idr_find(&mtd_idr, mtd->index) != mtd) { 388 379 ret = -ENODEV; 389 - } else if (mtd->usecount) { 380 + goto out_error; 381 + } 382 + 383 + /* No need to get a refcount on the module containing 384 + the notifier, since we hold the mtd_table_mutex */ 385 + list_for_each_entry(not, &mtd_notifiers, list) 386 + not->remove(mtd); 387 + 388 + if (mtd->usecount) { 390 389 printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n", 391 390 mtd->index, mtd->name, mtd->usecount); 392 391 ret = -EBUSY; 393 392 } else { 394 - struct mtd_notifier *not; 395 - 396 393 device_unregister(&mtd->dev); 397 394 398 - /* No need to get a refcount on the module containing 399 - the notifier, since we hold the mtd_table_mutex */ 400 - list_for_each_entry(not, &mtd_notifiers, list) 401 - not->remove(mtd); 402 - 403 - mtd_table[mtd->index] = NULL; 395 + idr_remove(&mtd_idr, mtd->index); 404 396 405 397 module_put(THIS_MODULE); 406 398 ret = 0; 407 399 } 408 400 401 + out_error: 409 402 mutex_unlock(&mtd_table_mutex); 410 403 return ret; 411 
404 } ··· 424 411 425 412 void register_mtd_user (struct mtd_notifier *new) 426 413 { 427 - int i; 414 + struct mtd_info *mtd; 428 415 429 416 mutex_lock(&mtd_table_mutex); 430 417 ··· 432 419 433 420 __module_get(THIS_MODULE); 434 421 435 - for (i=0; i< MAX_MTD_DEVICES; i++) 436 - if (mtd_table[i]) 437 - new->add(mtd_table[i]); 422 + mtd_for_each_device(mtd) 423 + new->add(mtd); 438 424 439 425 mutex_unlock(&mtd_table_mutex); 440 426 } ··· 450 438 451 439 int unregister_mtd_user (struct mtd_notifier *old) 452 440 { 453 - int i; 441 + struct mtd_info *mtd; 454 442 455 443 mutex_lock(&mtd_table_mutex); 456 444 457 445 module_put(THIS_MODULE); 458 446 459 - for (i=0; i< MAX_MTD_DEVICES; i++) 460 - if (mtd_table[i]) 461 - old->remove(mtd_table[i]); 447 + mtd_for_each_device(mtd) 448 + old->remove(mtd); 462 449 463 450 list_del(&old->list); 464 451 mutex_unlock(&mtd_table_mutex); ··· 479 468 480 469 struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num) 481 470 { 482 - struct mtd_info *ret = NULL; 483 - int i, err = -ENODEV; 471 + struct mtd_info *ret = NULL, *other; 472 + int err = -ENODEV; 484 473 485 474 mutex_lock(&mtd_table_mutex); 486 475 487 476 if (num == -1) { 488 - for (i=0; i< MAX_MTD_DEVICES; i++) 489 - if (mtd_table[i] == mtd) 490 - ret = mtd_table[i]; 491 - } else if (num >= 0 && num < MAX_MTD_DEVICES) { 492 - ret = mtd_table[num]; 477 + mtd_for_each_device(other) { 478 + if (other == mtd) { 479 + ret = mtd; 480 + break; 481 + } 482 + } 483 + } else if (num >= 0) { 484 + ret = idr_find(&mtd_idr, num); 493 485 if (mtd && mtd != ret) 494 486 ret = NULL; 495 487 } 496 488 497 - if (!ret) 498 - goto out_unlock; 499 - 500 - if (!try_module_get(ret->owner)) 501 - goto out_unlock; 502 - 503 - if (ret->get_device) { 504 - err = ret->get_device(ret); 505 - if (err) 506 - goto out_put; 489 + if (!ret) { 490 + ret = ERR_PTR(err); 491 + goto out; 507 492 } 508 493 509 - ret->usecount++; 494 + err = __get_mtd_device(ret); 495 + if (err) 496 + ret = 
ERR_PTR(err); 497 + out: 510 498 mutex_unlock(&mtd_table_mutex); 511 499 return ret; 500 + } 512 501 513 - out_put: 514 - module_put(ret->owner); 515 - out_unlock: 516 - mutex_unlock(&mtd_table_mutex); 517 - return ERR_PTR(err); 502 + 503 + int __get_mtd_device(struct mtd_info *mtd) 504 + { 505 + int err; 506 + 507 + if (!try_module_get(mtd->owner)) 508 + return -ENODEV; 509 + 510 + if (mtd->get_device) { 511 + 512 + err = mtd->get_device(mtd); 513 + 514 + if (err) { 515 + module_put(mtd->owner); 516 + return err; 517 + } 518 + } 519 + mtd->usecount++; 520 + return 0; 518 521 } 519 522 520 523 /** ··· 542 517 543 518 struct mtd_info *get_mtd_device_nm(const char *name) 544 519 { 545 - int i, err = -ENODEV; 546 - struct mtd_info *mtd = NULL; 520 + int err = -ENODEV; 521 + struct mtd_info *mtd = NULL, *other; 547 522 548 523 mutex_lock(&mtd_table_mutex); 549 524 550 - for (i = 0; i < MAX_MTD_DEVICES; i++) { 551 - if (mtd_table[i] && !strcmp(name, mtd_table[i]->name)) { 552 - mtd = mtd_table[i]; 525 + mtd_for_each_device(other) { 526 + if (!strcmp(name, other->name)) { 527 + mtd = other; 553 528 break; 554 529 } 555 530 } ··· 579 554 580 555 void put_mtd_device(struct mtd_info *mtd) 581 556 { 582 - int c; 583 - 584 557 mutex_lock(&mtd_table_mutex); 585 - c = --mtd->usecount; 558 + __put_mtd_device(mtd); 559 + mutex_unlock(&mtd_table_mutex); 560 + 561 + } 562 + 563 + void __put_mtd_device(struct mtd_info *mtd) 564 + { 565 + --mtd->usecount; 566 + BUG_ON(mtd->usecount < 0); 567 + 586 568 if (mtd->put_device) 587 569 mtd->put_device(mtd); 588 - mutex_unlock(&mtd_table_mutex); 589 - BUG_ON(c < 0); 590 570 591 571 module_put(mtd->owner); 592 572 } ··· 629 599 EXPORT_SYMBOL_GPL(del_mtd_device); 630 600 EXPORT_SYMBOL_GPL(get_mtd_device); 631 601 EXPORT_SYMBOL_GPL(get_mtd_device_nm); 602 + EXPORT_SYMBOL_GPL(__get_mtd_device); 632 603 EXPORT_SYMBOL_GPL(put_mtd_device); 604 + EXPORT_SYMBOL_GPL(__put_mtd_device); 633 605 EXPORT_SYMBOL_GPL(register_mtd_user); 634 606 
EXPORT_SYMBOL_GPL(unregister_mtd_user); 635 607 EXPORT_SYMBOL_GPL(default_mtd_writev); ··· 643 611 644 612 static struct proc_dir_entry *proc_mtd; 645 613 646 - static inline int mtd_proc_info (char *buf, int i) 614 + static inline int mtd_proc_info(char *buf, struct mtd_info *this) 647 615 { 648 - struct mtd_info *this = mtd_table[i]; 649 - 650 - if (!this) 651 - return 0; 652 - 653 - return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", i, 616 + return sprintf(buf, "mtd%d: %8.8llx %8.8x \"%s\"\n", this->index, 654 617 (unsigned long long)this->size, 655 618 this->erasesize, this->name); 656 619 } ··· 653 626 static int mtd_read_proc (char *page, char **start, off_t off, int count, 654 627 int *eof, void *data_unused) 655 628 { 656 - int len, l, i; 629 + struct mtd_info *mtd; 630 + int len, l; 657 631 off_t begin = 0; 658 632 659 633 mutex_lock(&mtd_table_mutex); 660 634 661 635 len = sprintf(page, "dev: size erasesize name\n"); 662 - for (i=0; i< MAX_MTD_DEVICES; i++) { 663 - 664 - l = mtd_proc_info(page + len, i); 636 + mtd_for_each_device(mtd) { 637 + l = mtd_proc_info(page + len, mtd); 665 638 len += l; 666 639 if (len+begin > off+count) 667 640 goto done;
+6 -1
drivers/mtd/mtdcore.h
··· 8 8 should not use them for _anything_ else */ 9 9 10 10 extern struct mutex mtd_table_mutex; 11 - extern struct mtd_info *mtd_table[MAX_MTD_DEVICES]; 11 + extern struct mtd_info *__mtd_next_device(int i); 12 + 13 + #define mtd_for_each_device(mtd) \ 14 + for ((mtd) = __mtd_next_device(0); \ 15 + (mtd) != NULL; \ 16 + (mtd) = __mtd_next_device(mtd->index + 1))
-5
drivers/mtd/mtdoops.c
··· 429 429 mtd_index = simple_strtoul(mtddev, &endp, 0); 430 430 if (*endp == '\0') 431 431 cxt->mtd_index = mtd_index; 432 - if (cxt->mtd_index > MAX_MTD_DEVICES) { 433 - printk(KERN_ERR "mtdoops: invalid mtd device number (%u) given\n", 434 - mtd_index); 435 - return -EINVAL; 436 - } 437 432 438 433 cxt->oops_buf = vmalloc(record_size); 439 434 if (!cxt->oops_buf) {
+6 -12
drivers/mtd/mtdsuper.c
··· 152 152 DEBUG(1, "MTDSB: mtd:%%s, name \"%s\"\n", 153 153 dev_name + 4); 154 154 155 - for (mtdnr = 0; mtdnr < MAX_MTD_DEVICES; mtdnr++) { 156 - mtd = get_mtd_device(NULL, mtdnr); 157 - if (!IS_ERR(mtd)) { 158 - if (!strcmp(mtd->name, dev_name + 4)) 159 - return get_sb_mtd_aux( 160 - fs_type, flags, 161 - dev_name, data, mtd, 162 - fill_super, mnt); 163 - 164 - put_mtd_device(mtd); 165 - } 166 - } 155 + mtd = get_mtd_device_nm(dev_name + 4); 156 + if (!IS_ERR(mtd)) 157 + return get_sb_mtd_aux( 158 + fs_type, flags, 159 + dev_name, data, mtd, 160 + fill_super, mnt); 167 161 168 162 printk(KERN_NOTICE "MTD:" 169 163 " MTD device with name \"%s\" not found.\n",
+53 -16
drivers/mtd/nand/Kconfig
··· 2 2 tristate "NAND Device Support" 3 3 depends on MTD 4 4 select MTD_NAND_IDS 5 + select MTD_NAND_ECC 5 6 help 6 7 This enables support for accessing all type of NAND flash 7 8 devices. For further information see 8 9 <http://www.linux-mtd.infradead.org/doc/nand.html>. 10 + 11 + config MTD_NAND_ECC 12 + tristate 13 + 14 + config MTD_NAND_ECC_SMC 15 + bool "NAND ECC Smart Media byte order" 16 + depends on MTD_NAND_ECC 17 + default n 18 + help 19 + Software ECC according to the Smart Media Specification. 20 + The original Linux implementation had byte 0 and 1 swapped. 9 21 10 22 if MTD_NAND 11 23 ··· 30 18 device thinks the write was successful, a bit could have been 31 19 flipped accidentally due to device wear or something else. 32 20 33 - config MTD_NAND_ECC_SMC 34 - bool "NAND ECC Smart Media byte order" 21 + config MTD_SM_COMMON 22 + tristate 35 23 default n 36 - help 37 - Software ECC according to the Smart Media Specification. 38 - The original Linux implementation had byte 0 and 1 swapped. 39 24 40 25 config MTD_NAND_MUSEUM_IDS 41 26 bool "Enable chip ids for obsolete ancient NAND devices" ··· 49 40 help 50 41 This enables the driver for the autronix autcpu12 board to 51 42 access the SmartMediaCard. 43 + 44 + config MTD_NAND_DENALI 45 + depends on PCI 46 + tristate "Support Denali NAND controller on Intel Moorestown" 47 + help 48 + Enable the driver for NAND flash on Intel Moorestown, using the 49 + Denali NAND controller core. 50 + 51 + config MTD_NAND_DENALI_SCRATCH_REG_ADDR 52 + hex "Denali NAND size scratch register address" 53 + default "0xFF108018" 54 + help 55 + Some platforms place the NAND chip size in a scratch register 56 + because (some versions of) the driver aren't able to automatically 57 + determine the size of certain chips. Set the address of the 58 + scratch register here to enable this feature. On Intel Moorestown 59 + boards, the scratch register is at 0xFF108018. 
52 60 53 61 config MTD_NAND_EDB7312 54 62 tristate "Support for Cirrus Logic EBD7312 evaluation board" ··· 121 95 or in DMA interrupt mode. 122 96 Say y for DMA mode or MPU mode will be used 123 97 124 - config MTD_NAND_TS7250 125 - tristate "NAND Flash device on TS-7250 board" 126 - depends on MACH_TS72XX 127 - help 128 - Support for NAND flash on Technologic Systems TS-7250 platform. 129 - 130 98 config MTD_NAND_IDS 131 99 tristate 100 + 101 + config MTD_NAND_RICOH 102 + tristate "Ricoh xD card reader" 103 + default n 104 + depends on PCI 105 + select MTD_SM_COMMON 106 + help 107 + Enable support for Ricoh R5C852 xD card reader 108 + You also need to enable ether 109 + NAND SSFDC (SmartMedia) read only translation layer' or new 110 + expermental, readwrite 111 + 'SmartMedia/xD new translation layer' 132 112 133 113 config MTD_NAND_AU1550 134 114 tristate "Au1550/1200 NAND support" ··· 390 358 391 359 If unsure, say N 392 360 393 - endchoice 394 - 395 361 endchoice 396 362 397 363 config MTD_NAND_PXA3xx ··· 472 442 Enables support for NAND Flash chips wired onto Freescale PowerPC 473 443 processor localbus with User-Programmable Machine support. 474 444 445 + config MTD_NAND_MPC5121_NFC 446 + tristate "MPC5121 built-in NAND Flash Controller support" 447 + depends on PPC_MPC512x 448 + help 449 + This enables the driver for the NAND flash controller on the 450 + MPC5121 SoC. 451 + 475 452 config MTD_NAND_MXC 476 453 tristate "MXC NAND support" 477 454 depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3 ··· 518 481 help 519 482 Enables support for NAND Flash chips wired onto Socrates board. 520 483 521 - config MTD_NAND_W90P910 522 - tristate "Support for NAND on w90p910 evaluation board." 484 + config MTD_NAND_NUC900 485 + tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards." 523 486 depends on ARCH_W90X900 && MTD_PARTITIONS 524 487 help 525 488 This enables the driver for the NAND Flash on evaluation board based 526 - on w90p910. 
489 + on w90p910 / NUC9xx. 527 490 528 491 endif # MTD_NAND
+7 -3
drivers/mtd/nand/Makefile
··· 2 2 # linux/drivers/nand/Makefile 3 3 # 4 4 5 - obj-$(CONFIG_MTD_NAND) += nand.o nand_ecc.o 5 + obj-$(CONFIG_MTD_NAND) += nand.o 6 + obj-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o 6 7 obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o 8 + obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o 7 9 8 10 obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o 9 11 obj-$(CONFIG_MTD_NAND_SPIA) += spia.o 10 12 obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o 11 13 obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o 14 + obj-$(CONFIG_MTD_NAND_DENALI) += denali.o 12 15 obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o 13 16 obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o 14 17 obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o ··· 22 19 obj-$(CONFIG_MTD_NAND_H1900) += h1910.o 23 20 obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o 24 21 obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o 25 - obj-$(CONFIG_MTD_NAND_TS7250) += ts7250.o 26 22 obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o 27 23 obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o 28 24 obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o ··· 41 39 obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o 42 40 obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o 43 41 obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o 44 - obj-$(CONFIG_MTD_NAND_W90P910) += w90p910_nand.o 42 + obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o 45 43 obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o 46 44 obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o 45 + obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o 46 + obj-$(CONFIG_MTD_NAND_RICOH) += r852.o 47 47 48 48 nand-objs := nand_base.o nand_bbt.o
+1 -1
drivers/mtd/nand/alauda.c
··· 49 49 50 50 #define TIMEOUT HZ 51 51 52 - static struct usb_device_id alauda_table [] = { 52 + static const struct usb_device_id alauda_table[] = { 53 53 { USB_DEVICE(0x0584, 0x0008) }, /* Fujifilm DPC-R1 */ 54 54 { USB_DEVICE(0x07b4, 0x010a) }, /* Olympus MAUSB-10 */ 55 55 { }
+1 -1
drivers/mtd/nand/atmel_nand.c
··· 474 474 } 475 475 476 476 /* first scan to find the device and get the page size */ 477 - if (nand_scan_ident(mtd, 1)) { 477 + if (nand_scan_ident(mtd, 1, NULL)) { 478 478 res = -ENXIO; 479 479 goto err_scan_ident; 480 480 }
+4 -8
drivers/mtd/nand/au1550nd.c
··· 451 451 u32 nand_phys; 452 452 453 453 /* Allocate memory for MTD device structure and private data */ 454 - au1550_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); 454 + au1550_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); 455 455 if (!au1550_mtd) { 456 456 printk("Unable to allocate NAND MTD dev structure.\n"); 457 457 return -ENOMEM; ··· 459 459 460 460 /* Get pointer to private data */ 461 461 this = (struct nand_chip *)(&au1550_mtd[1]); 462 - 463 - /* Initialize structures */ 464 - memset(au1550_mtd, 0, sizeof(struct mtd_info)); 465 - memset(this, 0, sizeof(struct nand_chip)); 466 462 467 463 /* Link the private data with the MTD structure */ 468 464 au1550_mtd->priv = this; ··· 540 544 } 541 545 nand_phys = (mem_staddr << 4) & 0xFFFC0000; 542 546 543 - p_nand = (void __iomem *)ioremap(nand_phys, 0x1000); 547 + p_nand = ioremap(nand_phys, 0x1000); 544 548 545 549 /* make controller and MTD agree */ 546 550 if (NAND_CS == 0) ··· 585 589 return 0; 586 590 587 591 outio: 588 - iounmap((void *)p_nand); 592 + iounmap(p_nand); 589 593 590 594 outmem: 591 595 kfree(au1550_mtd); ··· 606 610 kfree(au1550_mtd); 607 611 608 612 /* Unmap */ 609 - iounmap((void *)p_nand); 613 + iounmap(p_nand); 610 614 } 611 615 612 616 module_exit(au1550_cleanup);
+1 -2
drivers/mtd/nand/bcm_umi_nand.c
··· 13 13 *****************************************************************************/ 14 14 15 15 /* ---- Include Files ---------------------------------------------------- */ 16 - #include <linux/version.h> 17 16 #include <linux/module.h> 18 17 #include <linux/types.h> 19 18 #include <linux/init.h> ··· 446 447 * layout we'll be using. 447 448 */ 448 449 449 - err = nand_scan_ident(board_mtd, 1); 450 + err = nand_scan_ident(board_mtd, 1, NULL); 450 451 if (err) { 451 452 printk(KERN_ERR "nand_scan failed: %d\n", err); 452 453 iounmap(bcm_umi_io_base);
+25 -4
drivers/mtd/nand/bf5xx_nand.c
··· 68 68 #define DRV_AUTHOR "Bryan Wu <bryan.wu@analog.com>" 69 69 #define DRV_DESC "BF5xx on-chip NAND FLash Controller Driver" 70 70 71 + /* NFC_STAT Masks */ 72 + #define NBUSY 0x01 /* Not Busy */ 73 + #define WB_FULL 0x02 /* Write Buffer Full */ 74 + #define PG_WR_STAT 0x04 /* Page Write Pending */ 75 + #define PG_RD_STAT 0x08 /* Page Read Pending */ 76 + #define WB_EMPTY 0x10 /* Write Buffer Empty */ 77 + 78 + /* NFC_IRQSTAT Masks */ 79 + #define NBUSYIRQ 0x01 /* Not Busy IRQ */ 80 + #define WB_OVF 0x02 /* Write Buffer Overflow */ 81 + #define WB_EDGE 0x04 /* Write Buffer Edge Detect */ 82 + #define RD_RDY 0x08 /* Read Data Ready */ 83 + #define WR_DONE 0x10 /* Page Write Done */ 84 + 85 + /* NFC_RST Masks */ 86 + #define ECC_RST 0x01 /* ECC (and NFC counters) Reset */ 87 + 88 + /* NFC_PGCTL Masks */ 89 + #define PG_RD_START 0x01 /* Page Read Start */ 90 + #define PG_WR_START 0x02 /* Page Write Start */ 91 + 71 92 #ifdef CONFIG_MTD_NAND_BF5XX_HWECC 72 93 static int hardware_ecc = 1; 73 94 #else ··· 508 487 * transferred to generate the correct ECC register 509 488 * values. 510 489 */ 511 - bfin_write_NFC_RST(0x1); 490 + bfin_write_NFC_RST(ECC_RST); 512 491 SSYNC(); 513 492 514 493 disable_dma(CH_NFC); ··· 518 497 set_dma_config(CH_NFC, 0x0); 519 498 set_dma_start_addr(CH_NFC, (unsigned long) buf); 520 499 521 - /* The DMAs have different size on BF52x and BF54x */ 500 + /* The DMAs have different size on BF52x and BF54x */ 522 501 #ifdef CONFIG_BF52x 523 502 set_dma_x_count(CH_NFC, (page_size >> 1)); 524 503 set_dma_x_modify(CH_NFC, 2); ··· 538 517 539 518 /* Start PAGE read/write operation */ 540 519 if (is_read) 541 - bfin_write_NFC_PGCTL(0x1); 520 + bfin_write_NFC_PGCTL(PG_RD_START); 542 521 else 543 - bfin_write_NFC_PGCTL(0x2); 522 + bfin_write_NFC_PGCTL(PG_WR_START); 544 523 wait_for_completion(&info->dma_completion); 545 524 } 546 525
+2 -2
drivers/mtd/nand/cafe_nand.c
··· 762 762 cafe_readl(cafe, GLOBAL_CTRL), cafe_readl(cafe, GLOBAL_IRQ_MASK)); 763 763 764 764 /* Scan to find existence of the device */ 765 - if (nand_scan_ident(mtd, 2)) { 765 + if (nand_scan_ident(mtd, 2, NULL)) { 766 766 err = -ENXIO; 767 767 goto out_irq; 768 768 } ··· 849 849 kfree(mtd); 850 850 } 851 851 852 - static struct pci_device_id cafe_nand_tbl[] = { 852 + static const struct pci_device_id cafe_nand_tbl[] = { 853 853 { PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_88ALP01_NAND, 854 854 PCI_ANY_ID, PCI_ANY_ID }, 855 855 { }
+3 -3
drivers/mtd/nand/davinci_nand.c
··· 567 567 goto err_nomem; 568 568 } 569 569 570 - vaddr = ioremap(res1->start, res1->end - res1->start); 571 - base = ioremap(res2->start, res2->end - res2->start); 570 + vaddr = ioremap(res1->start, resource_size(res1)); 571 + base = ioremap(res2->start, resource_size(res2)); 572 572 if (!vaddr || !base) { 573 573 dev_err(&pdev->dev, "ioremap failed\n"); 574 574 ret = -EINVAL; ··· 691 691 spin_unlock_irq(&davinci_nand_lock); 692 692 693 693 /* Scan to find existence of the device(s) */ 694 - ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1); 694 + ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1, NULL); 695 695 if (ret < 0) { 696 696 dev_dbg(&pdev->dev, "no NAND chip(s) found\n"); 697 697 goto err_scan;
+2134
drivers/mtd/nand/denali.c
··· 1 + /* 2 + * NAND Flash Controller Device Driver 3 + * Copyright © 2009-2010, Intel Corporation and its suppliers. 4 + * 5 + * This program is free software; you can redistribute it and/or modify it 6 + * under the terms and conditions of the GNU General Public License, 7 + * version 2, as published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope it will be useful, but WITHOUT 10 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 + * more details. 13 + * 14 + * You should have received a copy of the GNU General Public License along with 15 + * this program; if not, write to the Free Software Foundation, Inc., 16 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 + * 18 + */ 19 + 20 + #include <linux/interrupt.h> 21 + #include <linux/delay.h> 22 + #include <linux/wait.h> 23 + #include <linux/mutex.h> 24 + #include <linux/pci.h> 25 + #include <linux/mtd/mtd.h> 26 + #include <linux/module.h> 27 + 28 + #include "denali.h" 29 + 30 + MODULE_LICENSE("GPL"); 31 + 32 + /* We define a module parameter that allows the user to override 33 + * the hardware and decide what timing mode should be used. 34 + */ 35 + #define NAND_DEFAULT_TIMINGS -1 36 + 37 + static int onfi_timing_mode = NAND_DEFAULT_TIMINGS; 38 + module_param(onfi_timing_mode, int, S_IRUGO); 39 + MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting. -1 indicates" 40 + " use default timings"); 41 + 42 + #define DENALI_NAND_NAME "denali-nand" 43 + 44 + /* We define a macro here that combines all interrupts this driver uses into 45 + * a single constant value, for convenience. 
*/ 46 + #define DENALI_IRQ_ALL (INTR_STATUS0__DMA_CMD_COMP | \ 47 + INTR_STATUS0__ECC_TRANSACTION_DONE | \ 48 + INTR_STATUS0__ECC_ERR | \ 49 + INTR_STATUS0__PROGRAM_FAIL | \ 50 + INTR_STATUS0__LOAD_COMP | \ 51 + INTR_STATUS0__PROGRAM_COMP | \ 52 + INTR_STATUS0__TIME_OUT | \ 53 + INTR_STATUS0__ERASE_FAIL | \ 54 + INTR_STATUS0__RST_COMP | \ 55 + INTR_STATUS0__ERASE_COMP) 56 + 57 + /* indicates whether or not the internal value for the flash bank is 58 + valid or not */ 59 + #define CHIP_SELECT_INVALID -1 60 + 61 + #define SUPPORT_8BITECC 1 62 + 63 + /* This macro divides two integers and rounds fractional values up 64 + * to the nearest integer value. */ 65 + #define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y))) 66 + 67 + /* this macro allows us to convert from an MTD structure to our own 68 + * device context (denali) structure. 69 + */ 70 + #define mtd_to_denali(m) container_of(m, struct denali_nand_info, mtd) 71 + 72 + /* These constants are defined by the driver to enable common driver 73 + configuration options. */ 74 + #define SPARE_ACCESS 0x41 75 + #define MAIN_ACCESS 0x42 76 + #define MAIN_SPARE_ACCESS 0x43 77 + 78 + #define DENALI_READ 0 79 + #define DENALI_WRITE 0x100 80 + 81 + /* types of device accesses. We can issue commands and get status */ 82 + #define COMMAND_CYCLE 0 83 + #define ADDR_CYCLE 1 84 + #define STATUS_CYCLE 2 85 + 86 + /* this is a helper macro that allows us to 87 + * format the bank into the proper bits for the controller */ 88 + #define BANK(x) ((x) << 24) 89 + 90 + /* List of platforms this NAND controller has be integrated into */ 91 + static const struct pci_device_id denali_pci_ids[] = { 92 + { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 }, 93 + { PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST }, 94 + { /* end: all zeroes */ } 95 + }; 96 + 97 + 98 + /* these are static lookup tables that give us easy access to 99 + registers in the NAND controller. 
100 + */ 101 + static const uint32_t intr_status_addresses[4] = {INTR_STATUS0, 102 + INTR_STATUS1, 103 + INTR_STATUS2, 104 + INTR_STATUS3}; 105 + 106 + static const uint32_t device_reset_banks[4] = {DEVICE_RESET__BANK0, 107 + DEVICE_RESET__BANK1, 108 + DEVICE_RESET__BANK2, 109 + DEVICE_RESET__BANK3}; 110 + 111 + static const uint32_t operation_timeout[4] = {INTR_STATUS0__TIME_OUT, 112 + INTR_STATUS1__TIME_OUT, 113 + INTR_STATUS2__TIME_OUT, 114 + INTR_STATUS3__TIME_OUT}; 115 + 116 + static const uint32_t reset_complete[4] = {INTR_STATUS0__RST_COMP, 117 + INTR_STATUS1__RST_COMP, 118 + INTR_STATUS2__RST_COMP, 119 + INTR_STATUS3__RST_COMP}; 120 + 121 + /* specifies the debug level of the driver */ 122 + static int nand_debug_level = 0; 123 + 124 + /* forward declarations */ 125 + static void clear_interrupts(struct denali_nand_info *denali); 126 + static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask); 127 + static void denali_irq_enable(struct denali_nand_info *denali, uint32_t int_mask); 128 + static uint32_t read_interrupt_status(struct denali_nand_info *denali); 129 + 130 + #define DEBUG_DENALI 0 131 + 132 + /* This is a wrapper for writing to the denali registers. 133 + * this allows us to create debug information so we can 134 + * observe how the driver is programming the device. 135 + * it uses standard linux convention for (val, addr) */ 136 + static void denali_write32(uint32_t value, void *addr) 137 + { 138 + iowrite32(value, addr); 139 + 140 + #if DEBUG_DENALI 141 + printk(KERN_ERR "wrote: 0x%x -> 0x%x\n", value, (uint32_t)((uint32_t)addr & 0x1fff)); 142 + #endif 143 + } 144 + 145 + /* Certain operations for the denali NAND controller use an indexed mode to read/write 146 + data. The operation is performed by writing the address value of the command to 147 + the device memory followed by the data. This function abstracts this common 148 + operation. 
149 + */ 150 + static void index_addr(struct denali_nand_info *denali, uint32_t address, uint32_t data) 151 + { 152 + denali_write32(address, denali->flash_mem); 153 + denali_write32(data, denali->flash_mem + 0x10); 154 + } 155 + 156 + /* Perform an indexed read of the device */ 157 + static void index_addr_read_data(struct denali_nand_info *denali, 158 + uint32_t address, uint32_t *pdata) 159 + { 160 + denali_write32(address, denali->flash_mem); 161 + *pdata = ioread32(denali->flash_mem + 0x10); 162 + } 163 + 164 + /* We need to buffer some data for some of the NAND core routines. 165 + * The operations manage buffering that data. */ 166 + static void reset_buf(struct denali_nand_info *denali) 167 + { 168 + denali->buf.head = denali->buf.tail = 0; 169 + } 170 + 171 + static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte) 172 + { 173 + BUG_ON(denali->buf.tail >= sizeof(denali->buf.buf)); 174 + denali->buf.buf[denali->buf.tail++] = byte; 175 + } 176 + 177 + /* reads the status of the device */ 178 + static void read_status(struct denali_nand_info *denali) 179 + { 180 + uint32_t cmd = 0x0; 181 + 182 + /* initialize the data buffer to store status */ 183 + reset_buf(denali); 184 + 185 + /* initiate a device status read */ 186 + cmd = MODE_11 | BANK(denali->flash_bank); 187 + index_addr(denali, cmd | COMMAND_CYCLE, 0x70); 188 + denali_write32(cmd | STATUS_CYCLE, denali->flash_mem); 189 + 190 + /* update buffer with status value */ 191 + write_byte_to_buf(denali, ioread32(denali->flash_mem + 0x10)); 192 + 193 + #if DEBUG_DENALI 194 + printk("device reporting status value of 0x%2x\n", denali->buf.buf[0]); 195 + #endif 196 + } 197 + 198 + /* resets a specific device connected to the core */ 199 + static void reset_bank(struct denali_nand_info *denali) 200 + { 201 + uint32_t irq_status = 0; 202 + uint32_t irq_mask = reset_complete[denali->flash_bank] | 203 + operation_timeout[denali->flash_bank]; 204 + int bank = 0; 205 + 206 + 
clear_interrupts(denali); 207 + 208 + bank = device_reset_banks[denali->flash_bank]; 209 + denali_write32(bank, denali->flash_reg + DEVICE_RESET); 210 + 211 + irq_status = wait_for_irq(denali, irq_mask); 212 + 213 + if (irq_status & operation_timeout[denali->flash_bank]) 214 + { 215 + printk(KERN_ERR "reset bank failed.\n"); 216 + } 217 + } 218 + 219 + /* Reset the flash controller */ 220 + static uint16_t NAND_Flash_Reset(struct denali_nand_info *denali) 221 + { 222 + uint32_t i; 223 + 224 + nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", 225 + __FILE__, __LINE__, __func__); 226 + 227 + for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) 228 + denali_write32(reset_complete[i] | operation_timeout[i], 229 + denali->flash_reg + intr_status_addresses[i]); 230 + 231 + for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) { 232 + denali_write32(device_reset_banks[i], denali->flash_reg + DEVICE_RESET); 233 + while (!(ioread32(denali->flash_reg + intr_status_addresses[i]) & 234 + (reset_complete[i] | operation_timeout[i]))) 235 + ; 236 + if (ioread32(denali->flash_reg + intr_status_addresses[i]) & 237 + operation_timeout[i]) 238 + nand_dbg_print(NAND_DBG_WARN, 239 + "NAND Reset operation timed out on bank %d\n", i); 240 + } 241 + 242 + for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) 243 + denali_write32(reset_complete[i] | operation_timeout[i], 244 + denali->flash_reg + intr_status_addresses[i]); 245 + 246 + return PASS; 247 + } 248 + 249 + /* this routine calculates the ONFI timing values for a given mode and programs 250 + * the clocking register accordingly. The mode is determined by the get_onfi_nand_para 251 + routine. 
252 + */ 253 + static void NAND_ONFi_Timing_Mode(struct denali_nand_info *denali, uint16_t mode) 254 + { 255 + uint16_t Trea[6] = {40, 30, 25, 20, 20, 16}; 256 + uint16_t Trp[6] = {50, 25, 17, 15, 12, 10}; 257 + uint16_t Treh[6] = {30, 15, 15, 10, 10, 7}; 258 + uint16_t Trc[6] = {100, 50, 35, 30, 25, 20}; 259 + uint16_t Trhoh[6] = {0, 15, 15, 15, 15, 15}; 260 + uint16_t Trloh[6] = {0, 0, 0, 0, 5, 5}; 261 + uint16_t Tcea[6] = {100, 45, 30, 25, 25, 25}; 262 + uint16_t Tadl[6] = {200, 100, 100, 100, 70, 70}; 263 + uint16_t Trhw[6] = {200, 100, 100, 100, 100, 100}; 264 + uint16_t Trhz[6] = {200, 100, 100, 100, 100, 100}; 265 + uint16_t Twhr[6] = {120, 80, 80, 60, 60, 60}; 266 + uint16_t Tcs[6] = {70, 35, 25, 25, 20, 15}; 267 + 268 + uint16_t TclsRising = 1; 269 + uint16_t data_invalid_rhoh, data_invalid_rloh, data_invalid; 270 + uint16_t dv_window = 0; 271 + uint16_t en_lo, en_hi; 272 + uint16_t acc_clks; 273 + uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt; 274 + 275 + nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", 276 + __FILE__, __LINE__, __func__); 277 + 278 + en_lo = CEIL_DIV(Trp[mode], CLK_X); 279 + en_hi = CEIL_DIV(Treh[mode], CLK_X); 280 + #if ONFI_BLOOM_TIME 281 + if ((en_hi * CLK_X) < (Treh[mode] + 2)) 282 + en_hi++; 283 + #endif 284 + 285 + if ((en_lo + en_hi) * CLK_X < Trc[mode]) 286 + en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X); 287 + 288 + if ((en_lo + en_hi) < CLK_MULTI) 289 + en_lo += CLK_MULTI - en_lo - en_hi; 290 + 291 + while (dv_window < 8) { 292 + data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode]; 293 + 294 + data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode]; 295 + 296 + data_invalid = 297 + data_invalid_rhoh < 298 + data_invalid_rloh ? 
data_invalid_rhoh : data_invalid_rloh; 299 + 300 + dv_window = data_invalid - Trea[mode]; 301 + 302 + if (dv_window < 8) 303 + en_lo++; 304 + } 305 + 306 + acc_clks = CEIL_DIV(Trea[mode], CLK_X); 307 + 308 + while (((acc_clks * CLK_X) - Trea[mode]) < 3) 309 + acc_clks++; 310 + 311 + if ((data_invalid - acc_clks * CLK_X) < 2) 312 + nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n", 313 + __FILE__, __LINE__); 314 + 315 + addr_2_data = CEIL_DIV(Tadl[mode], CLK_X); 316 + re_2_we = CEIL_DIV(Trhw[mode], CLK_X); 317 + re_2_re = CEIL_DIV(Trhz[mode], CLK_X); 318 + we_2_re = CEIL_DIV(Twhr[mode], CLK_X); 319 + cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X); 320 + if (!TclsRising) 321 + cs_cnt = CEIL_DIV(Tcs[mode], CLK_X); 322 + if (cs_cnt == 0) 323 + cs_cnt = 1; 324 + 325 + if (Tcea[mode]) { 326 + while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode]) 327 + cs_cnt++; 328 + } 329 + 330 + #if MODE5_WORKAROUND 331 + if (mode == 5) 332 + acc_clks = 5; 333 + #endif 334 + 335 + /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */ 336 + if ((ioread32(denali->flash_reg + MANUFACTURER_ID) == 0) && 337 + (ioread32(denali->flash_reg + DEVICE_ID) == 0x88)) 338 + acc_clks = 6; 339 + 340 + denali_write32(acc_clks, denali->flash_reg + ACC_CLKS); 341 + denali_write32(re_2_we, denali->flash_reg + RE_2_WE); 342 + denali_write32(re_2_re, denali->flash_reg + RE_2_RE); 343 + denali_write32(we_2_re, denali->flash_reg + WE_2_RE); 344 + denali_write32(addr_2_data, denali->flash_reg + ADDR_2_DATA); 345 + denali_write32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT); 346 + denali_write32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT); 347 + denali_write32(cs_cnt, denali->flash_reg + CS_SETUP_CNT); 348 + } 349 + 350 + /* configures the initial ECC settings for the controller */ 351 + static void set_ecc_config(struct denali_nand_info *denali) 352 + { 353 + #if SUPPORT_8BITECC 354 + if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) < 4096) || 355 + (ioread32(denali->flash_reg + 
DEVICE_SPARE_AREA_SIZE) <= 128)) 356 + denali_write32(8, denali->flash_reg + ECC_CORRECTION); 357 + #endif 358 + 359 + if ((ioread32(denali->flash_reg + ECC_CORRECTION) & ECC_CORRECTION__VALUE) 360 + == 1) { 361 + denali->dev_info.wECCBytesPerSector = 4; 362 + denali->dev_info.wECCBytesPerSector *= denali->dev_info.wDevicesConnected; 363 + denali->dev_info.wNumPageSpareFlag = 364 + denali->dev_info.wPageSpareSize - 365 + denali->dev_info.wPageDataSize / 366 + (ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) * 367 + denali->dev_info.wECCBytesPerSector 368 + - denali->dev_info.wSpareSkipBytes; 369 + } else { 370 + denali->dev_info.wECCBytesPerSector = 371 + (ioread32(denali->flash_reg + ECC_CORRECTION) & 372 + ECC_CORRECTION__VALUE) * 13 / 8; 373 + if ((denali->dev_info.wECCBytesPerSector) % 2 == 0) 374 + denali->dev_info.wECCBytesPerSector += 2; 375 + else 376 + denali->dev_info.wECCBytesPerSector += 1; 377 + 378 + denali->dev_info.wECCBytesPerSector *= denali->dev_info.wDevicesConnected; 379 + denali->dev_info.wNumPageSpareFlag = denali->dev_info.wPageSpareSize - 380 + denali->dev_info.wPageDataSize / 381 + (ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) * 382 + denali->dev_info.wECCBytesPerSector 383 + - denali->dev_info.wSpareSkipBytes; 384 + } 385 + } 386 + 387 + /* queries the NAND device to see what ONFI modes it supports. 
*/ 388 + static uint16_t get_onfi_nand_para(struct denali_nand_info *denali) 389 + { 390 + int i; 391 + uint16_t blks_lun_l, blks_lun_h, n_of_luns; 392 + uint32_t blockperlun, id; 393 + 394 + denali_write32(DEVICE_RESET__BANK0, denali->flash_reg + DEVICE_RESET); 395 + 396 + while (!((ioread32(denali->flash_reg + INTR_STATUS0) & 397 + INTR_STATUS0__RST_COMP) | 398 + (ioread32(denali->flash_reg + INTR_STATUS0) & 399 + INTR_STATUS0__TIME_OUT))) 400 + ; 401 + 402 + if (ioread32(denali->flash_reg + INTR_STATUS0) & INTR_STATUS0__RST_COMP) { 403 + denali_write32(DEVICE_RESET__BANK1, denali->flash_reg + DEVICE_RESET); 404 + while (!((ioread32(denali->flash_reg + INTR_STATUS1) & 405 + INTR_STATUS1__RST_COMP) | 406 + (ioread32(denali->flash_reg + INTR_STATUS1) & 407 + INTR_STATUS1__TIME_OUT))) 408 + ; 409 + 410 + if (ioread32(denali->flash_reg + INTR_STATUS1) & 411 + INTR_STATUS1__RST_COMP) { 412 + denali_write32(DEVICE_RESET__BANK2, 413 + denali->flash_reg + DEVICE_RESET); 414 + while (!((ioread32(denali->flash_reg + INTR_STATUS2) & 415 + INTR_STATUS2__RST_COMP) | 416 + (ioread32(denali->flash_reg + INTR_STATUS2) & 417 + INTR_STATUS2__TIME_OUT))) 418 + ; 419 + 420 + if (ioread32(denali->flash_reg + INTR_STATUS2) & 421 + INTR_STATUS2__RST_COMP) { 422 + denali_write32(DEVICE_RESET__BANK3, 423 + denali->flash_reg + DEVICE_RESET); 424 + while (!((ioread32(denali->flash_reg + INTR_STATUS3) & 425 + INTR_STATUS3__RST_COMP) | 426 + (ioread32(denali->flash_reg + INTR_STATUS3) & 427 + INTR_STATUS3__TIME_OUT))) 428 + ; 429 + } else { 430 + printk(KERN_ERR "Getting a time out for bank 2!\n"); 431 + } 432 + } else { 433 + printk(KERN_ERR "Getting a time out for bank 1!\n"); 434 + } 435 + } 436 + 437 + denali_write32(INTR_STATUS0__TIME_OUT, denali->flash_reg + INTR_STATUS0); 438 + denali_write32(INTR_STATUS1__TIME_OUT, denali->flash_reg + INTR_STATUS1); 439 + denali_write32(INTR_STATUS2__TIME_OUT, denali->flash_reg + INTR_STATUS2); 440 + denali_write32(INTR_STATUS3__TIME_OUT, 
denali->flash_reg + INTR_STATUS3); 441 + 442 + denali->dev_info.wONFIDevFeatures = 443 + ioread32(denali->flash_reg + ONFI_DEVICE_FEATURES); 444 + denali->dev_info.wONFIOptCommands = 445 + ioread32(denali->flash_reg + ONFI_OPTIONAL_COMMANDS); 446 + denali->dev_info.wONFITimingMode = 447 + ioread32(denali->flash_reg + ONFI_TIMING_MODE); 448 + denali->dev_info.wONFIPgmCacheTimingMode = 449 + ioread32(denali->flash_reg + ONFI_PGM_CACHE_TIMING_MODE); 450 + 451 + n_of_luns = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) & 452 + ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS; 453 + blks_lun_l = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L); 454 + blks_lun_h = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U); 455 + 456 + blockperlun = (blks_lun_h << 16) | blks_lun_l; 457 + 458 + denali->dev_info.wTotalBlocks = n_of_luns * blockperlun; 459 + 460 + if (!(ioread32(denali->flash_reg + ONFI_TIMING_MODE) & 461 + ONFI_TIMING_MODE__VALUE)) 462 + return FAIL; 463 + 464 + for (i = 5; i > 0; i--) { 465 + if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) & (0x01 << i)) 466 + break; 467 + } 468 + 469 + NAND_ONFi_Timing_Mode(denali, i); 470 + 471 + index_addr(denali, MODE_11 | 0, 0x90); 472 + index_addr(denali, MODE_11 | 1, 0); 473 + 474 + for (i = 0; i < 3; i++) 475 + index_addr_read_data(denali, MODE_11 | 2, &id); 476 + 477 + nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id); 478 + 479 + denali->dev_info.MLCDevice = id & 0x0C; 480 + 481 + /* By now, all the ONFI devices we know support the page cache */ 482 + /* rw feature. 
So here we enable the pipeline_rw_ahead feature */ 483 + /* iowrite32(1, denali->flash_reg + CACHE_WRITE_ENABLE); */ 484 + /* iowrite32(1, denali->flash_reg + CACHE_READ_ENABLE); */ 485 + 486 + return PASS; 487 + } 488 + 489 + static void get_samsung_nand_para(struct denali_nand_info *denali) 490 + { 491 + uint8_t no_of_planes; 492 + uint32_t blk_size; 493 + uint64_t plane_size, capacity; 494 + uint32_t id_bytes[5]; 495 + int i; 496 + 497 + index_addr(denali, (uint32_t)(MODE_11 | 0), 0x90); 498 + index_addr(denali, (uint32_t)(MODE_11 | 1), 0); 499 + for (i = 0; i < 5; i++) 500 + index_addr_read_data(denali, (uint32_t)(MODE_11 | 2), &id_bytes[i]); 501 + 502 + nand_dbg_print(NAND_DBG_DEBUG, 503 + "ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", 504 + id_bytes[0], id_bytes[1], id_bytes[2], 505 + id_bytes[3], id_bytes[4]); 506 + 507 + if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */ 508 + /* Set timing register values according to datasheet */ 509 + denali_write32(5, denali->flash_reg + ACC_CLKS); 510 + denali_write32(20, denali->flash_reg + RE_2_WE); 511 + denali_write32(12, denali->flash_reg + WE_2_RE); 512 + denali_write32(14, denali->flash_reg + ADDR_2_DATA); 513 + denali_write32(3, denali->flash_reg + RDWR_EN_LO_CNT); 514 + denali_write32(2, denali->flash_reg + RDWR_EN_HI_CNT); 515 + denali_write32(2, denali->flash_reg + CS_SETUP_CNT); 516 + } 517 + 518 + no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2); 519 + plane_size = (uint64_t)64 << ((id_bytes[4] & 0x70) >> 4); 520 + blk_size = 64 << ((ioread32(denali->flash_reg + DEVICE_PARAM_1) & 0x30) >> 4); 521 + capacity = (uint64_t)128 * plane_size * no_of_planes; 522 + 523 + do_div(capacity, blk_size); 524 + denali->dev_info.wTotalBlocks = capacity; 525 + } 526 + 527 + static void get_toshiba_nand_para(struct denali_nand_info *denali) 528 + { 529 + void __iomem *scratch_reg; 530 + uint32_t tmp; 531 + 532 + /* Workaround to fix a controller bug which reports a wrong */ 533 + /* spare area size for some kind of 
Toshiba NAND device */ 534 + if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) && 535 + (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) { 536 + denali_write32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); 537 + tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) * 538 + ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE); 539 + denali_write32(tmp, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE); 540 + #if SUPPORT_15BITECC 541 + denali_write32(15, denali->flash_reg + ECC_CORRECTION); 542 + #elif SUPPORT_8BITECC 543 + denali_write32(8, denali->flash_reg + ECC_CORRECTION); 544 + #endif 545 + } 546 + 547 + /* As Toshiba NAND can not provide it's block number, */ 548 + /* so here we need user to provide the correct block */ 549 + /* number in a scratch register before the Linux NAND */ 550 + /* driver is loaded. If no valid value found in the scratch */ 551 + /* register, then we use default block number value */ 552 + scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE); 553 + if (!scratch_reg) { 554 + printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d", 555 + __FILE__, __LINE__); 556 + denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS; 557 + } else { 558 + nand_dbg_print(NAND_DBG_WARN, 559 + "Spectra: ioremap reg address: 0x%p\n", scratch_reg); 560 + denali->dev_info.wTotalBlocks = 1 << ioread8(scratch_reg); 561 + if (denali->dev_info.wTotalBlocks < 512) 562 + denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS; 563 + iounmap(scratch_reg); 564 + } 565 + } 566 + 567 + static void get_hynix_nand_para(struct denali_nand_info *denali) 568 + { 569 + void __iomem *scratch_reg; 570 + uint32_t main_size, spare_size; 571 + 572 + switch (denali->dev_info.wDeviceID) { 573 + case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */ 574 + case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */ 575 + denali_write32(128, denali->flash_reg + PAGES_PER_BLOCK); 576 + denali_write32(4096, denali->flash_reg + 
DEVICE_MAIN_AREA_SIZE); 577 + denali_write32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); 578 + main_size = 4096 * ioread32(denali->flash_reg + DEVICES_CONNECTED); 579 + spare_size = 224 * ioread32(denali->flash_reg + DEVICES_CONNECTED); 580 + denali_write32(main_size, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE); 581 + denali_write32(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE); 582 + denali_write32(0, denali->flash_reg + DEVICE_WIDTH); 583 + #if SUPPORT_15BITECC 584 + denali_write32(15, denali->flash_reg + ECC_CORRECTION); 585 + #elif SUPPORT_8BITECC 586 + denali_write32(8, denali->flash_reg + ECC_CORRECTION); 587 + #endif 588 + denali->dev_info.MLCDevice = 1; 589 + break; 590 + default: 591 + nand_dbg_print(NAND_DBG_WARN, 592 + "Spectra: Unknown Hynix NAND (Device ID: 0x%x)." 593 + "Will use default parameter values instead.\n", 594 + denali->dev_info.wDeviceID); 595 + } 596 + 597 + scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE); 598 + if (!scratch_reg) { 599 + printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d", 600 + __FILE__, __LINE__); 601 + denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS; 602 + } else { 603 + nand_dbg_print(NAND_DBG_WARN, 604 + "Spectra: ioremap reg address: 0x%p\n", scratch_reg); 605 + denali->dev_info.wTotalBlocks = 1 << ioread8(scratch_reg); 606 + if (denali->dev_info.wTotalBlocks < 512) 607 + denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS; 608 + iounmap(scratch_reg); 609 + } 610 + } 611 + 612 + /* determines how many NAND chips are connected to the controller. Note for 613 + Intel CE4100 devices we don't support more than one device. 
614 + */ 615 + static void find_valid_banks(struct denali_nand_info *denali) 616 + { 617 + uint32_t id[LLD_MAX_FLASH_BANKS]; 618 + int i; 619 + 620 + denali->total_used_banks = 1; 621 + for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) { 622 + index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90); 623 + index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0); 624 + index_addr_read_data(denali, (uint32_t)(MODE_11 | (i << 24) | 2), &id[i]); 625 + 626 + nand_dbg_print(NAND_DBG_DEBUG, 627 + "Return 1st ID for bank[%d]: %x\n", i, id[i]); 628 + 629 + if (i == 0) { 630 + if (!(id[i] & 0x0ff)) 631 + break; /* WTF? */ 632 + } else { 633 + if ((id[i] & 0x0ff) == (id[0] & 0x0ff)) 634 + denali->total_used_banks++; 635 + else 636 + break; 637 + } 638 + } 639 + 640 + if (denali->platform == INTEL_CE4100) 641 + { 642 + /* Platform limitations of the CE4100 device limit 643 + * users to a single chip solution for NAND. 644 + * Multichip support is not enabled. 645 + */ 646 + if (denali->total_used_banks != 1) 647 + { 648 + printk(KERN_ERR "Sorry, Intel CE4100 only supports " 649 + "a single NAND device.\n"); 650 + BUG(); 651 + } 652 + } 653 + nand_dbg_print(NAND_DBG_DEBUG, 654 + "denali->total_used_banks: %d\n", denali->total_used_banks); 655 + } 656 + 657 + static void detect_partition_feature(struct denali_nand_info *denali) 658 + { 659 + if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) { 660 + if ((ioread32(denali->flash_reg + PERM_SRC_ID_1) & 661 + PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) { 662 + denali->dev_info.wSpectraStartBlock = 663 + ((ioread32(denali->flash_reg + MIN_MAX_BANK_1) & 664 + MIN_MAX_BANK_1__MIN_VALUE) * 665 + denali->dev_info.wTotalBlocks) 666 + + 667 + (ioread32(denali->flash_reg + MIN_BLK_ADDR_1) & 668 + MIN_BLK_ADDR_1__VALUE); 669 + 670 + denali->dev_info.wSpectraEndBlock = 671 + (((ioread32(denali->flash_reg + MIN_MAX_BANK_1) & 672 + MIN_MAX_BANK_1__MAX_VALUE) >> 2) * 673 + denali->dev_info.wTotalBlocks) 674 + + 675 + 
(ioread32(denali->flash_reg + MAX_BLK_ADDR_1) & 676 + MAX_BLK_ADDR_1__VALUE); 677 + 678 + denali->dev_info.wTotalBlocks *= denali->total_used_banks; 679 + 680 + if (denali->dev_info.wSpectraEndBlock >= 681 + denali->dev_info.wTotalBlocks) { 682 + denali->dev_info.wSpectraEndBlock = 683 + denali->dev_info.wTotalBlocks - 1; 684 + } 685 + 686 + denali->dev_info.wDataBlockNum = 687 + denali->dev_info.wSpectraEndBlock - 688 + denali->dev_info.wSpectraStartBlock + 1; 689 + } else { 690 + denali->dev_info.wTotalBlocks *= denali->total_used_banks; 691 + denali->dev_info.wSpectraStartBlock = SPECTRA_START_BLOCK; 692 + denali->dev_info.wSpectraEndBlock = 693 + denali->dev_info.wTotalBlocks - 1; 694 + denali->dev_info.wDataBlockNum = 695 + denali->dev_info.wSpectraEndBlock - 696 + denali->dev_info.wSpectraStartBlock + 1; 697 + } 698 + } else { 699 + denali->dev_info.wTotalBlocks *= denali->total_used_banks; 700 + denali->dev_info.wSpectraStartBlock = SPECTRA_START_BLOCK; 701 + denali->dev_info.wSpectraEndBlock = denali->dev_info.wTotalBlocks - 1; 702 + denali->dev_info.wDataBlockNum = 703 + denali->dev_info.wSpectraEndBlock - 704 + denali->dev_info.wSpectraStartBlock + 1; 705 + } 706 + } 707 + 708 + static void dump_device_info(struct denali_nand_info *denali) 709 + { 710 + nand_dbg_print(NAND_DBG_DEBUG, "denali->dev_info:\n"); 711 + nand_dbg_print(NAND_DBG_DEBUG, "DeviceMaker: 0x%x\n", 712 + denali->dev_info.wDeviceMaker); 713 + nand_dbg_print(NAND_DBG_DEBUG, "DeviceID: 0x%x\n", 714 + denali->dev_info.wDeviceID); 715 + nand_dbg_print(NAND_DBG_DEBUG, "DeviceType: 0x%x\n", 716 + denali->dev_info.wDeviceType); 717 + nand_dbg_print(NAND_DBG_DEBUG, "SpectraStartBlock: %d\n", 718 + denali->dev_info.wSpectraStartBlock); 719 + nand_dbg_print(NAND_DBG_DEBUG, "SpectraEndBlock: %d\n", 720 + denali->dev_info.wSpectraEndBlock); 721 + nand_dbg_print(NAND_DBG_DEBUG, "TotalBlocks: %d\n", 722 + denali->dev_info.wTotalBlocks); 723 + nand_dbg_print(NAND_DBG_DEBUG, "PagesPerBlock: %d\n", 724 + 
denali->dev_info.wPagesPerBlock); 725 + nand_dbg_print(NAND_DBG_DEBUG, "PageSize: %d\n", 726 + denali->dev_info.wPageSize); 727 + nand_dbg_print(NAND_DBG_DEBUG, "PageDataSize: %d\n", 728 + denali->dev_info.wPageDataSize); 729 + nand_dbg_print(NAND_DBG_DEBUG, "PageSpareSize: %d\n", 730 + denali->dev_info.wPageSpareSize); 731 + nand_dbg_print(NAND_DBG_DEBUG, "NumPageSpareFlag: %d\n", 732 + denali->dev_info.wNumPageSpareFlag); 733 + nand_dbg_print(NAND_DBG_DEBUG, "ECCBytesPerSector: %d\n", 734 + denali->dev_info.wECCBytesPerSector); 735 + nand_dbg_print(NAND_DBG_DEBUG, "BlockSize: %d\n", 736 + denali->dev_info.wBlockSize); 737 + nand_dbg_print(NAND_DBG_DEBUG, "BlockDataSize: %d\n", 738 + denali->dev_info.wBlockDataSize); 739 + nand_dbg_print(NAND_DBG_DEBUG, "DataBlockNum: %d\n", 740 + denali->dev_info.wDataBlockNum); 741 + nand_dbg_print(NAND_DBG_DEBUG, "PlaneNum: %d\n", 742 + denali->dev_info.bPlaneNum); 743 + nand_dbg_print(NAND_DBG_DEBUG, "DeviceMainAreaSize: %d\n", 744 + denali->dev_info.wDeviceMainAreaSize); 745 + nand_dbg_print(NAND_DBG_DEBUG, "DeviceSpareAreaSize: %d\n", 746 + denali->dev_info.wDeviceSpareAreaSize); 747 + nand_dbg_print(NAND_DBG_DEBUG, "DevicesConnected: %d\n", 748 + denali->dev_info.wDevicesConnected); 749 + nand_dbg_print(NAND_DBG_DEBUG, "DeviceWidth: %d\n", 750 + denali->dev_info.wDeviceWidth); 751 + nand_dbg_print(NAND_DBG_DEBUG, "HWRevision: 0x%x\n", 752 + denali->dev_info.wHWRevision); 753 + nand_dbg_print(NAND_DBG_DEBUG, "HWFeatures: 0x%x\n", 754 + denali->dev_info.wHWFeatures); 755 + nand_dbg_print(NAND_DBG_DEBUG, "ONFIDevFeatures: 0x%x\n", 756 + denali->dev_info.wONFIDevFeatures); 757 + nand_dbg_print(NAND_DBG_DEBUG, "ONFIOptCommands: 0x%x\n", 758 + denali->dev_info.wONFIOptCommands); 759 + nand_dbg_print(NAND_DBG_DEBUG, "ONFITimingMode: 0x%x\n", 760 + denali->dev_info.wONFITimingMode); 761 + nand_dbg_print(NAND_DBG_DEBUG, "ONFIPgmCacheTimingMode: 0x%x\n", 762 + denali->dev_info.wONFIPgmCacheTimingMode); 763 + 
nand_dbg_print(NAND_DBG_DEBUG, "MLCDevice: %s\n", 764 + denali->dev_info.MLCDevice ? "Yes" : "No"); 765 + nand_dbg_print(NAND_DBG_DEBUG, "SpareSkipBytes: %d\n", 766 + denali->dev_info.wSpareSkipBytes); 767 + nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageNumber: %d\n", 768 + denali->dev_info.nBitsInPageNumber); 769 + nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageDataSize: %d\n", 770 + denali->dev_info.nBitsInPageDataSize); 771 + nand_dbg_print(NAND_DBG_DEBUG, "BitsInBlockDataSize: %d\n", 772 + denali->dev_info.nBitsInBlockDataSize); 773 + } 774 + 775 + static uint16_t NAND_Read_Device_ID(struct denali_nand_info *denali) 776 + { 777 + uint16_t status = PASS; 778 + uint8_t no_of_planes; 779 + 780 + nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", 781 + __FILE__, __LINE__, __func__); 782 + 783 + denali->dev_info.wDeviceMaker = ioread32(denali->flash_reg + MANUFACTURER_ID); 784 + denali->dev_info.wDeviceID = ioread32(denali->flash_reg + DEVICE_ID); 785 + denali->dev_info.bDeviceParam0 = ioread32(denali->flash_reg + DEVICE_PARAM_0); 786 + denali->dev_info.bDeviceParam1 = ioread32(denali->flash_reg + DEVICE_PARAM_1); 787 + denali->dev_info.bDeviceParam2 = ioread32(denali->flash_reg + DEVICE_PARAM_2); 788 + 789 + denali->dev_info.MLCDevice = ioread32(denali->flash_reg + DEVICE_PARAM_0) & 0x0c; 790 + 791 + if (ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) & 792 + ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */ 793 + if (FAIL == get_onfi_nand_para(denali)) 794 + return FAIL; 795 + } else if (denali->dev_info.wDeviceMaker == 0xEC) { /* Samsung NAND */ 796 + get_samsung_nand_para(denali); 797 + } else if (denali->dev_info.wDeviceMaker == 0x98) { /* Toshiba NAND */ 798 + get_toshiba_nand_para(denali); 799 + } else if (denali->dev_info.wDeviceMaker == 0xAD) { /* Hynix NAND */ 800 + get_hynix_nand_para(denali); 801 + } else { 802 + denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS; 803 + } 804 + 805 + nand_dbg_print(NAND_DBG_DEBUG, "Dump timing 
register values:" 806 + "acc_clks: %d, re_2_we: %d, we_2_re: %d," 807 + "addr_2_data: %d, rdwr_en_lo_cnt: %d, " 808 + "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n", 809 + ioread32(denali->flash_reg + ACC_CLKS), 810 + ioread32(denali->flash_reg + RE_2_WE), 811 + ioread32(denali->flash_reg + WE_2_RE), 812 + ioread32(denali->flash_reg + ADDR_2_DATA), 813 + ioread32(denali->flash_reg + RDWR_EN_LO_CNT), 814 + ioread32(denali->flash_reg + RDWR_EN_HI_CNT), 815 + ioread32(denali->flash_reg + CS_SETUP_CNT)); 816 + 817 + denali->dev_info.wHWRevision = ioread32(denali->flash_reg + REVISION); 818 + denali->dev_info.wHWFeatures = ioread32(denali->flash_reg + FEATURES); 819 + 820 + denali->dev_info.wDeviceMainAreaSize = 821 + ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE); 822 + denali->dev_info.wDeviceSpareAreaSize = 823 + ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE); 824 + 825 + denali->dev_info.wPageDataSize = 826 + ioread32(denali->flash_reg + LOGICAL_PAGE_DATA_SIZE); 827 + 828 + /* Note: When using the Micon 4K NAND device, the controller will report 829 + * Page Spare Size as 216 bytes. But Micron's Spec say it's 218 bytes. 830 + * And if force set it to 218 bytes, the controller can not work 831 + * correctly. So just let it be. But keep in mind that this bug may 832 + * cause 833 + * other problems in future. 
- Yunpeng 2008-10-10 834 + */ 835 + denali->dev_info.wPageSpareSize = 836 + ioread32(denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE); 837 + 838 + denali->dev_info.wPagesPerBlock = ioread32(denali->flash_reg + PAGES_PER_BLOCK); 839 + 840 + denali->dev_info.wPageSize = 841 + denali->dev_info.wPageDataSize + denali->dev_info.wPageSpareSize; 842 + denali->dev_info.wBlockSize = 843 + denali->dev_info.wPageSize * denali->dev_info.wPagesPerBlock; 844 + denali->dev_info.wBlockDataSize = 845 + denali->dev_info.wPagesPerBlock * denali->dev_info.wPageDataSize; 846 + 847 + denali->dev_info.wDeviceWidth = ioread32(denali->flash_reg + DEVICE_WIDTH); 848 + denali->dev_info.wDeviceType = 849 + ((ioread32(denali->flash_reg + DEVICE_WIDTH) > 0) ? 16 : 8); 850 + 851 + denali->dev_info.wDevicesConnected = ioread32(denali->flash_reg + DEVICES_CONNECTED); 852 + 853 + denali->dev_info.wSpareSkipBytes = 854 + ioread32(denali->flash_reg + SPARE_AREA_SKIP_BYTES) * 855 + denali->dev_info.wDevicesConnected; 856 + 857 + denali->dev_info.nBitsInPageNumber = 858 + ilog2(denali->dev_info.wPagesPerBlock); 859 + denali->dev_info.nBitsInPageDataSize = 860 + ilog2(denali->dev_info.wPageDataSize); 861 + denali->dev_info.nBitsInBlockDataSize = 862 + ilog2(denali->dev_info.wBlockDataSize); 863 + 864 + set_ecc_config(denali); 865 + 866 + no_of_planes = ioread32(denali->flash_reg + NUMBER_OF_PLANES) & 867 + NUMBER_OF_PLANES__VALUE; 868 + 869 + switch (no_of_planes) { 870 + case 0: 871 + case 1: 872 + case 3: 873 + case 7: 874 + denali->dev_info.bPlaneNum = no_of_planes + 1; 875 + break; 876 + default: 877 + status = FAIL; 878 + break; 879 + } 880 + 881 + find_valid_banks(denali); 882 + 883 + detect_partition_feature(denali); 884 + 885 + dump_device_info(denali); 886 + 887 + /* If the user specified to override the default timings 888 + * with a specific ONFI mode, we apply those changes here. 
889 + */ 890 + if (onfi_timing_mode != NAND_DEFAULT_TIMINGS) 891 + { 892 + NAND_ONFi_Timing_Mode(denali, onfi_timing_mode); 893 + } 894 + 895 + return status; 896 + } 897 + 898 + static void NAND_LLD_Enable_Disable_Interrupts(struct denali_nand_info *denali, 899 + uint16_t INT_ENABLE) 900 + { 901 + nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", 902 + __FILE__, __LINE__, __func__); 903 + 904 + if (INT_ENABLE) 905 + denali_write32(1, denali->flash_reg + GLOBAL_INT_ENABLE); 906 + else 907 + denali_write32(0, denali->flash_reg + GLOBAL_INT_ENABLE); 908 + } 909 + 910 + /* validation function to verify that the controlling software is making 911 + a valid request 912 + */ 913 + static inline bool is_flash_bank_valid(int flash_bank) 914 + { 915 + return (flash_bank >= 0 && flash_bank < 4); 916 + } 917 + 918 + static void denali_irq_init(struct denali_nand_info *denali) 919 + { 920 + uint32_t int_mask = 0; 921 + 922 + /* Disable global interrupts */ 923 + NAND_LLD_Enable_Disable_Interrupts(denali, false); 924 + 925 + int_mask = DENALI_IRQ_ALL; 926 + 927 + /* Clear all status bits */ 928 + denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS0); 929 + denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS1); 930 + denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS2); 931 + denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS3); 932 + 933 + denali_irq_enable(denali, int_mask); 934 + } 935 + 936 + static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali) 937 + { 938 + NAND_LLD_Enable_Disable_Interrupts(denali, false); 939 + free_irq(irqnum, denali); 940 + } 941 + 942 + static void denali_irq_enable(struct denali_nand_info *denali, uint32_t int_mask) 943 + { 944 + denali_write32(int_mask, denali->flash_reg + INTR_EN0); 945 + denali_write32(int_mask, denali->flash_reg + INTR_EN1); 946 + denali_write32(int_mask, denali->flash_reg + INTR_EN2); 947 + denali_write32(int_mask, denali->flash_reg + INTR_EN3); 948 + } 949 + 950 + /* This function 
only returns when an interrupt that this driver cares about 951 + * occurs. This is to reduce the overhead of servicing interrupts 952 + */ 953 + static inline uint32_t denali_irq_detected(struct denali_nand_info *denali) 954 + { 955 + return (read_interrupt_status(denali) & DENALI_IRQ_ALL); 956 + } 957 + 958 + /* Interrupts are cleared by writing a 1 to the appropriate status bit */ 959 + static inline void clear_interrupt(struct denali_nand_info *denali, uint32_t irq_mask) 960 + { 961 + uint32_t intr_status_reg = 0; 962 + 963 + intr_status_reg = intr_status_addresses[denali->flash_bank]; 964 + 965 + denali_write32(irq_mask, denali->flash_reg + intr_status_reg); 966 + } 967 + 968 + static void clear_interrupts(struct denali_nand_info *denali) 969 + { 970 + uint32_t status = 0x0; 971 + spin_lock_irq(&denali->irq_lock); 972 + 973 + status = read_interrupt_status(denali); 974 + 975 + #if DEBUG_DENALI 976 + denali->irq_debug_array[denali->idx++] = 0x30000000 | status; 977 + denali->idx %= 32; 978 + #endif 979 + 980 + denali->irq_status = 0x0; 981 + spin_unlock_irq(&denali->irq_lock); 982 + } 983 + 984 + static uint32_t read_interrupt_status(struct denali_nand_info *denali) 985 + { 986 + uint32_t intr_status_reg = 0; 987 + 988 + intr_status_reg = intr_status_addresses[denali->flash_bank]; 989 + 990 + return ioread32(denali->flash_reg + intr_status_reg); 991 + } 992 + 993 + #if DEBUG_DENALI 994 + static void print_irq_log(struct denali_nand_info *denali) 995 + { 996 + int i = 0; 997 + 998 + printk("ISR debug log index = %X\n", denali->idx); 999 + for (i = 0; i < 32; i++) 1000 + { 1001 + printk("%08X: %08X\n", i, denali->irq_debug_array[i]); 1002 + } 1003 + } 1004 + #endif 1005 + 1006 + /* This is the interrupt service routine. It handles all interrupts 1007 + * sent to this device. Note that on CE4100, this is a shared 1008 + * interrupt. 
1009 + */ 1010 + static irqreturn_t denali_isr(int irq, void *dev_id) 1011 + { 1012 + struct denali_nand_info *denali = dev_id; 1013 + uint32_t irq_status = 0x0; 1014 + irqreturn_t result = IRQ_NONE; 1015 + 1016 + spin_lock(&denali->irq_lock); 1017 + 1018 + /* check to see if a valid NAND chip has 1019 + * been selected. 1020 + */ 1021 + if (is_flash_bank_valid(denali->flash_bank)) 1022 + { 1023 + /* check to see if controller generated 1024 + * the interrupt, since this is a shared interrupt */ 1025 + if ((irq_status = denali_irq_detected(denali)) != 0) 1026 + { 1027 + #if DEBUG_DENALI 1028 + denali->irq_debug_array[denali->idx++] = 0x10000000 | irq_status; 1029 + denali->idx %= 32; 1030 + 1031 + printk("IRQ status = 0x%04x\n", irq_status); 1032 + #endif 1033 + /* handle interrupt */ 1034 + /* first acknowledge it */ 1035 + clear_interrupt(denali, irq_status); 1036 + /* store the status in the device context for someone 1037 + to read */ 1038 + denali->irq_status |= irq_status; 1039 + /* notify anyone who cares that it happened */ 1040 + complete(&denali->complete); 1041 + /* tell the OS that we've handled this */ 1042 + result = IRQ_HANDLED; 1043 + } 1044 + } 1045 + spin_unlock(&denali->irq_lock); 1046 + return result; 1047 + } 1048 + #define BANK(x) ((x) << 24) 1049 + 1050 + static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask) 1051 + { 1052 + unsigned long comp_res = 0; 1053 + uint32_t intr_status = 0; 1054 + bool retry = false; 1055 + unsigned long timeout = msecs_to_jiffies(1000); 1056 + 1057 + do 1058 + { 1059 + #if DEBUG_DENALI 1060 + printk("waiting for 0x%x\n", irq_mask); 1061 + #endif 1062 + comp_res = wait_for_completion_timeout(&denali->complete, timeout); 1063 + spin_lock_irq(&denali->irq_lock); 1064 + intr_status = denali->irq_status; 1065 + 1066 + #if DEBUG_DENALI 1067 + denali->irq_debug_array[denali->idx++] = 0x20000000 | (irq_mask << 16) | intr_status; 1068 + denali->idx %= 32; 1069 + #endif 1070 + 1071 + if 
(intr_status & irq_mask) 1072 + { 1073 + denali->irq_status &= ~irq_mask; 1074 + spin_unlock_irq(&denali->irq_lock); 1075 + #if DEBUG_DENALI 1076 + if (retry) printk("status on retry = 0x%x\n", intr_status); 1077 + #endif 1078 + /* our interrupt was detected */ 1079 + break; 1080 + } 1081 + else 1082 + { 1083 + /* these are not the interrupts you are looking for - 1084 + need to wait again */ 1085 + spin_unlock_irq(&denali->irq_lock); 1086 + #if DEBUG_DENALI 1087 + print_irq_log(denali); 1088 + printk("received irq nobody cared: irq_status = 0x%x," 1089 + " irq_mask = 0x%x, timeout = %ld\n", intr_status, irq_mask, comp_res); 1090 + #endif 1091 + retry = true; 1092 + } 1093 + } while (comp_res != 0); 1094 + 1095 + if (comp_res == 0) 1096 + { 1097 + /* timeout */ 1098 + printk(KERN_ERR "timeout occurred, status = 0x%x, mask = 0x%x\n", 1099 + intr_status, irq_mask); 1100 + 1101 + intr_status = 0; 1102 + } 1103 + return intr_status; 1104 + } 1105 + 1106 + /* This helper function setups the registers for ECC and whether or not 1107 + the spare area will be transfered. */ 1108 + static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en, 1109 + bool transfer_spare) 1110 + { 1111 + int ecc_en_flag = 0, transfer_spare_flag = 0; 1112 + 1113 + /* set ECC, transfer spare bits if needed */ 1114 + ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0; 1115 + transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0; 1116 + 1117 + /* Enable spare area/ECC per user's request. */ 1118 + denali_write32(ecc_en_flag, denali->flash_reg + ECC_ENABLE); 1119 + denali_write32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG); 1120 + } 1121 + 1122 + /* sends a pipeline command operation to the controller. See the Denali NAND 1123 + controller's user guide for more information (section 4.2.3.6). 
 */
static int denali_send_pipeline_cmd(struct denali_nand_info *denali, bool ecc_en,
					bool transfer_spare, int access_type,
					int op)
{
	int status = PASS;
	uint32_t addr = 0x0, cmd = 0x0, page_count = 1, irq_status = 0,
		 irq_mask = 0;

	/* only reads complete with LOAD_COMP; writes are acknowledged
	 * later by the caller's own wait. Any other op is a driver bug. */
	if (op == DENALI_READ) irq_mask = INTR_STATUS0__LOAD_COMP;
	else if (op == DENALI_WRITE) irq_mask = 0;
	else BUG();

	setup_ecc_for_xfer(denali, ecc_en, transfer_spare);

#if DEBUG_DENALI
	/* record this command in the debug ring buffer (32 entries) */
	spin_lock_irq(&denali->irq_lock);
	denali->irq_debug_array[denali->idx++] = 0x40000000 | ioread32(denali->flash_reg + ECC_ENABLE) | (access_type << 4);
	denali->idx %= 32;
	spin_unlock_irq(&denali->irq_lock);
#endif


	/* clear interrupts */
	clear_interrupts(denali);

	/* target address = selected bank + page set up by cmdfunc */
	addr = BANK(denali->flash_bank) | denali->page;

	if (op == DENALI_WRITE && access_type != SPARE_ACCESS)
	{
		cmd = MODE_01 | addr;
		denali_write32(cmd, denali->flash_mem);
	}
	else if (op == DENALI_WRITE && access_type == SPARE_ACCESS)
	{
		/* read spare area */
		cmd = MODE_10 | addr;
		index_addr(denali, (uint32_t)cmd, access_type);

		cmd = MODE_01 | addr;
		denali_write32(cmd, denali->flash_mem);
	}
	else if (op == DENALI_READ)
	{
		/* setup page read request for access type */
		cmd = MODE_10 | addr;
		index_addr(denali, (uint32_t)cmd, access_type);

		/* page 33 of the NAND controller spec indicates we should not
		   use the pipeline commands in Spare area only mode. So we
		   don't.
		 */
		if (access_type == SPARE_ACCESS)
		{
			cmd = MODE_01 | addr;
			denali_write32(cmd, denali->flash_mem);
		}
		else
		{
			index_addr(denali, (uint32_t)cmd, 0x2000 | op | page_count);

			/* wait for command to be accepted
			 * can always use status0 bit as the mask is identical for each
			 * bank. */
			irq_status = wait_for_irq(denali, irq_mask);

			/* irq_status == 0 means wait_for_irq timed out */
			if (irq_status == 0)
			{
				printk(KERN_ERR "cmd, page, addr on timeout "
					"(0x%x, 0x%x, 0x%x)\n", cmd, denali->page, addr);
				status = FAIL;
			}
			else
			{
				cmd = MODE_01 | addr;
				denali_write32(cmd, denali->flash_mem);
			}
		}
	}
	return status;
}

/* helper function that simply writes a buffer to the flash */
static int write_data_to_flash_mem(struct denali_nand_info *denali, const uint8_t *buf,
					int len)
{
	uint32_t i = 0, *buf32;

	/* verify that the len is a multiple of 4. see comment in
	 * read_data_from_flash_mem() */
	BUG_ON((len % 4) != 0);

	/* write the data to the flash memory */
	buf32 = (uint32_t *)buf;
	for (i = 0; i < len / 4; i++)
	{
		denali_write32(*buf32++, denali->flash_mem + 0x10);
	}
	return i*4; /* intent is to return the number of bytes written */
}

/* helper function that simply reads a buffer from the flash */
static int read_data_from_flash_mem(struct denali_nand_info *denali, uint8_t *buf,
					int len)
{
	uint32_t i = 0, *buf32;

	/* we assume that len will be a multiple of 4, if not
	 * it would be nice to know about it ASAP rather than
	 * have random failures...
	 *
	 * This assumption is based on the fact that this
	 * function is designed to be used to read flash pages,
	 * which are typically multiples of 4...
	 */

	BUG_ON((len % 4) != 0);

	/* transfer the data from the flash */
	buf32 = (uint32_t *)buf;
	for (i = 0; i < len / 4; i++)
	{
		*buf32++ = ioread32(denali->flash_mem + 0x10);
	}
	return i*4; /* intent is to return the number of bytes read */
}

/* writes OOB data to the device.
 * Returns 0 on success, -EIO on timeout or pipeline-command failure. */
static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_status = 0;
	uint32_t irq_mask = INTR_STATUS0__PROGRAM_COMP |
						INTR_STATUS0__PROGRAM_FAIL;
	int status = 0;

	denali->page = page;

	if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
							DENALI_WRITE) == PASS)
	{
		write_data_to_flash_mem(denali, buf, mtd->oobsize);

#if DEBUG_DENALI
		spin_lock_irq(&denali->irq_lock);
		denali->irq_debug_array[denali->idx++] = 0x80000000 | mtd->oobsize;
		denali->idx %= 32;
		spin_unlock_irq(&denali->irq_lock);
#endif


		/* wait for operation to complete */
		irq_status = wait_for_irq(denali, irq_mask);

		/* irq_status == 0 means the wait timed out */
		if (irq_status == 0)
		{
			printk(KERN_ERR "OOB write failed\n");
			status = -EIO;
		}
	}
	else
	{
		printk(KERN_ERR "unable to send pipeline command\n");
		status = -EIO;
	}
	return status;
}

/* reads OOB data from the device into 'buf' (mtd->oobsize bytes) */
static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_mask = INTR_STATUS0__LOAD_COMP, irq_status = 0, addr = 0x0, cmd = 0x0;

	denali->page = page;

#if DEBUG_DENALI
	printk("read_oob %d\n", page);
#endif
	if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
					DENALI_READ) == PASS)
	{
		read_data_from_flash_mem(denali, buf, mtd->oobsize);

		/* wait for command to be accepted
		 * can always use status0 bit as the mask is identical for each
		 * bank. */
		irq_status = wait_for_irq(denali, irq_mask);

		if (irq_status == 0)
		{
			printk(KERN_ERR "page on OOB timeout %d\n", denali->page);
		}

		/* We set the device back to MAIN_ACCESS here as I observed
		 * instability with the controller if you do a block erase
		 * and the last transaction was a SPARE_ACCESS. Block erase
		 * is reliable (according to the MTD test infrastructure)
		 * if you are in MAIN_ACCESS.
		 */
		addr = BANK(denali->flash_bank) | denali->page;
		cmd = MODE_10 | addr;
		index_addr(denali, (uint32_t)cmd, MAIN_ACCESS);

#if DEBUG_DENALI
		spin_lock_irq(&denali->irq_lock);
		denali->irq_debug_array[denali->idx++] = 0x60000000 | mtd->oobsize;
		denali->idx %= 32;
		spin_unlock_irq(&denali->irq_lock);
#endif
	}
}

/* this function examines buffers to see if they contain data that
 * indicate that the buffer is part of an erased region of flash.
1339 + */ 1340 + bool is_erased(uint8_t *buf, int len) 1341 + { 1342 + int i = 0; 1343 + for (i = 0; i < len; i++) 1344 + { 1345 + if (buf[i] != 0xFF) 1346 + { 1347 + return false; 1348 + } 1349 + } 1350 + return true; 1351 + } 1352 + #define ECC_SECTOR_SIZE 512 1353 + 1354 + #define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12) 1355 + #define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET)) 1356 + #define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK) 1357 + #define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO)) 1358 + #define ECC_ERR_DEVICE(x) ((x) & ERR_CORRECTION_INFO__DEVICE_NR >> 8) 1359 + #define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO) 1360 + 1361 + static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf, 1362 + uint8_t *oobbuf, uint32_t irq_status) 1363 + { 1364 + bool check_erased_page = false; 1365 + 1366 + if (irq_status & INTR_STATUS0__ECC_ERR) 1367 + { 1368 + /* read the ECC errors. we'll ignore them for now */ 1369 + uint32_t err_address = 0, err_correction_info = 0; 1370 + uint32_t err_byte = 0, err_sector = 0, err_device = 0; 1371 + uint32_t err_correction_value = 0; 1372 + 1373 + do 1374 + { 1375 + err_address = ioread32(denali->flash_reg + 1376 + ECC_ERROR_ADDRESS); 1377 + err_sector = ECC_SECTOR(err_address); 1378 + err_byte = ECC_BYTE(err_address); 1379 + 1380 + 1381 + err_correction_info = ioread32(denali->flash_reg + 1382 + ERR_CORRECTION_INFO); 1383 + err_correction_value = 1384 + ECC_CORRECTION_VALUE(err_correction_info); 1385 + err_device = ECC_ERR_DEVICE(err_correction_info); 1386 + 1387 + if (ECC_ERROR_CORRECTABLE(err_correction_info)) 1388 + { 1389 + /* offset in our buffer is computed as: 1390 + sector number * sector size + offset in 1391 + sector 1392 + */ 1393 + int offset = err_sector * ECC_SECTOR_SIZE + 1394 + err_byte; 1395 + if (offset < denali->mtd.writesize) 1396 + { 1397 + /* correct the ECC error */ 1398 + buf[offset] ^= err_correction_value; 1399 + 
denali->mtd.ecc_stats.corrected++; 1400 + } 1401 + else 1402 + { 1403 + /* bummer, couldn't correct the error */ 1404 + printk(KERN_ERR "ECC offset invalid\n"); 1405 + denali->mtd.ecc_stats.failed++; 1406 + } 1407 + } 1408 + else 1409 + { 1410 + /* if the error is not correctable, need to 1411 + * look at the page to see if it is an erased page. 1412 + * if so, then it's not a real ECC error */ 1413 + check_erased_page = true; 1414 + } 1415 + 1416 + #if DEBUG_DENALI 1417 + printk("Detected ECC error in page %d: err_addr = 0x%08x," 1418 + " info to fix is 0x%08x\n", denali->page, err_address, 1419 + err_correction_info); 1420 + #endif 1421 + } while (!ECC_LAST_ERR(err_correction_info)); 1422 + } 1423 + return check_erased_page; 1424 + } 1425 + 1426 + /* programs the controller to either enable/disable DMA transfers */ 1427 + static void denali_enable_dma(struct denali_nand_info *denali, bool en) 1428 + { 1429 + uint32_t reg_val = 0x0; 1430 + 1431 + if (en) reg_val = DMA_ENABLE__FLAG; 1432 + 1433 + denali_write32(reg_val, denali->flash_reg + DMA_ENABLE); 1434 + ioread32(denali->flash_reg + DMA_ENABLE); 1435 + } 1436 + 1437 + /* setups the HW to perform the data DMA */ 1438 + static void denali_setup_dma(struct denali_nand_info *denali, int op) 1439 + { 1440 + uint32_t mode = 0x0; 1441 + const int page_count = 1; 1442 + dma_addr_t addr = denali->buf.dma_buf; 1443 + 1444 + mode = MODE_10 | BANK(denali->flash_bank); 1445 + 1446 + /* DMA is a four step process */ 1447 + 1448 + /* 1. setup transfer type and # of pages */ 1449 + index_addr(denali, mode | denali->page, 0x2000 | op | page_count); 1450 + 1451 + /* 2. set memory high address bits 23:8 */ 1452 + index_addr(denali, mode | ((uint16_t)(addr >> 16) << 8), 0x2200); 1453 + 1454 + /* 3. set memory low address bits 23:8 */ 1455 + index_addr(denali, mode | ((uint16_t)addr << 8), 0x2300); 1456 + 1457 + /* 4. 
interrupt when complete, burst len = 64 bytes*/ 1458 + index_addr(denali, mode | 0x14000, 0x2400); 1459 + } 1460 + 1461 + /* writes a page. user specifies type, and this function handles the 1462 + configuration details. */ 1463 + static void write_page(struct mtd_info *mtd, struct nand_chip *chip, 1464 + const uint8_t *buf, bool raw_xfer) 1465 + { 1466 + struct denali_nand_info *denali = mtd_to_denali(mtd); 1467 + struct pci_dev *pci_dev = denali->dev; 1468 + 1469 + dma_addr_t addr = denali->buf.dma_buf; 1470 + size_t size = denali->mtd.writesize + denali->mtd.oobsize; 1471 + 1472 + uint32_t irq_status = 0; 1473 + uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP | 1474 + INTR_STATUS0__PROGRAM_FAIL; 1475 + 1476 + /* if it is a raw xfer, we want to disable ecc, and send 1477 + * the spare area. 1478 + * !raw_xfer - enable ecc 1479 + * raw_xfer - transfer spare 1480 + */ 1481 + setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer); 1482 + 1483 + /* copy buffer into DMA buffer */ 1484 + memcpy(denali->buf.buf, buf, mtd->writesize); 1485 + 1486 + if (raw_xfer) 1487 + { 1488 + /* transfer the data to the spare area */ 1489 + memcpy(denali->buf.buf + mtd->writesize, 1490 + chip->oob_poi, 1491 + mtd->oobsize); 1492 + } 1493 + 1494 + pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_TODEVICE); 1495 + 1496 + clear_interrupts(denali); 1497 + denali_enable_dma(denali, true); 1498 + 1499 + denali_setup_dma(denali, DENALI_WRITE); 1500 + 1501 + /* wait for operation to complete */ 1502 + irq_status = wait_for_irq(denali, irq_mask); 1503 + 1504 + if (irq_status == 0) 1505 + { 1506 + printk(KERN_ERR "timeout on write_page (type = %d)\n", raw_xfer); 1507 + denali->status = 1508 + (irq_status & INTR_STATUS0__PROGRAM_FAIL) ? 
NAND_STATUS_FAIL : 1509 + PASS; 1510 + } 1511 + 1512 + denali_enable_dma(denali, false); 1513 + pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_TODEVICE); 1514 + } 1515 + 1516 + /* NAND core entry points */ 1517 + 1518 + /* this is the callback that the NAND core calls to write a page. Since 1519 + writing a page with ECC or without is similar, all the work is done 1520 + by write_page above. */ 1521 + static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, 1522 + const uint8_t *buf) 1523 + { 1524 + /* for regular page writes, we let HW handle all the ECC 1525 + * data written to the device. */ 1526 + write_page(mtd, chip, buf, false); 1527 + } 1528 + 1529 + /* This is the callback that the NAND core calls to write a page without ECC. 1530 + raw access is similiar to ECC page writes, so all the work is done in the 1531 + write_page() function above. 1532 + */ 1533 + static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1534 + const uint8_t *buf) 1535 + { 1536 + /* for raw page writes, we want to disable ECC and simply write 1537 + whatever data is in the buffer. */ 1538 + write_page(mtd, chip, buf, true); 1539 + } 1540 + 1541 + static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip, 1542 + int page) 1543 + { 1544 + return write_oob_data(mtd, chip->oob_poi, page); 1545 + } 1546 + 1547 + static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 1548 + int page, int sndcmd) 1549 + { 1550 + read_oob_data(mtd, chip->oob_poi, page); 1551 + 1552 + return 0; /* notify NAND core to send command to 1553 + * NAND device. 
*/ 1554 + } 1555 + 1556 + static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, 1557 + uint8_t *buf, int page) 1558 + { 1559 + struct denali_nand_info *denali = mtd_to_denali(mtd); 1560 + struct pci_dev *pci_dev = denali->dev; 1561 + 1562 + dma_addr_t addr = denali->buf.dma_buf; 1563 + size_t size = denali->mtd.writesize + denali->mtd.oobsize; 1564 + 1565 + uint32_t irq_status = 0; 1566 + uint32_t irq_mask = INTR_STATUS0__ECC_TRANSACTION_DONE | 1567 + INTR_STATUS0__ECC_ERR; 1568 + bool check_erased_page = false; 1569 + 1570 + setup_ecc_for_xfer(denali, true, false); 1571 + 1572 + denali_enable_dma(denali, true); 1573 + pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE); 1574 + 1575 + clear_interrupts(denali); 1576 + denali_setup_dma(denali, DENALI_READ); 1577 + 1578 + /* wait for operation to complete */ 1579 + irq_status = wait_for_irq(denali, irq_mask); 1580 + 1581 + pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE); 1582 + 1583 + memcpy(buf, denali->buf.buf, mtd->writesize); 1584 + 1585 + check_erased_page = handle_ecc(denali, buf, chip->oob_poi, irq_status); 1586 + denali_enable_dma(denali, false); 1587 + 1588 + if (check_erased_page) 1589 + { 1590 + read_oob_data(&denali->mtd, chip->oob_poi, denali->page); 1591 + 1592 + /* check ECC failures that may have occurred on erased pages */ 1593 + if (check_erased_page) 1594 + { 1595 + if (!is_erased(buf, denali->mtd.writesize)) 1596 + { 1597 + denali->mtd.ecc_stats.failed++; 1598 + } 1599 + if (!is_erased(buf, denali->mtd.oobsize)) 1600 + { 1601 + denali->mtd.ecc_stats.failed++; 1602 + } 1603 + } 1604 + } 1605 + return 0; 1606 + } 1607 + 1608 + static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1609 + uint8_t *buf, int page) 1610 + { 1611 + struct denali_nand_info *denali = mtd_to_denali(mtd); 1612 + struct pci_dev *pci_dev = denali->dev; 1613 + 1614 + dma_addr_t addr = denali->buf.dma_buf; 1615 + size_t size = 
denali->mtd.writesize + denali->mtd.oobsize; 1616 + 1617 + uint32_t irq_status = 0; 1618 + uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP; 1619 + 1620 + setup_ecc_for_xfer(denali, false, true); 1621 + denali_enable_dma(denali, true); 1622 + 1623 + pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE); 1624 + 1625 + clear_interrupts(denali); 1626 + denali_setup_dma(denali, DENALI_READ); 1627 + 1628 + /* wait for operation to complete */ 1629 + irq_status = wait_for_irq(denali, irq_mask); 1630 + 1631 + pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE); 1632 + 1633 + denali_enable_dma(denali, false); 1634 + 1635 + memcpy(buf, denali->buf.buf, mtd->writesize); 1636 + memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize); 1637 + 1638 + return 0; 1639 + } 1640 + 1641 + static uint8_t denali_read_byte(struct mtd_info *mtd) 1642 + { 1643 + struct denali_nand_info *denali = mtd_to_denali(mtd); 1644 + uint8_t result = 0xff; 1645 + 1646 + if (denali->buf.head < denali->buf.tail) 1647 + { 1648 + result = denali->buf.buf[denali->buf.head++]; 1649 + } 1650 + 1651 + #if DEBUG_DENALI 1652 + printk("read byte -> 0x%02x\n", result); 1653 + #endif 1654 + return result; 1655 + } 1656 + 1657 + static void denali_select_chip(struct mtd_info *mtd, int chip) 1658 + { 1659 + struct denali_nand_info *denali = mtd_to_denali(mtd); 1660 + #if DEBUG_DENALI 1661 + printk("denali select chip %d\n", chip); 1662 + #endif 1663 + spin_lock_irq(&denali->irq_lock); 1664 + denali->flash_bank = chip; 1665 + spin_unlock_irq(&denali->irq_lock); 1666 + } 1667 + 1668 + static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip) 1669 + { 1670 + struct denali_nand_info *denali = mtd_to_denali(mtd); 1671 + int status = denali->status; 1672 + denali->status = 0; 1673 + 1674 + #if DEBUG_DENALI 1675 + printk("waitfunc %d\n", status); 1676 + #endif 1677 + return status; 1678 + } 1679 + 1680 + static void denali_erase(struct mtd_info *mtd, int page) 
1681 + { 1682 + struct denali_nand_info *denali = mtd_to_denali(mtd); 1683 + 1684 + uint32_t cmd = 0x0, irq_status = 0; 1685 + 1686 + #if DEBUG_DENALI 1687 + printk("erase page: %d\n", page); 1688 + #endif 1689 + /* clear interrupts */ 1690 + clear_interrupts(denali); 1691 + 1692 + /* setup page read request for access type */ 1693 + cmd = MODE_10 | BANK(denali->flash_bank) | page; 1694 + index_addr(denali, (uint32_t)cmd, 0x1); 1695 + 1696 + /* wait for erase to complete or failure to occur */ 1697 + irq_status = wait_for_irq(denali, INTR_STATUS0__ERASE_COMP | 1698 + INTR_STATUS0__ERASE_FAIL); 1699 + 1700 + denali->status = (irq_status & INTR_STATUS0__ERASE_FAIL) ? NAND_STATUS_FAIL : 1701 + PASS; 1702 + } 1703 + 1704 + static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col, 1705 + int page) 1706 + { 1707 + struct denali_nand_info *denali = mtd_to_denali(mtd); 1708 + 1709 + #if DEBUG_DENALI 1710 + printk("cmdfunc: 0x%x %d %d\n", cmd, col, page); 1711 + #endif 1712 + switch (cmd) 1713 + { 1714 + case NAND_CMD_PAGEPROG: 1715 + break; 1716 + case NAND_CMD_STATUS: 1717 + read_status(denali); 1718 + break; 1719 + case NAND_CMD_READID: 1720 + reset_buf(denali); 1721 + if (denali->flash_bank < denali->total_used_banks) 1722 + { 1723 + /* write manufacturer information into nand 1724 + buffer for NAND subsystem to fetch. 
1725 + */ 1726 + write_byte_to_buf(denali, denali->dev_info.wDeviceMaker); 1727 + write_byte_to_buf(denali, denali->dev_info.wDeviceID); 1728 + write_byte_to_buf(denali, denali->dev_info.bDeviceParam0); 1729 + write_byte_to_buf(denali, denali->dev_info.bDeviceParam1); 1730 + write_byte_to_buf(denali, denali->dev_info.bDeviceParam2); 1731 + } 1732 + else 1733 + { 1734 + int i; 1735 + for (i = 0; i < 5; i++) 1736 + write_byte_to_buf(denali, 0xff); 1737 + } 1738 + break; 1739 + case NAND_CMD_READ0: 1740 + case NAND_CMD_SEQIN: 1741 + denali->page = page; 1742 + break; 1743 + case NAND_CMD_RESET: 1744 + reset_bank(denali); 1745 + break; 1746 + case NAND_CMD_READOOB: 1747 + /* TODO: Read OOB data */ 1748 + break; 1749 + default: 1750 + printk(KERN_ERR ": unsupported command received 0x%x\n", cmd); 1751 + break; 1752 + } 1753 + } 1754 + 1755 + /* stubs for ECC functions not used by the NAND core */ 1756 + static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data, 1757 + uint8_t *ecc_code) 1758 + { 1759 + printk(KERN_ERR "denali_ecc_calculate called unexpectedly\n"); 1760 + BUG(); 1761 + return -EIO; 1762 + } 1763 + 1764 + static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data, 1765 + uint8_t *read_ecc, uint8_t *calc_ecc) 1766 + { 1767 + printk(KERN_ERR "denali_ecc_correct called unexpectedly\n"); 1768 + BUG(); 1769 + return -EIO; 1770 + } 1771 + 1772 + static void denali_ecc_hwctl(struct mtd_info *mtd, int mode) 1773 + { 1774 + printk(KERN_ERR "denali_ecc_hwctl called unexpectedly\n"); 1775 + BUG(); 1776 + } 1777 + /* end NAND core entry points */ 1778 + 1779 + /* Initialization code to bring the device up to a known good state */ 1780 + static void denali_hw_init(struct denali_nand_info *denali) 1781 + { 1782 + denali_irq_init(denali); 1783 + NAND_Flash_Reset(denali); 1784 + denali_write32(0x0F, denali->flash_reg + RB_PIN_ENABLED); 1785 + denali_write32(CHIP_EN_DONT_CARE__FLAG, denali->flash_reg + CHIP_ENABLE_DONT_CARE); 1786 + 1787 + 
denali_write32(0x0, denali->flash_reg + SPARE_AREA_SKIP_BYTES); 1788 + denali_write32(0xffff, denali->flash_reg + SPARE_AREA_MARKER); 1789 + 1790 + /* Should set value for these registers when init */ 1791 + denali_write32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES); 1792 + denali_write32(1, denali->flash_reg + ECC_ENABLE); 1793 + } 1794 + 1795 + /* ECC layout for SLC devices. Denali spec indicates SLC fixed at 4 bytes */ 1796 + #define ECC_BYTES_SLC 4 * (2048 / ECC_SECTOR_SIZE) 1797 + static struct nand_ecclayout nand_oob_slc = { 1798 + .eccbytes = 4, 1799 + .eccpos = { 0, 1, 2, 3 }, /* not used */ 1800 + .oobfree = {{ 1801 + .offset = ECC_BYTES_SLC, 1802 + .length = 64 - ECC_BYTES_SLC 1803 + }} 1804 + }; 1805 + 1806 + #define ECC_BYTES_MLC 14 * (2048 / ECC_SECTOR_SIZE) 1807 + static struct nand_ecclayout nand_oob_mlc_14bit = { 1808 + .eccbytes = 14, 1809 + .eccpos = { 0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13 }, /* not used */ 1810 + .oobfree = {{ 1811 + .offset = ECC_BYTES_MLC, 1812 + .length = 64 - ECC_BYTES_MLC 1813 + }} 1814 + }; 1815 + 1816 + static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' }; 1817 + static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' }; 1818 + 1819 + static struct nand_bbt_descr bbt_main_descr = { 1820 + .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE 1821 + | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, 1822 + .offs = 8, 1823 + .len = 4, 1824 + .veroffs = 12, 1825 + .maxblocks = 4, 1826 + .pattern = bbt_pattern, 1827 + }; 1828 + 1829 + static struct nand_bbt_descr bbt_mirror_descr = { 1830 + .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE 1831 + | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, 1832 + .offs = 8, 1833 + .len = 4, 1834 + .veroffs = 12, 1835 + .maxblocks = 4, 1836 + .pattern = mirror_pattern, 1837 + }; 1838 + 1839 + /* initalize driver data structures */ 1840 + void denali_drv_init(struct denali_nand_info *denali) 1841 + { 1842 + denali->idx = 0; 1843 + 1844 + /* setup 
interrupt handler */ 1845 + /* the completion object will be used to notify 1846 + * the callee that the interrupt is done */ 1847 + init_completion(&denali->complete); 1848 + 1849 + /* the spinlock will be used to synchronize the ISR 1850 + * with any element that might be access shared 1851 + * data (interrupt status) */ 1852 + spin_lock_init(&denali->irq_lock); 1853 + 1854 + /* indicate that MTD has not selected a valid bank yet */ 1855 + denali->flash_bank = CHIP_SELECT_INVALID; 1856 + 1857 + /* initialize our irq_status variable to indicate no interrupts */ 1858 + denali->irq_status = 0; 1859 + } 1860 + 1861 + /* driver entry point */ 1862 + static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) 1863 + { 1864 + int ret = -ENODEV; 1865 + resource_size_t csr_base, mem_base; 1866 + unsigned long csr_len, mem_len; 1867 + struct denali_nand_info *denali; 1868 + 1869 + nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", 1870 + __FILE__, __LINE__, __func__); 1871 + 1872 + denali = kzalloc(sizeof(*denali), GFP_KERNEL); 1873 + if (!denali) 1874 + return -ENOMEM; 1875 + 1876 + ret = pci_enable_device(dev); 1877 + if (ret) { 1878 + printk(KERN_ERR "Spectra: pci_enable_device failed.\n"); 1879 + goto failed_enable; 1880 + } 1881 + 1882 + if (id->driver_data == INTEL_CE4100) { 1883 + /* Due to a silicon limitation, we can only support 1884 + * ONFI timing mode 1 and below. 
1885 + */ 1886 + if (onfi_timing_mode < -1 || onfi_timing_mode > 1) 1887 + { 1888 + printk("Intel CE4100 only supports ONFI timing mode 1 " 1889 + "or below\n"); 1890 + ret = -EINVAL; 1891 + goto failed_enable; 1892 + } 1893 + denali->platform = INTEL_CE4100; 1894 + mem_base = pci_resource_start(dev, 0); 1895 + mem_len = pci_resource_len(dev, 1); 1896 + csr_base = pci_resource_start(dev, 1); 1897 + csr_len = pci_resource_len(dev, 1); 1898 + } else { 1899 + denali->platform = INTEL_MRST; 1900 + csr_base = pci_resource_start(dev, 0); 1901 + csr_len = pci_resource_start(dev, 0); 1902 + mem_base = pci_resource_start(dev, 1); 1903 + mem_len = pci_resource_len(dev, 1); 1904 + if (!mem_len) { 1905 + mem_base = csr_base + csr_len; 1906 + mem_len = csr_len; 1907 + nand_dbg_print(NAND_DBG_WARN, 1908 + "Spectra: No second BAR for PCI device; assuming %08Lx\n", 1909 + (uint64_t)csr_base); 1910 + } 1911 + } 1912 + 1913 + /* Is 32-bit DMA supported? */ 1914 + ret = pci_set_dma_mask(dev, DMA_BIT_MASK(32)); 1915 + 1916 + if (ret) 1917 + { 1918 + printk(KERN_ERR "Spectra: no usable DMA configuration\n"); 1919 + goto failed_enable; 1920 + } 1921 + denali->buf.dma_buf = pci_map_single(dev, denali->buf.buf, DENALI_BUF_SIZE, 1922 + PCI_DMA_BIDIRECTIONAL); 1923 + 1924 + if (pci_dma_mapping_error(dev, denali->buf.dma_buf)) 1925 + { 1926 + printk(KERN_ERR "Spectra: failed to map DMA buffer\n"); 1927 + goto failed_enable; 1928 + } 1929 + 1930 + pci_set_master(dev); 1931 + denali->dev = dev; 1932 + 1933 + ret = pci_request_regions(dev, DENALI_NAND_NAME); 1934 + if (ret) { 1935 + printk(KERN_ERR "Spectra: Unable to request memory regions\n"); 1936 + goto failed_req_csr; 1937 + } 1938 + 1939 + denali->flash_reg = ioremap_nocache(csr_base, csr_len); 1940 + if (!denali->flash_reg) { 1941 + printk(KERN_ERR "Spectra: Unable to remap memory region\n"); 1942 + ret = -ENOMEM; 1943 + goto failed_remap_csr; 1944 + } 1945 + nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08Lx -> 0x%p (0x%lx)\n", 1946 + 
(uint64_t)csr_base, denali->flash_reg, csr_len); 1947 + 1948 + denali->flash_mem = ioremap_nocache(mem_base, mem_len); 1949 + if (!denali->flash_mem) { 1950 + printk(KERN_ERR "Spectra: ioremap_nocache failed!"); 1951 + iounmap(denali->flash_reg); 1952 + ret = -ENOMEM; 1953 + goto failed_remap_csr; 1954 + } 1955 + 1956 + nand_dbg_print(NAND_DBG_WARN, 1957 + "Spectra: Remapped flash base address: " 1958 + "0x%p, len: %ld\n", 1959 + denali->flash_mem, csr_len); 1960 + 1961 + denali_hw_init(denali); 1962 + denali_drv_init(denali); 1963 + 1964 + nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq); 1965 + if (request_irq(dev->irq, denali_isr, IRQF_SHARED, 1966 + DENALI_NAND_NAME, denali)) { 1967 + printk(KERN_ERR "Spectra: Unable to allocate IRQ\n"); 1968 + ret = -ENODEV; 1969 + goto failed_request_irq; 1970 + } 1971 + 1972 + /* now that our ISR is registered, we can enable interrupts */ 1973 + NAND_LLD_Enable_Disable_Interrupts(denali, true); 1974 + 1975 + pci_set_drvdata(dev, denali); 1976 + 1977 + NAND_Read_Device_ID(denali); 1978 + 1979 + /* MTD supported page sizes vary by kernel. We validate our 1980 + kernel supports the device here. 
1981 + */ 1982 + if (denali->dev_info.wPageSize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) 1983 + { 1984 + ret = -ENODEV; 1985 + printk(KERN_ERR "Spectra: device size not supported by this " 1986 + "version of MTD."); 1987 + goto failed_nand; 1988 + } 1989 + 1990 + nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:" 1991 + "acc_clks: %d, re_2_we: %d, we_2_re: %d," 1992 + "addr_2_data: %d, rdwr_en_lo_cnt: %d, " 1993 + "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n", 1994 + ioread32(denali->flash_reg + ACC_CLKS), 1995 + ioread32(denali->flash_reg + RE_2_WE), 1996 + ioread32(denali->flash_reg + WE_2_RE), 1997 + ioread32(denali->flash_reg + ADDR_2_DATA), 1998 + ioread32(denali->flash_reg + RDWR_EN_LO_CNT), 1999 + ioread32(denali->flash_reg + RDWR_EN_HI_CNT), 2000 + ioread32(denali->flash_reg + CS_SETUP_CNT)); 2001 + 2002 + denali->mtd.name = "Denali NAND"; 2003 + denali->mtd.owner = THIS_MODULE; 2004 + denali->mtd.priv = &denali->nand; 2005 + 2006 + /* register the driver with the NAND core subsystem */ 2007 + denali->nand.select_chip = denali_select_chip; 2008 + denali->nand.cmdfunc = denali_cmdfunc; 2009 + denali->nand.read_byte = denali_read_byte; 2010 + denali->nand.waitfunc = denali_waitfunc; 2011 + 2012 + /* scan for NAND devices attached to the controller 2013 + * this is the first stage in a two step process to register 2014 + * with the nand subsystem */ 2015 + if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL)) 2016 + { 2017 + ret = -ENXIO; 2018 + goto failed_nand; 2019 + } 2020 + 2021 + /* second stage of the NAND scan 2022 + * this stage requires information regarding ECC and 2023 + * bad block management. 
*/ 2024 + 2025 + /* Bad block management */ 2026 + denali->nand.bbt_td = &bbt_main_descr; 2027 + denali->nand.bbt_md = &bbt_mirror_descr; 2028 + 2029 + /* skip the scan for now until we have OOB read and write support */ 2030 + denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN; 2031 + denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME; 2032 + 2033 + if (denali->dev_info.MLCDevice) 2034 + { 2035 + denali->nand.ecc.layout = &nand_oob_mlc_14bit; 2036 + denali->nand.ecc.bytes = ECC_BYTES_MLC; 2037 + } 2038 + else /* SLC */ 2039 + { 2040 + denali->nand.ecc.layout = &nand_oob_slc; 2041 + denali->nand.ecc.bytes = ECC_BYTES_SLC; 2042 + } 2043 + 2044 + /* These functions are required by the NAND core framework, otherwise, 2045 + the NAND core will assert. However, we don't need them, so we'll stub 2046 + them out. */ 2047 + denali->nand.ecc.calculate = denali_ecc_calculate; 2048 + denali->nand.ecc.correct = denali_ecc_correct; 2049 + denali->nand.ecc.hwctl = denali_ecc_hwctl; 2050 + 2051 + /* override the default read operations */ 2052 + denali->nand.ecc.size = denali->mtd.writesize; 2053 + denali->nand.ecc.read_page = denali_read_page; 2054 + denali->nand.ecc.read_page_raw = denali_read_page_raw; 2055 + denali->nand.ecc.write_page = denali_write_page; 2056 + denali->nand.ecc.write_page_raw = denali_write_page_raw; 2057 + denali->nand.ecc.read_oob = denali_read_oob; 2058 + denali->nand.ecc.write_oob = denali_write_oob; 2059 + denali->nand.erase_cmd = denali_erase; 2060 + 2061 + if (nand_scan_tail(&denali->mtd)) 2062 + { 2063 + ret = -ENXIO; 2064 + goto failed_nand; 2065 + } 2066 + 2067 + ret = add_mtd_device(&denali->mtd); 2068 + if (ret) { 2069 + printk(KERN_ERR "Spectra: Failed to register MTD device: %d\n", ret); 2070 + goto failed_nand; 2071 + } 2072 + return 0; 2073 + 2074 + failed_nand: 2075 + denali_irq_cleanup(dev->irq, denali); 2076 + failed_request_irq: 2077 + iounmap(denali->flash_reg); 2078 + iounmap(denali->flash_mem); 2079 + failed_remap_csr: 2080 + 
pci_release_regions(dev); 2081 + failed_req_csr: 2082 + pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE, 2083 + PCI_DMA_BIDIRECTIONAL); 2084 + failed_enable: 2085 + kfree(denali); 2086 + return ret; 2087 + } 2088 + 2089 + /* driver exit point */ 2090 + static void denali_pci_remove(struct pci_dev *dev) 2091 + { 2092 + struct denali_nand_info *denali = pci_get_drvdata(dev); 2093 + 2094 + nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n", 2095 + __FILE__, __LINE__, __func__); 2096 + 2097 + nand_release(&denali->mtd); 2098 + del_mtd_device(&denali->mtd); 2099 + 2100 + denali_irq_cleanup(dev->irq, denali); 2101 + 2102 + iounmap(denali->flash_reg); 2103 + iounmap(denali->flash_mem); 2104 + pci_release_regions(dev); 2105 + pci_disable_device(dev); 2106 + pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE, 2107 + PCI_DMA_BIDIRECTIONAL); 2108 + pci_set_drvdata(dev, NULL); 2109 + kfree(denali); 2110 + } 2111 + 2112 + MODULE_DEVICE_TABLE(pci, denali_pci_ids); 2113 + 2114 + static struct pci_driver denali_pci_driver = { 2115 + .name = DENALI_NAND_NAME, 2116 + .id_table = denali_pci_ids, 2117 + .probe = denali_pci_probe, 2118 + .remove = denali_pci_remove, 2119 + }; 2120 + 2121 + static int __devinit denali_init(void) 2122 + { 2123 + printk(KERN_INFO "Spectra MTD driver built on %s @ %s\n", __DATE__, __TIME__); 2124 + return pci_register_driver(&denali_pci_driver); 2125 + } 2126 + 2127 + /* Free memory */ 2128 + static void __devexit denali_exit(void) 2129 + { 2130 + pci_unregister_driver(&denali_pci_driver); 2131 + } 2132 + 2133 + module_init(denali_init); 2134 + module_exit(denali_exit);
+816
drivers/mtd/nand/denali.h
··· 1 + /* 2 + * NAND Flash Controller Device Driver 3 + * Copyright (c) 2009 - 2010, Intel Corporation and its suppliers. 4 + * 5 + * This program is free software; you can redistribute it and/or modify it 6 + * under the terms and conditions of the GNU General Public License, 7 + * version 2, as published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope it will be useful, but WITHOUT 10 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 + * more details. 13 + * 14 + * You should have received a copy of the GNU General Public License along with 15 + * this program; if not, write to the Free Software Foundation, Inc., 16 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 + * 18 + */ 19 + 20 + #include <linux/mtd/nand.h> 21 + 22 + #define DEVICE_RESET 0x0 23 + #define DEVICE_RESET__BANK0 0x0001 24 + #define DEVICE_RESET__BANK1 0x0002 25 + #define DEVICE_RESET__BANK2 0x0004 26 + #define DEVICE_RESET__BANK3 0x0008 27 + 28 + #define TRANSFER_SPARE_REG 0x10 29 + #define TRANSFER_SPARE_REG__FLAG 0x0001 30 + 31 + #define LOAD_WAIT_CNT 0x20 32 + #define LOAD_WAIT_CNT__VALUE 0xffff 33 + 34 + #define PROGRAM_WAIT_CNT 0x30 35 + #define PROGRAM_WAIT_CNT__VALUE 0xffff 36 + 37 + #define ERASE_WAIT_CNT 0x40 38 + #define ERASE_WAIT_CNT__VALUE 0xffff 39 + 40 + #define INT_MON_CYCCNT 0x50 41 + #define INT_MON_CYCCNT__VALUE 0xffff 42 + 43 + #define RB_PIN_ENABLED 0x60 44 + #define RB_PIN_ENABLED__BANK0 0x0001 45 + #define RB_PIN_ENABLED__BANK1 0x0002 46 + #define RB_PIN_ENABLED__BANK2 0x0004 47 + #define RB_PIN_ENABLED__BANK3 0x0008 48 + 49 + #define MULTIPLANE_OPERATION 0x70 50 + #define MULTIPLANE_OPERATION__FLAG 0x0001 51 + 52 + #define MULTIPLANE_READ_ENABLE 0x80 53 + #define MULTIPLANE_READ_ENABLE__FLAG 0x0001 54 + 55 + #define COPYBACK_DISABLE 0x90 56 + #define COPYBACK_DISABLE__FLAG 0x0001 57 + 58 + #define 
CACHE_WRITE_ENABLE 0xa0 59 + #define CACHE_WRITE_ENABLE__FLAG 0x0001 60 + 61 + #define CACHE_READ_ENABLE 0xb0 62 + #define CACHE_READ_ENABLE__FLAG 0x0001 63 + 64 + #define PREFETCH_MODE 0xc0 65 + #define PREFETCH_MODE__PREFETCH_EN 0x0001 66 + #define PREFETCH_MODE__PREFETCH_BURST_LENGTH 0xfff0 67 + 68 + #define CHIP_ENABLE_DONT_CARE 0xd0 69 + #define CHIP_EN_DONT_CARE__FLAG 0x01 70 + 71 + #define ECC_ENABLE 0xe0 72 + #define ECC_ENABLE__FLAG 0x0001 73 + 74 + #define GLOBAL_INT_ENABLE 0xf0 75 + #define GLOBAL_INT_EN_FLAG 0x01 76 + 77 + #define WE_2_RE 0x100 78 + #define WE_2_RE__VALUE 0x003f 79 + 80 + #define ADDR_2_DATA 0x110 81 + #define ADDR_2_DATA__VALUE 0x003f 82 + 83 + #define RE_2_WE 0x120 84 + #define RE_2_WE__VALUE 0x003f 85 + 86 + #define ACC_CLKS 0x130 87 + #define ACC_CLKS__VALUE 0x000f 88 + 89 + #define NUMBER_OF_PLANES 0x140 90 + #define NUMBER_OF_PLANES__VALUE 0x0007 91 + 92 + #define PAGES_PER_BLOCK 0x150 93 + #define PAGES_PER_BLOCK__VALUE 0xffff 94 + 95 + #define DEVICE_WIDTH 0x160 96 + #define DEVICE_WIDTH__VALUE 0x0003 97 + 98 + #define DEVICE_MAIN_AREA_SIZE 0x170 99 + #define DEVICE_MAIN_AREA_SIZE__VALUE 0xffff 100 + 101 + #define DEVICE_SPARE_AREA_SIZE 0x180 102 + #define DEVICE_SPARE_AREA_SIZE__VALUE 0xffff 103 + 104 + #define TWO_ROW_ADDR_CYCLES 0x190 105 + #define TWO_ROW_ADDR_CYCLES__FLAG 0x0001 106 + 107 + #define MULTIPLANE_ADDR_RESTRICT 0x1a0 108 + #define MULTIPLANE_ADDR_RESTRICT__FLAG 0x0001 109 + 110 + #define ECC_CORRECTION 0x1b0 111 + #define ECC_CORRECTION__VALUE 0x001f 112 + 113 + #define READ_MODE 0x1c0 114 + #define READ_MODE__VALUE 0x000f 115 + 116 + #define WRITE_MODE 0x1d0 117 + #define WRITE_MODE__VALUE 0x000f 118 + 119 + #define COPYBACK_MODE 0x1e0 120 + #define COPYBACK_MODE__VALUE 0x000f 121 + 122 + #define RDWR_EN_LO_CNT 0x1f0 123 + #define RDWR_EN_LO_CNT__VALUE 0x001f 124 + 125 + #define RDWR_EN_HI_CNT 0x200 126 + #define RDWR_EN_HI_CNT__VALUE 0x001f 127 + 128 + #define MAX_RD_DELAY 0x210 129 + #define 
MAX_RD_DELAY__VALUE 0x000f 130 + 131 + #define CS_SETUP_CNT 0x220 132 + #define CS_SETUP_CNT__VALUE 0x001f 133 + 134 + #define SPARE_AREA_SKIP_BYTES 0x230 135 + #define SPARE_AREA_SKIP_BYTES__VALUE 0x003f 136 + 137 + #define SPARE_AREA_MARKER 0x240 138 + #define SPARE_AREA_MARKER__VALUE 0xffff 139 + 140 + #define DEVICES_CONNECTED 0x250 141 + #define DEVICES_CONNECTED__VALUE 0x0007 142 + 143 + #define DIE_MASK 0x260 144 + #define DIE_MASK__VALUE 0x00ff 145 + 146 + #define FIRST_BLOCK_OF_NEXT_PLANE 0x270 147 + #define FIRST_BLOCK_OF_NEXT_PLANE__VALUE 0xffff 148 + 149 + #define WRITE_PROTECT 0x280 150 + #define WRITE_PROTECT__FLAG 0x0001 151 + 152 + #define RE_2_RE 0x290 153 + #define RE_2_RE__VALUE 0x003f 154 + 155 + #define MANUFACTURER_ID 0x300 156 + #define MANUFACTURER_ID__VALUE 0x00ff 157 + 158 + #define DEVICE_ID 0x310 159 + #define DEVICE_ID__VALUE 0x00ff 160 + 161 + #define DEVICE_PARAM_0 0x320 162 + #define DEVICE_PARAM_0__VALUE 0x00ff 163 + 164 + #define DEVICE_PARAM_1 0x330 165 + #define DEVICE_PARAM_1__VALUE 0x00ff 166 + 167 + #define DEVICE_PARAM_2 0x340 168 + #define DEVICE_PARAM_2__VALUE 0x00ff 169 + 170 + #define LOGICAL_PAGE_DATA_SIZE 0x350 171 + #define LOGICAL_PAGE_DATA_SIZE__VALUE 0xffff 172 + 173 + #define LOGICAL_PAGE_SPARE_SIZE 0x360 174 + #define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff 175 + 176 + #define REVISION 0x370 177 + #define REVISION__VALUE 0xffff 178 + 179 + #define ONFI_DEVICE_FEATURES 0x380 180 + #define ONFI_DEVICE_FEATURES__VALUE 0x003f 181 + 182 + #define ONFI_OPTIONAL_COMMANDS 0x390 183 + #define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f 184 + 185 + #define ONFI_TIMING_MODE 0x3a0 186 + #define ONFI_TIMING_MODE__VALUE 0x003f 187 + 188 + #define ONFI_PGM_CACHE_TIMING_MODE 0x3b0 189 + #define ONFI_PGM_CACHE_TIMING_MODE__VALUE 0x003f 190 + 191 + #define ONFI_DEVICE_NO_OF_LUNS 0x3c0 192 + #define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS 0x00ff 193 + #define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE 0x0100 194 + 195 + #define 
ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0 196 + #define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE 0xffff 197 + 198 + #define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0 199 + #define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE 0xffff 200 + 201 + #define FEATURES 0x3f0 202 + #define FEATURES__N_BANKS 0x0003 203 + #define FEATURES__ECC_MAX_ERR 0x003c 204 + #define FEATURES__DMA 0x0040 205 + #define FEATURES__CMD_DMA 0x0080 206 + #define FEATURES__PARTITION 0x0100 207 + #define FEATURES__XDMA_SIDEBAND 0x0200 208 + #define FEATURES__GPREG 0x0400 209 + #define FEATURES__INDEX_ADDR 0x0800 210 + 211 + #define TRANSFER_MODE 0x400 212 + #define TRANSFER_MODE__VALUE 0x0003 213 + 214 + #define INTR_STATUS0 0x410 215 + #define INTR_STATUS0__ECC_TRANSACTION_DONE 0x0001 216 + #define INTR_STATUS0__ECC_ERR 0x0002 217 + #define INTR_STATUS0__DMA_CMD_COMP 0x0004 218 + #define INTR_STATUS0__TIME_OUT 0x0008 219 + #define INTR_STATUS0__PROGRAM_FAIL 0x0010 220 + #define INTR_STATUS0__ERASE_FAIL 0x0020 221 + #define INTR_STATUS0__LOAD_COMP 0x0040 222 + #define INTR_STATUS0__PROGRAM_COMP 0x0080 223 + #define INTR_STATUS0__ERASE_COMP 0x0100 224 + #define INTR_STATUS0__PIPE_CPYBCK_CMD_COMP 0x0200 225 + #define INTR_STATUS0__LOCKED_BLK 0x0400 226 + #define INTR_STATUS0__UNSUP_CMD 0x0800 227 + #define INTR_STATUS0__INT_ACT 0x1000 228 + #define INTR_STATUS0__RST_COMP 0x2000 229 + #define INTR_STATUS0__PIPE_CMD_ERR 0x4000 230 + #define INTR_STATUS0__PAGE_XFER_INC 0x8000 231 + 232 + #define INTR_EN0 0x420 233 + #define INTR_EN0__ECC_TRANSACTION_DONE 0x0001 234 + #define INTR_EN0__ECC_ERR 0x0002 235 + #define INTR_EN0__DMA_CMD_COMP 0x0004 236 + #define INTR_EN0__TIME_OUT 0x0008 237 + #define INTR_EN0__PROGRAM_FAIL 0x0010 238 + #define INTR_EN0__ERASE_FAIL 0x0020 239 + #define INTR_EN0__LOAD_COMP 0x0040 240 + #define INTR_EN0__PROGRAM_COMP 0x0080 241 + #define INTR_EN0__ERASE_COMP 0x0100 242 + #define INTR_EN0__PIPE_CPYBCK_CMD_COMP 0x0200 243 + #define INTR_EN0__LOCKED_BLK 0x0400 244 + #define 
INTR_EN0__UNSUP_CMD 0x0800 245 + #define INTR_EN0__INT_ACT 0x1000 246 + #define INTR_EN0__RST_COMP 0x2000 247 + #define INTR_EN0__PIPE_CMD_ERR 0x4000 248 + #define INTR_EN0__PAGE_XFER_INC 0x8000 249 + 250 + #define PAGE_CNT0 0x430 251 + #define PAGE_CNT0__VALUE 0x00ff 252 + 253 + #define ERR_PAGE_ADDR0 0x440 254 + #define ERR_PAGE_ADDR0__VALUE 0xffff 255 + 256 + #define ERR_BLOCK_ADDR0 0x450 257 + #define ERR_BLOCK_ADDR0__VALUE 0xffff 258 + 259 + #define INTR_STATUS1 0x460 260 + #define INTR_STATUS1__ECC_TRANSACTION_DONE 0x0001 261 + #define INTR_STATUS1__ECC_ERR 0x0002 262 + #define INTR_STATUS1__DMA_CMD_COMP 0x0004 263 + #define INTR_STATUS1__TIME_OUT 0x0008 264 + #define INTR_STATUS1__PROGRAM_FAIL 0x0010 265 + #define INTR_STATUS1__ERASE_FAIL 0x0020 266 + #define INTR_STATUS1__LOAD_COMP 0x0040 267 + #define INTR_STATUS1__PROGRAM_COMP 0x0080 268 + #define INTR_STATUS1__ERASE_COMP 0x0100 269 + #define INTR_STATUS1__PIPE_CPYBCK_CMD_COMP 0x0200 270 + #define INTR_STATUS1__LOCKED_BLK 0x0400 271 + #define INTR_STATUS1__UNSUP_CMD 0x0800 272 + #define INTR_STATUS1__INT_ACT 0x1000 273 + #define INTR_STATUS1__RST_COMP 0x2000 274 + #define INTR_STATUS1__PIPE_CMD_ERR 0x4000 275 + #define INTR_STATUS1__PAGE_XFER_INC 0x8000 276 + 277 + #define INTR_EN1 0x470 278 + #define INTR_EN1__ECC_TRANSACTION_DONE 0x0001 279 + #define INTR_EN1__ECC_ERR 0x0002 280 + #define INTR_EN1__DMA_CMD_COMP 0x0004 281 + #define INTR_EN1__TIME_OUT 0x0008 282 + #define INTR_EN1__PROGRAM_FAIL 0x0010 283 + #define INTR_EN1__ERASE_FAIL 0x0020 284 + #define INTR_EN1__LOAD_COMP 0x0040 285 + #define INTR_EN1__PROGRAM_COMP 0x0080 286 + #define INTR_EN1__ERASE_COMP 0x0100 287 + #define INTR_EN1__PIPE_CPYBCK_CMD_COMP 0x0200 288 + #define INTR_EN1__LOCKED_BLK 0x0400 289 + #define INTR_EN1__UNSUP_CMD 0x0800 290 + #define INTR_EN1__INT_ACT 0x1000 291 + #define INTR_EN1__RST_COMP 0x2000 292 + #define INTR_EN1__PIPE_CMD_ERR 0x4000 293 + #define INTR_EN1__PAGE_XFER_INC 0x8000 294 + 295 + #define PAGE_CNT1 0x480 296 
+ #define PAGE_CNT1__VALUE 0x00ff 297 + 298 + #define ERR_PAGE_ADDR1 0x490 299 + #define ERR_PAGE_ADDR1__VALUE 0xffff 300 + 301 + #define ERR_BLOCK_ADDR1 0x4a0 302 + #define ERR_BLOCK_ADDR1__VALUE 0xffff 303 + 304 + #define INTR_STATUS2 0x4b0 305 + #define INTR_STATUS2__ECC_TRANSACTION_DONE 0x0001 306 + #define INTR_STATUS2__ECC_ERR 0x0002 307 + #define INTR_STATUS2__DMA_CMD_COMP 0x0004 308 + #define INTR_STATUS2__TIME_OUT 0x0008 309 + #define INTR_STATUS2__PROGRAM_FAIL 0x0010 310 + #define INTR_STATUS2__ERASE_FAIL 0x0020 311 + #define INTR_STATUS2__LOAD_COMP 0x0040 312 + #define INTR_STATUS2__PROGRAM_COMP 0x0080 313 + #define INTR_STATUS2__ERASE_COMP 0x0100 314 + #define INTR_STATUS2__PIPE_CPYBCK_CMD_COMP 0x0200 315 + #define INTR_STATUS2__LOCKED_BLK 0x0400 316 + #define INTR_STATUS2__UNSUP_CMD 0x0800 317 + #define INTR_STATUS2__INT_ACT 0x1000 318 + #define INTR_STATUS2__RST_COMP 0x2000 319 + #define INTR_STATUS2__PIPE_CMD_ERR 0x4000 320 + #define INTR_STATUS2__PAGE_XFER_INC 0x8000 321 + 322 + #define INTR_EN2 0x4c0 323 + #define INTR_EN2__ECC_TRANSACTION_DONE 0x0001 324 + #define INTR_EN2__ECC_ERR 0x0002 325 + #define INTR_EN2__DMA_CMD_COMP 0x0004 326 + #define INTR_EN2__TIME_OUT 0x0008 327 + #define INTR_EN2__PROGRAM_FAIL 0x0010 328 + #define INTR_EN2__ERASE_FAIL 0x0020 329 + #define INTR_EN2__LOAD_COMP 0x0040 330 + #define INTR_EN2__PROGRAM_COMP 0x0080 331 + #define INTR_EN2__ERASE_COMP 0x0100 332 + #define INTR_EN2__PIPE_CPYBCK_CMD_COMP 0x0200 333 + #define INTR_EN2__LOCKED_BLK 0x0400 334 + #define INTR_EN2__UNSUP_CMD 0x0800 335 + #define INTR_EN2__INT_ACT 0x1000 336 + #define INTR_EN2__RST_COMP 0x2000 337 + #define INTR_EN2__PIPE_CMD_ERR 0x4000 338 + #define INTR_EN2__PAGE_XFER_INC 0x8000 339 + 340 + #define PAGE_CNT2 0x4d0 341 + #define PAGE_CNT2__VALUE 0x00ff 342 + 343 + #define ERR_PAGE_ADDR2 0x4e0 344 + #define ERR_PAGE_ADDR2__VALUE 0xffff 345 + 346 + #define ERR_BLOCK_ADDR2 0x4f0 347 + #define ERR_BLOCK_ADDR2__VALUE 0xffff 348 + 349 + #define 
INTR_STATUS3 0x500 350 + #define INTR_STATUS3__ECC_TRANSACTION_DONE 0x0001 351 + #define INTR_STATUS3__ECC_ERR 0x0002 352 + #define INTR_STATUS3__DMA_CMD_COMP 0x0004 353 + #define INTR_STATUS3__TIME_OUT 0x0008 354 + #define INTR_STATUS3__PROGRAM_FAIL 0x0010 355 + #define INTR_STATUS3__ERASE_FAIL 0x0020 356 + #define INTR_STATUS3__LOAD_COMP 0x0040 357 + #define INTR_STATUS3__PROGRAM_COMP 0x0080 358 + #define INTR_STATUS3__ERASE_COMP 0x0100 359 + #define INTR_STATUS3__PIPE_CPYBCK_CMD_COMP 0x0200 360 + #define INTR_STATUS3__LOCKED_BLK 0x0400 361 + #define INTR_STATUS3__UNSUP_CMD 0x0800 362 + #define INTR_STATUS3__INT_ACT 0x1000 363 + #define INTR_STATUS3__RST_COMP 0x2000 364 + #define INTR_STATUS3__PIPE_CMD_ERR 0x4000 365 + #define INTR_STATUS3__PAGE_XFER_INC 0x8000 366 + 367 + #define INTR_EN3 0x510 368 + #define INTR_EN3__ECC_TRANSACTION_DONE 0x0001 369 + #define INTR_EN3__ECC_ERR 0x0002 370 + #define INTR_EN3__DMA_CMD_COMP 0x0004 371 + #define INTR_EN3__TIME_OUT 0x0008 372 + #define INTR_EN3__PROGRAM_FAIL 0x0010 373 + #define INTR_EN3__ERASE_FAIL 0x0020 374 + #define INTR_EN3__LOAD_COMP 0x0040 375 + #define INTR_EN3__PROGRAM_COMP 0x0080 376 + #define INTR_EN3__ERASE_COMP 0x0100 377 + #define INTR_EN3__PIPE_CPYBCK_CMD_COMP 0x0200 378 + #define INTR_EN3__LOCKED_BLK 0x0400 379 + #define INTR_EN3__UNSUP_CMD 0x0800 380 + #define INTR_EN3__INT_ACT 0x1000 381 + #define INTR_EN3__RST_COMP 0x2000 382 + #define INTR_EN3__PIPE_CMD_ERR 0x4000 383 + #define INTR_EN3__PAGE_XFER_INC 0x8000 384 + 385 + #define PAGE_CNT3 0x520 386 + #define PAGE_CNT3__VALUE 0x00ff 387 + 388 + #define ERR_PAGE_ADDR3 0x530 389 + #define ERR_PAGE_ADDR3__VALUE 0xffff 390 + 391 + #define ERR_BLOCK_ADDR3 0x540 392 + #define ERR_BLOCK_ADDR3__VALUE 0xffff 393 + 394 + #define DATA_INTR 0x550 395 + #define DATA_INTR__WRITE_SPACE_AV 0x0001 396 + #define DATA_INTR__READ_DATA_AV 0x0002 397 + 398 + #define DATA_INTR_EN 0x560 399 + #define DATA_INTR_EN__WRITE_SPACE_AV 0x0001 400 + #define 
DATA_INTR_EN__READ_DATA_AV 0x0002 401 + 402 + #define GPREG_0 0x570 403 + #define GPREG_0__VALUE 0xffff 404 + 405 + #define GPREG_1 0x580 406 + #define GPREG_1__VALUE 0xffff 407 + 408 + #define GPREG_2 0x590 409 + #define GPREG_2__VALUE 0xffff 410 + 411 + #define GPREG_3 0x5a0 412 + #define GPREG_3__VALUE 0xffff 413 + 414 + #define ECC_THRESHOLD 0x600 415 + #define ECC_THRESHOLD__VALUE 0x03ff 416 + 417 + #define ECC_ERROR_BLOCK_ADDRESS 0x610 418 + #define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff 419 + 420 + #define ECC_ERROR_PAGE_ADDRESS 0x620 421 + #define ECC_ERROR_PAGE_ADDRESS__VALUE 0x0fff 422 + #define ECC_ERROR_PAGE_ADDRESS__BANK 0xf000 423 + 424 + #define ECC_ERROR_ADDRESS 0x630 425 + #define ECC_ERROR_ADDRESS__OFFSET 0x0fff 426 + #define ECC_ERROR_ADDRESS__SECTOR_NR 0xf000 427 + 428 + #define ERR_CORRECTION_INFO 0x640 429 + #define ERR_CORRECTION_INFO__BYTEMASK 0x00ff 430 + #define ERR_CORRECTION_INFO__DEVICE_NR 0x0f00 431 + #define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000 432 + #define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000 433 + 434 + #define DMA_ENABLE 0x700 435 + #define DMA_ENABLE__FLAG 0x0001 436 + 437 + #define IGNORE_ECC_DONE 0x710 438 + #define IGNORE_ECC_DONE__FLAG 0x0001 439 + 440 + #define DMA_INTR 0x720 441 + #define DMA_INTR__TARGET_ERROR 0x0001 442 + #define DMA_INTR__DESC_COMP_CHANNEL0 0x0002 443 + #define DMA_INTR__DESC_COMP_CHANNEL1 0x0004 444 + #define DMA_INTR__DESC_COMP_CHANNEL2 0x0008 445 + #define DMA_INTR__DESC_COMP_CHANNEL3 0x0010 446 + #define DMA_INTR__MEMCOPY_DESC_COMP 0x0020 447 + 448 + #define DMA_INTR_EN 0x730 449 + #define DMA_INTR_EN__TARGET_ERROR 0x0001 450 + #define DMA_INTR_EN__DESC_COMP_CHANNEL0 0x0002 451 + #define DMA_INTR_EN__DESC_COMP_CHANNEL1 0x0004 452 + #define DMA_INTR_EN__DESC_COMP_CHANNEL2 0x0008 453 + #define DMA_INTR_EN__DESC_COMP_CHANNEL3 0x0010 454 + #define DMA_INTR_EN__MEMCOPY_DESC_COMP 0x0020 455 + 456 + #define TARGET_ERR_ADDR_LO 0x740 457 + #define TARGET_ERR_ADDR_LO__VALUE 0xffff 458 + 459 + #define 
TARGET_ERR_ADDR_HI 0x750 460 + #define TARGET_ERR_ADDR_HI__VALUE 0xffff 461 + 462 + #define CHNL_ACTIVE 0x760 463 + #define CHNL_ACTIVE__CHANNEL0 0x0001 464 + #define CHNL_ACTIVE__CHANNEL1 0x0002 465 + #define CHNL_ACTIVE__CHANNEL2 0x0004 466 + #define CHNL_ACTIVE__CHANNEL3 0x0008 467 + 468 + #define ACTIVE_SRC_ID 0x800 469 + #define ACTIVE_SRC_ID__VALUE 0x00ff 470 + 471 + #define PTN_INTR 0x810 472 + #define PTN_INTR__CONFIG_ERROR 0x0001 473 + #define PTN_INTR__ACCESS_ERROR_BANK0 0x0002 474 + #define PTN_INTR__ACCESS_ERROR_BANK1 0x0004 475 + #define PTN_INTR__ACCESS_ERROR_BANK2 0x0008 476 + #define PTN_INTR__ACCESS_ERROR_BANK3 0x0010 477 + #define PTN_INTR__REG_ACCESS_ERROR 0x0020 478 + 479 + #define PTN_INTR_EN 0x820 480 + #define PTN_INTR_EN__CONFIG_ERROR 0x0001 481 + #define PTN_INTR_EN__ACCESS_ERROR_BANK0 0x0002 482 + #define PTN_INTR_EN__ACCESS_ERROR_BANK1 0x0004 483 + #define PTN_INTR_EN__ACCESS_ERROR_BANK2 0x0008 484 + #define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010 485 + #define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020 486 + 487 + #define PERM_SRC_ID_0 0x830 488 + #define PERM_SRC_ID_0__SRCID 0x00ff 489 + #define PERM_SRC_ID_0__DIRECT_ACCESS_ACTIVE 0x0800 490 + #define PERM_SRC_ID_0__WRITE_ACTIVE 0x2000 491 + #define PERM_SRC_ID_0__READ_ACTIVE 0x4000 492 + #define PERM_SRC_ID_0__PARTITION_VALID 0x8000 493 + 494 + #define MIN_BLK_ADDR_0 0x840 495 + #define MIN_BLK_ADDR_0__VALUE 0xffff 496 + 497 + #define MAX_BLK_ADDR_0 0x850 498 + #define MAX_BLK_ADDR_0__VALUE 0xffff 499 + 500 + #define MIN_MAX_BANK_0 0x860 501 + #define MIN_MAX_BANK_0__MIN_VALUE 0x0003 502 + #define MIN_MAX_BANK_0__MAX_VALUE 0x000c 503 + 504 + #define PERM_SRC_ID_1 0x870 505 + #define PERM_SRC_ID_1__SRCID 0x00ff 506 + #define PERM_SRC_ID_1__DIRECT_ACCESS_ACTIVE 0x0800 507 + #define PERM_SRC_ID_1__WRITE_ACTIVE 0x2000 508 + #define PERM_SRC_ID_1__READ_ACTIVE 0x4000 509 + #define PERM_SRC_ID_1__PARTITION_VALID 0x8000 510 + 511 + #define MIN_BLK_ADDR_1 0x880 512 + #define MIN_BLK_ADDR_1__VALUE 
0xffff 513 + 514 + #define MAX_BLK_ADDR_1 0x890 515 + #define MAX_BLK_ADDR_1__VALUE 0xffff 516 + 517 + #define MIN_MAX_BANK_1 0x8a0 518 + #define MIN_MAX_BANK_1__MIN_VALUE 0x0003 519 + #define MIN_MAX_BANK_1__MAX_VALUE 0x000c 520 + 521 + #define PERM_SRC_ID_2 0x8b0 522 + #define PERM_SRC_ID_2__SRCID 0x00ff 523 + #define PERM_SRC_ID_2__DIRECT_ACCESS_ACTIVE 0x0800 524 + #define PERM_SRC_ID_2__WRITE_ACTIVE 0x2000 525 + #define PERM_SRC_ID_2__READ_ACTIVE 0x4000 526 + #define PERM_SRC_ID_2__PARTITION_VALID 0x8000 527 + 528 + #define MIN_BLK_ADDR_2 0x8c0 529 + #define MIN_BLK_ADDR_2__VALUE 0xffff 530 + 531 + #define MAX_BLK_ADDR_2 0x8d0 532 + #define MAX_BLK_ADDR_2__VALUE 0xffff 533 + 534 + #define MIN_MAX_BANK_2 0x8e0 535 + #define MIN_MAX_BANK_2__MIN_VALUE 0x0003 536 + #define MIN_MAX_BANK_2__MAX_VALUE 0x000c 537 + 538 + #define PERM_SRC_ID_3 0x8f0 539 + #define PERM_SRC_ID_3__SRCID 0x00ff 540 + #define PERM_SRC_ID_3__DIRECT_ACCESS_ACTIVE 0x0800 541 + #define PERM_SRC_ID_3__WRITE_ACTIVE 0x2000 542 + #define PERM_SRC_ID_3__READ_ACTIVE 0x4000 543 + #define PERM_SRC_ID_3__PARTITION_VALID 0x8000 544 + 545 + #define MIN_BLK_ADDR_3 0x900 546 + #define MIN_BLK_ADDR_3__VALUE 0xffff 547 + 548 + #define MAX_BLK_ADDR_3 0x910 549 + #define MAX_BLK_ADDR_3__VALUE 0xffff 550 + 551 + #define MIN_MAX_BANK_3 0x920 552 + #define MIN_MAX_BANK_3__MIN_VALUE 0x0003 553 + #define MIN_MAX_BANK_3__MAX_VALUE 0x000c 554 + 555 + #define PERM_SRC_ID_4 0x930 556 + #define PERM_SRC_ID_4__SRCID 0x00ff 557 + #define PERM_SRC_ID_4__DIRECT_ACCESS_ACTIVE 0x0800 558 + #define PERM_SRC_ID_4__WRITE_ACTIVE 0x2000 559 + #define PERM_SRC_ID_4__READ_ACTIVE 0x4000 560 + #define PERM_SRC_ID_4__PARTITION_VALID 0x8000 561 + 562 + #define MIN_BLK_ADDR_4 0x940 563 + #define MIN_BLK_ADDR_4__VALUE 0xffff 564 + 565 + #define MAX_BLK_ADDR_4 0x950 566 + #define MAX_BLK_ADDR_4__VALUE 0xffff 567 + 568 + #define MIN_MAX_BANK_4 0x960 569 + #define MIN_MAX_BANK_4__MIN_VALUE 0x0003 570 + #define MIN_MAX_BANK_4__MAX_VALUE 0x000c 
571 + 572 + #define PERM_SRC_ID_5 0x970 573 + #define PERM_SRC_ID_5__SRCID 0x00ff 574 + #define PERM_SRC_ID_5__DIRECT_ACCESS_ACTIVE 0x0800 575 + #define PERM_SRC_ID_5__WRITE_ACTIVE 0x2000 576 + #define PERM_SRC_ID_5__READ_ACTIVE 0x4000 577 + #define PERM_SRC_ID_5__PARTITION_VALID 0x8000 578 + 579 + #define MIN_BLK_ADDR_5 0x980 580 + #define MIN_BLK_ADDR_5__VALUE 0xffff 581 + 582 + #define MAX_BLK_ADDR_5 0x990 583 + #define MAX_BLK_ADDR_5__VALUE 0xffff 584 + 585 + #define MIN_MAX_BANK_5 0x9a0 586 + #define MIN_MAX_BANK_5__MIN_VALUE 0x0003 587 + #define MIN_MAX_BANK_5__MAX_VALUE 0x000c 588 + 589 + #define PERM_SRC_ID_6 0x9b0 590 + #define PERM_SRC_ID_6__SRCID 0x00ff 591 + #define PERM_SRC_ID_6__DIRECT_ACCESS_ACTIVE 0x0800 592 + #define PERM_SRC_ID_6__WRITE_ACTIVE 0x2000 593 + #define PERM_SRC_ID_6__READ_ACTIVE 0x4000 594 + #define PERM_SRC_ID_6__PARTITION_VALID 0x8000 595 + 596 + #define MIN_BLK_ADDR_6 0x9c0 597 + #define MIN_BLK_ADDR_6__VALUE 0xffff 598 + 599 + #define MAX_BLK_ADDR_6 0x9d0 600 + #define MAX_BLK_ADDR_6__VALUE 0xffff 601 + 602 + #define MIN_MAX_BANK_6 0x9e0 603 + #define MIN_MAX_BANK_6__MIN_VALUE 0x0003 604 + #define MIN_MAX_BANK_6__MAX_VALUE 0x000c 605 + 606 + #define PERM_SRC_ID_7 0x9f0 607 + #define PERM_SRC_ID_7__SRCID 0x00ff 608 + #define PERM_SRC_ID_7__DIRECT_ACCESS_ACTIVE 0x0800 609 + #define PERM_SRC_ID_7__WRITE_ACTIVE 0x2000 610 + #define PERM_SRC_ID_7__READ_ACTIVE 0x4000 611 + #define PERM_SRC_ID_7__PARTITION_VALID 0x8000 612 + 613 + #define MIN_BLK_ADDR_7 0xa00 614 + #define MIN_BLK_ADDR_7__VALUE 0xffff 615 + 616 + #define MAX_BLK_ADDR_7 0xa10 617 + #define MAX_BLK_ADDR_7__VALUE 0xffff 618 + 619 + #define MIN_MAX_BANK_7 0xa20 620 + #define MIN_MAX_BANK_7__MIN_VALUE 0x0003 621 + #define MIN_MAX_BANK_7__MAX_VALUE 0x000c 622 + 623 + /* flash.h */ 624 + struct device_info_tag { 625 + uint16_t wDeviceMaker; 626 + uint16_t wDeviceID; 627 + uint8_t bDeviceParam0; 628 + uint8_t bDeviceParam1; 629 + uint8_t bDeviceParam2; 630 + uint32_t wDeviceType; 
631 + uint32_t wSpectraStartBlock; 632 + uint32_t wSpectraEndBlock; 633 + uint32_t wTotalBlocks; 634 + uint16_t wPagesPerBlock; 635 + uint16_t wPageSize; 636 + uint16_t wPageDataSize; 637 + uint16_t wPageSpareSize; 638 + uint16_t wNumPageSpareFlag; 639 + uint16_t wECCBytesPerSector; 640 + uint32_t wBlockSize; 641 + uint32_t wBlockDataSize; 642 + uint32_t wDataBlockNum; 643 + uint8_t bPlaneNum; 644 + uint16_t wDeviceMainAreaSize; 645 + uint16_t wDeviceSpareAreaSize; 646 + uint16_t wDevicesConnected; 647 + uint16_t wDeviceWidth; 648 + uint16_t wHWRevision; 649 + uint16_t wHWFeatures; 650 + 651 + uint16_t wONFIDevFeatures; 652 + uint16_t wONFIOptCommands; 653 + uint16_t wONFITimingMode; 654 + uint16_t wONFIPgmCacheTimingMode; 655 + 656 + uint16_t MLCDevice; 657 + uint16_t wSpareSkipBytes; 658 + 659 + uint8_t nBitsInPageNumber; 660 + uint8_t nBitsInPageDataSize; 661 + uint8_t nBitsInBlockDataSize; 662 + }; 663 + 664 + /* ffsdefs.h */ 665 + #define CLEAR 0 /*use this to clear a field instead of "fail"*/ 666 + #define SET 1 /*use this to set a field instead of "pass"*/ 667 + #define FAIL 1 /*failed flag*/ 668 + #define PASS 0 /*success flag*/ 669 + #define ERR -1 /*error flag*/ 670 + 671 + /* lld.h */ 672 + #define GOOD_BLOCK 0 673 + #define DEFECTIVE_BLOCK 1 674 + #define READ_ERROR 2 675 + 676 + #define CLK_X 5 677 + #define CLK_MULTI 4 678 + 679 + /* ffsport.h */ 680 + #define VERBOSE 1 681 + 682 + #define NAND_DBG_WARN 1 683 + #define NAND_DBG_DEBUG 2 684 + #define NAND_DBG_TRACE 3 685 + 686 + #ifdef VERBOSE 687 + #define nand_dbg_print(level, args...) \ 688 + do { \ 689 + if (level <= nand_debug_level) \ 690 + printk(KERN_ALERT args); \ 691 + } while (0) 692 + #else 693 + #define nand_dbg_print(level, args...) 
694 + #endif 695 + 696 + 697 + /* spectraswconfig.h */ 698 + #define CMD_DMA 0 699 + 700 + #define SPECTRA_PARTITION_ID 0 701 + /**** Block Table and Reserved Block Parameters *****/ 702 + #define SPECTRA_START_BLOCK 3 703 + #define NUM_FREE_BLOCKS_GATE 30 704 + 705 + /* KBV - Updated to LNW scratch register address */ 706 + #define SCRATCH_REG_ADDR CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR 707 + #define SCRATCH_REG_SIZE 64 708 + 709 + #define GLOB_HWCTL_DEFAULT_BLKS 2048 710 + 711 + #define SUPPORT_15BITECC 1 712 + #define SUPPORT_8BITECC 1 713 + 714 + #define CUSTOM_CONF_PARAMS 0 715 + 716 + #define ONFI_BLOOM_TIME 1 717 + #define MODE5_WORKAROUND 0 718 + 719 + /* lld_nand.h */ 720 + /* 721 + * NAND Flash Controller Device Driver 722 + * Copyright (c) 2009, Intel Corporation and its suppliers. 723 + * 724 + * This program is free software; you can redistribute it and/or modify it 725 + * under the terms and conditions of the GNU General Public License, 726 + * version 2, as published by the Free Software Foundation. 727 + * 728 + * This program is distributed in the hope it will be useful, but WITHOUT 729 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 730 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 731 + * more details. 732 + * 733 + * You should have received a copy of the GNU General Public License along with 734 + * this program; if not, write to the Free Software Foundation, Inc., 735 + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
736 + * 737 + */ 738 + 739 + #ifndef _LLD_NAND_ 740 + #define _LLD_NAND_ 741 + 742 + #define MODE_00 0x00000000 743 + #define MODE_01 0x04000000 744 + #define MODE_10 0x08000000 745 + #define MODE_11 0x0C000000 746 + 747 + 748 + #define DATA_TRANSFER_MODE 0 749 + #define PROTECTION_PER_BLOCK 1 750 + #define LOAD_WAIT_COUNT 2 751 + #define PROGRAM_WAIT_COUNT 3 752 + #define ERASE_WAIT_COUNT 4 753 + #define INT_MONITOR_CYCLE_COUNT 5 754 + #define READ_BUSY_PIN_ENABLED 6 755 + #define MULTIPLANE_OPERATION_SUPPORT 7 756 + #define PRE_FETCH_MODE 8 757 + #define CE_DONT_CARE_SUPPORT 9 758 + #define COPYBACK_SUPPORT 10 759 + #define CACHE_WRITE_SUPPORT 11 760 + #define CACHE_READ_SUPPORT 12 761 + #define NUM_PAGES_IN_BLOCK 13 762 + #define ECC_ENABLE_SELECT 14 763 + #define WRITE_ENABLE_2_READ_ENABLE 15 764 + #define ADDRESS_2_DATA 16 765 + #define READ_ENABLE_2_WRITE_ENABLE 17 766 + #define TWO_ROW_ADDRESS_CYCLES 18 767 + #define MULTIPLANE_ADDRESS_RESTRICT 19 768 + #define ACC_CLOCKS 20 769 + #define READ_WRITE_ENABLE_LOW_COUNT 21 770 + #define READ_WRITE_ENABLE_HIGH_COUNT 22 771 + 772 + #define ECC_SECTOR_SIZE 512 773 + #define LLD_MAX_FLASH_BANKS 4 774 + 775 + #define DENALI_BUF_SIZE NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE 776 + 777 + struct nand_buf 778 + { 779 + int head; 780 + int tail; 781 + uint8_t buf[DENALI_BUF_SIZE]; 782 + dma_addr_t dma_buf; 783 + }; 784 + 785 + #define INTEL_CE4100 1 786 + #define INTEL_MRST 2 787 + 788 + struct denali_nand_info { 789 + struct mtd_info mtd; 790 + struct nand_chip nand; 791 + struct device_info_tag dev_info; 792 + int flash_bank; /* currently selected chip */ 793 + int status; 794 + int platform; 795 + struct nand_buf buf; 796 + struct pci_dev *dev; 797 + int total_used_banks; 798 + uint32_t block; /* stored for future use */ 799 + uint16_t page; 800 + void __iomem *flash_reg; /* Mapped io reg base address */ 801 + void __iomem *flash_mem; /* Mapped io reg base address */ 802 + 803 + /* elements used by ISR */ 804 + struct 
completion complete; 805 + spinlock_t irq_lock; 806 + uint32_t irq_status; 807 + int irq_debug_array[32]; 808 + int idx; 809 + }; 810 + 811 + static uint16_t NAND_Flash_Reset(struct denali_nand_info *denali); 812 + static uint16_t NAND_Read_Device_ID(struct denali_nand_info *denali); 813 + static void NAND_LLD_Enable_Disable_Interrupts(struct denali_nand_info *denali, uint16_t INT_ENABLE); 814 + 815 + #endif /*_LLD_NAND_*/ 816 +
+2 -2
drivers/mtd/nand/fsl_elbc_nand.c
··· 874 874 priv->ctrl = ctrl; 875 875 priv->dev = ctrl->dev; 876 876 877 - priv->vbase = ioremap(res.start, res.end - res.start + 1); 877 + priv->vbase = ioremap(res.start, resource_size(&res)); 878 878 if (!priv->vbase) { 879 879 dev_err(ctrl->dev, "failed to map chip region\n"); 880 880 ret = -ENOMEM; ··· 891 891 if (ret) 892 892 goto err; 893 893 894 - ret = nand_scan_ident(&priv->mtd, 1); 894 + ret = nand_scan_ident(&priv->mtd, 1, NULL); 895 895 if (ret) 896 896 goto err; 897 897
+6 -3
drivers/mtd/nand/fsl_upm.c
··· 49 49 uint32_t wait_flags; 50 50 }; 51 51 52 - #define to_fsl_upm_nand(mtd) container_of(mtd, struct fsl_upm_nand, mtd) 52 + static inline struct fsl_upm_nand *to_fsl_upm_nand(struct mtd_info *mtdinfo) 53 + { 54 + return container_of(mtdinfo, struct fsl_upm_nand, mtd); 55 + } 53 56 54 57 static int fun_chip_ready(struct mtd_info *mtd) 55 58 { ··· 306 303 FSL_UPM_WAIT_WRITE_BYTE; 307 304 308 305 fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start, 309 - io_res.end - io_res.start + 1); 306 + resource_size(&io_res)); 310 307 if (!fun->io_base) { 311 308 ret = -ENOMEM; 312 309 goto err2; ··· 353 350 return 0; 354 351 } 355 352 356 - static struct of_device_id of_fun_match[] = { 353 + static const struct of_device_id of_fun_match[] = { 357 354 { .compatible = "fsl,upm-nand" }, 358 355 {}, 359 356 };
+6 -6
drivers/mtd/nand/gpio.c
··· 181 181 res = platform_get_resource(dev, IORESOURCE_MEM, 1); 182 182 iounmap(gpiomtd->io_sync); 183 183 if (res) 184 - release_mem_region(res->start, res->end - res->start + 1); 184 + release_mem_region(res->start, resource_size(res)); 185 185 186 186 res = platform_get_resource(dev, IORESOURCE_MEM, 0); 187 187 iounmap(gpiomtd->nand_chip.IO_ADDR_R); 188 - release_mem_region(res->start, res->end - res->start + 1); 188 + release_mem_region(res->start, resource_size(res)); 189 189 190 190 if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) 191 191 gpio_set_value(gpiomtd->plat.gpio_nwp, 0); ··· 208 208 { 209 209 void __iomem *ptr; 210 210 211 - if (!request_mem_region(res->start, res->end - res->start + 1, name)) { 211 + if (!request_mem_region(res->start, resource_size(res), name)) { 212 212 *err = -EBUSY; 213 213 return NULL; 214 214 } 215 215 216 216 ptr = ioremap(res->start, size); 217 217 if (!ptr) { 218 - release_mem_region(res->start, res->end - res->start + 1); 218 + release_mem_region(res->start, resource_size(res)); 219 219 *err = -ENOMEM; 220 220 } 221 221 return ptr; ··· 338 338 err_nce: 339 339 iounmap(gpiomtd->io_sync); 340 340 if (res1) 341 - release_mem_region(res1->start, res1->end - res1->start + 1); 341 + release_mem_region(res1->start, resource_size(res1)); 342 342 err_sync: 343 343 iounmap(gpiomtd->nand_chip.IO_ADDR_R); 344 - release_mem_region(res0->start, res0->end - res0->start + 1); 344 + release_mem_region(res0->start, resource_size(res0)); 345 345 err_map: 346 346 kfree(gpiomtd); 347 347 return ret;
+917
drivers/mtd/nand/mpc5121_nfc.c
··· 1 + /* 2 + * Copyright 2004-2008 Freescale Semiconductor, Inc. 3 + * Copyright 2009 Semihalf. 4 + * 5 + * Approved as OSADL project by a majority of OSADL members and funded 6 + * by OSADL membership fees in 2009; for details see www.osadl.org. 7 + * 8 + * Based on original driver from Freescale Semiconductor 9 + * written by John Rigby <jrigby@freescale.com> on basis 10 + * of drivers/mtd/nand/mxc_nand.c. Reworked and extended 11 + * Piotr Ziecik <kosmo@semihalf.com>. 12 + * 13 + * This program is free software; you can redistribute it and/or 14 + * modify it under the terms of the GNU General Public License 15 + * as published by the Free Software Foundation; either version 2 16 + * of the License, or (at your option) any later version. 17 + * This program is distributed in the hope that it will be useful, 18 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 + * GNU General Public License for more details. 21 + * 22 + * You should have received a copy of the GNU General Public License 23 + * along with this program; if not, write to the Free Software 24 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, 25 + * MA 02110-1301, USA. 
26 + */ 27 + 28 + #include <linux/module.h> 29 + #include <linux/clk.h> 30 + #include <linux/gfp.h> 31 + #include <linux/delay.h> 32 + #include <linux/init.h> 33 + #include <linux/interrupt.h> 34 + #include <linux/io.h> 35 + #include <linux/mtd/mtd.h> 36 + #include <linux/mtd/nand.h> 37 + #include <linux/mtd/partitions.h> 38 + #include <linux/of_device.h> 39 + #include <linux/of_platform.h> 40 + 41 + #include <asm/mpc5121.h> 42 + 43 + /* Addresses for NFC MAIN RAM BUFFER areas */ 44 + #define NFC_MAIN_AREA(n) ((n) * 0x200) 45 + 46 + /* Addresses for NFC SPARE BUFFER areas */ 47 + #define NFC_SPARE_BUFFERS 8 48 + #define NFC_SPARE_LEN 0x40 49 + #define NFC_SPARE_AREA(n) (0x1000 + ((n) * NFC_SPARE_LEN)) 50 + 51 + /* MPC5121 NFC registers */ 52 + #define NFC_BUF_ADDR 0x1E04 53 + #define NFC_FLASH_ADDR 0x1E06 54 + #define NFC_FLASH_CMD 0x1E08 55 + #define NFC_CONFIG 0x1E0A 56 + #define NFC_ECC_STATUS1 0x1E0C 57 + #define NFC_ECC_STATUS2 0x1E0E 58 + #define NFC_SPAS 0x1E10 59 + #define NFC_WRPROT 0x1E12 60 + #define NFC_NF_WRPRST 0x1E18 61 + #define NFC_CONFIG1 0x1E1A 62 + #define NFC_CONFIG2 0x1E1C 63 + #define NFC_UNLOCKSTART_BLK0 0x1E20 64 + #define NFC_UNLOCKEND_BLK0 0x1E22 65 + #define NFC_UNLOCKSTART_BLK1 0x1E24 66 + #define NFC_UNLOCKEND_BLK1 0x1E26 67 + #define NFC_UNLOCKSTART_BLK2 0x1E28 68 + #define NFC_UNLOCKEND_BLK2 0x1E2A 69 + #define NFC_UNLOCKSTART_BLK3 0x1E2C 70 + #define NFC_UNLOCKEND_BLK3 0x1E2E 71 + 72 + /* Bit Definitions: NFC_BUF_ADDR */ 73 + #define NFC_RBA_MASK (7 << 0) 74 + #define NFC_ACTIVE_CS_SHIFT 5 75 + #define NFC_ACTIVE_CS_MASK (3 << NFC_ACTIVE_CS_SHIFT) 76 + 77 + /* Bit Definitions: NFC_CONFIG */ 78 + #define NFC_BLS_UNLOCKED (1 << 1) 79 + 80 + /* Bit Definitions: NFC_CONFIG1 */ 81 + #define NFC_ECC_4BIT (1 << 0) 82 + #define NFC_FULL_PAGE_DMA (1 << 1) 83 + #define NFC_SPARE_ONLY (1 << 2) 84 + #define NFC_ECC_ENABLE (1 << 3) 85 + #define NFC_INT_MASK (1 << 4) 86 + #define NFC_BIG_ENDIAN (1 << 5) 87 + #define NFC_RESET (1 << 6) 88 + 
#define NFC_CE (1 << 7) 89 + #define NFC_ONE_CYCLE (1 << 8) 90 + #define NFC_PPB_32 (0 << 9) 91 + #define NFC_PPB_64 (1 << 9) 92 + #define NFC_PPB_128 (2 << 9) 93 + #define NFC_PPB_256 (3 << 9) 94 + #define NFC_PPB_MASK (3 << 9) 95 + #define NFC_FULL_PAGE_INT (1 << 11) 96 + 97 + /* Bit Definitions: NFC_CONFIG2 */ 98 + #define NFC_COMMAND (1 << 0) 99 + #define NFC_ADDRESS (1 << 1) 100 + #define NFC_INPUT (1 << 2) 101 + #define NFC_OUTPUT (1 << 3) 102 + #define NFC_ID (1 << 4) 103 + #define NFC_STATUS (1 << 5) 104 + #define NFC_CMD_FAIL (1 << 15) 105 + #define NFC_INT (1 << 15) 106 + 107 + /* Bit Definitions: NFC_WRPROT */ 108 + #define NFC_WPC_LOCK_TIGHT (1 << 0) 109 + #define NFC_WPC_LOCK (1 << 1) 110 + #define NFC_WPC_UNLOCK (1 << 2) 111 + 112 + #define DRV_NAME "mpc5121_nfc" 113 + 114 + /* Timeouts */ 115 + #define NFC_RESET_TIMEOUT 1000 /* 1 ms */ 116 + #define NFC_TIMEOUT (HZ / 10) /* 1/10 s */ 117 + 118 + struct mpc5121_nfc_prv { 119 + struct mtd_info mtd; 120 + struct nand_chip chip; 121 + int irq; 122 + void __iomem *regs; 123 + struct clk *clk; 124 + wait_queue_head_t irq_waitq; 125 + uint column; 126 + int spareonly; 127 + void __iomem *csreg; 128 + struct device *dev; 129 + }; 130 + 131 + static void mpc5121_nfc_done(struct mtd_info *mtd); 132 + 133 + #ifdef CONFIG_MTD_PARTITIONS 134 + static const char *mpc5121_nfc_pprobes[] = { "cmdlinepart", NULL }; 135 + #endif 136 + 137 + /* Read NFC register */ 138 + static inline u16 nfc_read(struct mtd_info *mtd, uint reg) 139 + { 140 + struct nand_chip *chip = mtd->priv; 141 + struct mpc5121_nfc_prv *prv = chip->priv; 142 + 143 + return in_be16(prv->regs + reg); 144 + } 145 + 146 + /* Write NFC register */ 147 + static inline void nfc_write(struct mtd_info *mtd, uint reg, u16 val) 148 + { 149 + struct nand_chip *chip = mtd->priv; 150 + struct mpc5121_nfc_prv *prv = chip->priv; 151 + 152 + out_be16(prv->regs + reg, val); 153 + } 154 + 155 + /* Set bits in NFC register */ 156 + static inline void nfc_set(struct 
mtd_info *mtd, uint reg, u16 bits) 157 + { 158 + nfc_write(mtd, reg, nfc_read(mtd, reg) | bits); 159 + } 160 + 161 + /* Clear bits in NFC register */ 162 + static inline void nfc_clear(struct mtd_info *mtd, uint reg, u16 bits) 163 + { 164 + nfc_write(mtd, reg, nfc_read(mtd, reg) & ~bits); 165 + } 166 + 167 + /* Invoke address cycle */ 168 + static inline void mpc5121_nfc_send_addr(struct mtd_info *mtd, u16 addr) 169 + { 170 + nfc_write(mtd, NFC_FLASH_ADDR, addr); 171 + nfc_write(mtd, NFC_CONFIG2, NFC_ADDRESS); 172 + mpc5121_nfc_done(mtd); 173 + } 174 + 175 + /* Invoke command cycle */ 176 + static inline void mpc5121_nfc_send_cmd(struct mtd_info *mtd, u16 cmd) 177 + { 178 + nfc_write(mtd, NFC_FLASH_CMD, cmd); 179 + nfc_write(mtd, NFC_CONFIG2, NFC_COMMAND); 180 + mpc5121_nfc_done(mtd); 181 + } 182 + 183 + /* Send data from NFC buffers to NAND flash */ 184 + static inline void mpc5121_nfc_send_prog_page(struct mtd_info *mtd) 185 + { 186 + nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK); 187 + nfc_write(mtd, NFC_CONFIG2, NFC_INPUT); 188 + mpc5121_nfc_done(mtd); 189 + } 190 + 191 + /* Receive data from NAND flash */ 192 + static inline void mpc5121_nfc_send_read_page(struct mtd_info *mtd) 193 + { 194 + nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK); 195 + nfc_write(mtd, NFC_CONFIG2, NFC_OUTPUT); 196 + mpc5121_nfc_done(mtd); 197 + } 198 + 199 + /* Receive ID from NAND flash */ 200 + static inline void mpc5121_nfc_send_read_id(struct mtd_info *mtd) 201 + { 202 + nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK); 203 + nfc_write(mtd, NFC_CONFIG2, NFC_ID); 204 + mpc5121_nfc_done(mtd); 205 + } 206 + 207 + /* Receive status from NAND flash */ 208 + static inline void mpc5121_nfc_send_read_status(struct mtd_info *mtd) 209 + { 210 + nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK); 211 + nfc_write(mtd, NFC_CONFIG2, NFC_STATUS); 212 + mpc5121_nfc_done(mtd); 213 + } 214 + 215 + /* NFC interrupt handler */ 216 + static irqreturn_t mpc5121_nfc_irq(int irq, void *data) 217 + { 218 + struct mtd_info *mtd 
= data; 219 + struct nand_chip *chip = mtd->priv; 220 + struct mpc5121_nfc_prv *prv = chip->priv; 221 + 222 + nfc_set(mtd, NFC_CONFIG1, NFC_INT_MASK); 223 + wake_up(&prv->irq_waitq); 224 + 225 + return IRQ_HANDLED; 226 + } 227 + 228 + /* Wait for operation complete */ 229 + static void mpc5121_nfc_done(struct mtd_info *mtd) 230 + { 231 + struct nand_chip *chip = mtd->priv; 232 + struct mpc5121_nfc_prv *prv = chip->priv; 233 + int rv; 234 + 235 + if ((nfc_read(mtd, NFC_CONFIG2) & NFC_INT) == 0) { 236 + nfc_clear(mtd, NFC_CONFIG1, NFC_INT_MASK); 237 + rv = wait_event_timeout(prv->irq_waitq, 238 + (nfc_read(mtd, NFC_CONFIG2) & NFC_INT), NFC_TIMEOUT); 239 + 240 + if (!rv) 241 + dev_warn(prv->dev, 242 + "Timeout while waiting for interrupt.\n"); 243 + } 244 + 245 + nfc_clear(mtd, NFC_CONFIG2, NFC_INT); 246 + } 247 + 248 + /* Do address cycle(s) */ 249 + static void mpc5121_nfc_addr_cycle(struct mtd_info *mtd, int column, int page) 250 + { 251 + struct nand_chip *chip = mtd->priv; 252 + u32 pagemask = chip->pagemask; 253 + 254 + if (column != -1) { 255 + mpc5121_nfc_send_addr(mtd, column); 256 + if (mtd->writesize > 512) 257 + mpc5121_nfc_send_addr(mtd, column >> 8); 258 + } 259 + 260 + if (page != -1) { 261 + do { 262 + mpc5121_nfc_send_addr(mtd, page & 0xFF); 263 + page >>= 8; 264 + pagemask >>= 8; 265 + } while (pagemask); 266 + } 267 + } 268 + 269 + /* Control chip select signals */ 270 + static void mpc5121_nfc_select_chip(struct mtd_info *mtd, int chip) 271 + { 272 + if (chip < 0) { 273 + nfc_clear(mtd, NFC_CONFIG1, NFC_CE); 274 + return; 275 + } 276 + 277 + nfc_clear(mtd, NFC_BUF_ADDR, NFC_ACTIVE_CS_MASK); 278 + nfc_set(mtd, NFC_BUF_ADDR, (chip << NFC_ACTIVE_CS_SHIFT) & 279 + NFC_ACTIVE_CS_MASK); 280 + nfc_set(mtd, NFC_CONFIG1, NFC_CE); 281 + } 282 + 283 + /* Init external chip select logic on ADS5121 board */ 284 + static int ads5121_chipselect_init(struct mtd_info *mtd) 285 + { 286 + struct nand_chip *chip = mtd->priv; 287 + struct mpc5121_nfc_prv *prv = 
chip->priv; 288 + struct device_node *dn; 289 + 290 + dn = of_find_compatible_node(NULL, NULL, "fsl,mpc5121ads-cpld"); 291 + if (dn) { 292 + prv->csreg = of_iomap(dn, 0); 293 + of_node_put(dn); 294 + if (!prv->csreg) 295 + return -ENOMEM; 296 + 297 + /* CPLD Register 9 controls NAND /CE Lines */ 298 + prv->csreg += 9; 299 + return 0; 300 + } 301 + 302 + return -EINVAL; 303 + } 304 + 305 + /* Control chips select signal on ADS5121 board */ 306 + static void ads5121_select_chip(struct mtd_info *mtd, int chip) 307 + { 308 + struct nand_chip *nand = mtd->priv; 309 + struct mpc5121_nfc_prv *prv = nand->priv; 310 + u8 v; 311 + 312 + v = in_8(prv->csreg); 313 + v |= 0x0F; 314 + 315 + if (chip >= 0) { 316 + mpc5121_nfc_select_chip(mtd, 0); 317 + v &= ~(1 << chip); 318 + } else 319 + mpc5121_nfc_select_chip(mtd, -1); 320 + 321 + out_8(prv->csreg, v); 322 + } 323 + 324 + /* Read NAND Ready/Busy signal */ 325 + static int mpc5121_nfc_dev_ready(struct mtd_info *mtd) 326 + { 327 + /* 328 + * NFC handles ready/busy signal internally. Therefore, this function 329 + * always returns status as ready. 330 + */ 331 + return 1; 332 + } 333 + 334 + /* Write command to NAND flash */ 335 + static void mpc5121_nfc_command(struct mtd_info *mtd, unsigned command, 336 + int column, int page) 337 + { 338 + struct nand_chip *chip = mtd->priv; 339 + struct mpc5121_nfc_prv *prv = chip->priv; 340 + 341 + prv->column = (column >= 0) ? column : 0; 342 + prv->spareonly = 0; 343 + 344 + switch (command) { 345 + case NAND_CMD_PAGEPROG: 346 + mpc5121_nfc_send_prog_page(mtd); 347 + break; 348 + /* 349 + * NFC does not support sub-page reads and writes, 350 + * so emulate them using full page transfers. 
351 + */ 352 + case NAND_CMD_READ0: 353 + column = 0; 354 + break; 355 + 356 + case NAND_CMD_READ1: 357 + prv->column += 256; 358 + command = NAND_CMD_READ0; 359 + column = 0; 360 + break; 361 + 362 + case NAND_CMD_READOOB: 363 + prv->spareonly = 1; 364 + command = NAND_CMD_READ0; 365 + column = 0; 366 + break; 367 + 368 + case NAND_CMD_SEQIN: 369 + mpc5121_nfc_command(mtd, NAND_CMD_READ0, column, page); 370 + column = 0; 371 + break; 372 + 373 + case NAND_CMD_ERASE1: 374 + case NAND_CMD_ERASE2: 375 + case NAND_CMD_READID: 376 + case NAND_CMD_STATUS: 377 + break; 378 + 379 + default: 380 + return; 381 + } 382 + 383 + mpc5121_nfc_send_cmd(mtd, command); 384 + mpc5121_nfc_addr_cycle(mtd, column, page); 385 + 386 + switch (command) { 387 + case NAND_CMD_READ0: 388 + if (mtd->writesize > 512) 389 + mpc5121_nfc_send_cmd(mtd, NAND_CMD_READSTART); 390 + mpc5121_nfc_send_read_page(mtd); 391 + break; 392 + 393 + case NAND_CMD_READID: 394 + mpc5121_nfc_send_read_id(mtd); 395 + break; 396 + 397 + case NAND_CMD_STATUS: 398 + mpc5121_nfc_send_read_status(mtd); 399 + if (chip->options & NAND_BUSWIDTH_16) 400 + prv->column = 1; 401 + else 402 + prv->column = 0; 403 + break; 404 + } 405 + } 406 + 407 + /* Copy data from/to NFC spare buffers. */ 408 + static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset, 409 + u8 *buffer, uint size, int wr) 410 + { 411 + struct nand_chip *nand = mtd->priv; 412 + struct mpc5121_nfc_prv *prv = nand->priv; 413 + uint o, s, sbsize, blksize; 414 + 415 + /* 416 + * NAND spare area is available through NFC spare buffers. 417 + * The NFC divides spare area into (page_size / 512) chunks. 418 + * Each chunk is placed into separate spare memory area, using 419 + * first (spare_size / num_of_chunks) bytes of the buffer. 
420 + * 421 + * For NAND device in which the spare area is not divided fully 422 + * by the number of chunks, number of used bytes in each spare 423 + * buffer is rounded down to the nearest even number of bytes, 424 + * and all remaining bytes are added to the last used spare area. 425 + * 426 + * For more information read section 26.6.10 of MPC5121e 427 + * Microcontroller Reference Manual, Rev. 3. 428 + */ 429 + 430 + /* Calculate number of valid bytes in each spare buffer */ 431 + sbsize = (mtd->oobsize / (mtd->writesize / 512)) & ~1; 432 + 433 + while (size) { 434 + /* Calculate spare buffer number */ 435 + s = offset / sbsize; 436 + if (s > NFC_SPARE_BUFFERS - 1) 437 + s = NFC_SPARE_BUFFERS - 1; 438 + 439 + /* 440 + * Calculate offset to requested data block in selected spare 441 + * buffer and its size. 442 + */ 443 + o = offset - (s * sbsize); 444 + blksize = min(sbsize - o, size); 445 + 446 + if (wr) 447 + memcpy_toio(prv->regs + NFC_SPARE_AREA(s) + o, 448 + buffer, blksize); 449 + else 450 + memcpy_fromio(buffer, 451 + prv->regs + NFC_SPARE_AREA(s) + o, blksize); 452 + 453 + buffer += blksize; 454 + offset += blksize; 455 + size -= blksize; 456 + }; 457 + } 458 + 459 + /* Copy data from/to NFC main and spare buffers */ 460 + static void mpc5121_nfc_buf_copy(struct mtd_info *mtd, u_char *buf, int len, 461 + int wr) 462 + { 463 + struct nand_chip *chip = mtd->priv; 464 + struct mpc5121_nfc_prv *prv = chip->priv; 465 + uint c = prv->column; 466 + uint l; 467 + 468 + /* Handle spare area access */ 469 + if (prv->spareonly || c >= mtd->writesize) { 470 + /* Calculate offset from beginning of spare area */ 471 + if (c >= mtd->writesize) 472 + c -= mtd->writesize; 473 + 474 + prv->column += len; 475 + mpc5121_nfc_copy_spare(mtd, c, buf, len, wr); 476 + return; 477 + } 478 + 479 + /* 480 + * Handle main area access - limit copy length to prevent 481 + * crossing main/spare boundary. 
482 + */ 483 + l = min((uint)len, mtd->writesize - c); 484 + prv->column += l; 485 + 486 + if (wr) 487 + memcpy_toio(prv->regs + NFC_MAIN_AREA(0) + c, buf, l); 488 + else 489 + memcpy_fromio(buf, prv->regs + NFC_MAIN_AREA(0) + c, l); 490 + 491 + /* Handle crossing main/spare boundary */ 492 + if (l != len) { 493 + buf += l; 494 + len -= l; 495 + mpc5121_nfc_buf_copy(mtd, buf, len, wr); 496 + } 497 + } 498 + 499 + /* Read data from NFC buffers */ 500 + static void mpc5121_nfc_read_buf(struct mtd_info *mtd, u_char *buf, int len) 501 + { 502 + mpc5121_nfc_buf_copy(mtd, buf, len, 0); 503 + } 504 + 505 + /* Write data to NFC buffers */ 506 + static void mpc5121_nfc_write_buf(struct mtd_info *mtd, 507 + const u_char *buf, int len) 508 + { 509 + mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1); 510 + } 511 + 512 + /* Compare buffer with NAND flash */ 513 + static int mpc5121_nfc_verify_buf(struct mtd_info *mtd, 514 + const u_char *buf, int len) 515 + { 516 + u_char tmp[256]; 517 + uint bsize; 518 + 519 + while (len) { 520 + bsize = min(len, 256); 521 + mpc5121_nfc_read_buf(mtd, tmp, bsize); 522 + 523 + if (memcmp(buf, tmp, bsize)) 524 + return 1; 525 + 526 + buf += bsize; 527 + len -= bsize; 528 + } 529 + 530 + return 0; 531 + } 532 + 533 + /* Read byte from NFC buffers */ 534 + static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd) 535 + { 536 + u8 tmp; 537 + 538 + mpc5121_nfc_read_buf(mtd, &tmp, sizeof(tmp)); 539 + 540 + return tmp; 541 + } 542 + 543 + /* Read word from NFC buffers */ 544 + static u16 mpc5121_nfc_read_word(struct mtd_info *mtd) 545 + { 546 + u16 tmp; 547 + 548 + mpc5121_nfc_read_buf(mtd, (u_char *)&tmp, sizeof(tmp)); 549 + 550 + return tmp; 551 + } 552 + 553 + /* 554 + * Read NFC configuration from Reset Config Word 555 + * 556 + * NFC is configured during reset in basis of information stored 557 + * in Reset Config Word. There is no other way to set NAND block 558 + * size, spare size and bus width. 
559 + */ 560 + static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd) 561 + { 562 + struct nand_chip *chip = mtd->priv; 563 + struct mpc5121_nfc_prv *prv = chip->priv; 564 + struct mpc512x_reset_module *rm; 565 + struct device_node *rmnode; 566 + uint rcw_pagesize = 0; 567 + uint rcw_sparesize = 0; 568 + uint rcw_width; 569 + uint rcwh; 570 + uint romloc, ps; 571 + 572 + rmnode = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-reset"); 573 + if (!rmnode) { 574 + dev_err(prv->dev, "Missing 'fsl,mpc5121-reset' " 575 + "node in device tree!\n"); 576 + return -ENODEV; 577 + } 578 + 579 + rm = of_iomap(rmnode, 0); 580 + if (!rm) { 581 + dev_err(prv->dev, "Error mapping reset module node!\n"); 582 + return -EBUSY; 583 + } 584 + 585 + rcwh = in_be32(&rm->rcwhr); 586 + 587 + /* Bit 6: NFC bus width */ 588 + rcw_width = ((rcwh >> 6) & 0x1) ? 2 : 1; 589 + 590 + /* Bit 7: NFC Page/Spare size */ 591 + ps = (rcwh >> 7) & 0x1; 592 + 593 + /* Bits [22:21]: ROM Location */ 594 + romloc = (rcwh >> 21) & 0x3; 595 + 596 + /* Decode RCW bits */ 597 + switch ((ps << 2) | romloc) { 598 + case 0x00: 599 + case 0x01: 600 + rcw_pagesize = 512; 601 + rcw_sparesize = 16; 602 + break; 603 + case 0x02: 604 + case 0x03: 605 + rcw_pagesize = 4096; 606 + rcw_sparesize = 128; 607 + break; 608 + case 0x04: 609 + case 0x05: 610 + rcw_pagesize = 2048; 611 + rcw_sparesize = 64; 612 + break; 613 + case 0x06: 614 + case 0x07: 615 + rcw_pagesize = 4096; 616 + rcw_sparesize = 218; 617 + break; 618 + } 619 + 620 + mtd->writesize = rcw_pagesize; 621 + mtd->oobsize = rcw_sparesize; 622 + if (rcw_width == 2) 623 + chip->options |= NAND_BUSWIDTH_16; 624 + 625 + dev_notice(prv->dev, "Configured for " 626 + "%u-bit NAND, page size %u " 627 + "with %u spare.\n", 628 + rcw_width * 8, rcw_pagesize, 629 + rcw_sparesize); 630 + iounmap(rm); 631 + of_node_put(rmnode); 632 + return 0; 633 + } 634 + 635 + /* Free driver resources */ 636 + static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd) 637 + 
{ 638 + struct nand_chip *chip = mtd->priv; 639 + struct mpc5121_nfc_prv *prv = chip->priv; 640 + 641 + if (prv->clk) { 642 + clk_disable(prv->clk); 643 + clk_put(prv->clk); 644 + } 645 + 646 + if (prv->csreg) 647 + iounmap(prv->csreg); 648 + } 649 + 650 + static int __devinit mpc5121_nfc_probe(struct of_device *op, 651 + const struct of_device_id *match) 652 + { 653 + struct device_node *rootnode, *dn = op->node; 654 + struct device *dev = &op->dev; 655 + struct mpc5121_nfc_prv *prv; 656 + struct resource res; 657 + struct mtd_info *mtd; 658 + #ifdef CONFIG_MTD_PARTITIONS 659 + struct mtd_partition *parts; 660 + #endif 661 + struct nand_chip *chip; 662 + unsigned long regs_paddr, regs_size; 663 + const uint *chips_no; 664 + int resettime = 0; 665 + int retval = 0; 666 + int rev, len; 667 + 668 + /* 669 + * Check SoC revision. This driver supports only NFC 670 + * in MPC5121 revision 2 and MPC5123 revision 3. 671 + */ 672 + rev = (mfspr(SPRN_SVR) >> 4) & 0xF; 673 + if ((rev != 2) && (rev != 3)) { 674 + dev_err(dev, "SoC revision %u is not supported!\n", rev); 675 + return -ENXIO; 676 + } 677 + 678 + prv = devm_kzalloc(dev, sizeof(*prv), GFP_KERNEL); 679 + if (!prv) { 680 + dev_err(dev, "Memory exhausted!\n"); 681 + return -ENOMEM; 682 + } 683 + 684 + mtd = &prv->mtd; 685 + chip = &prv->chip; 686 + 687 + mtd->priv = chip; 688 + chip->priv = prv; 689 + prv->dev = dev; 690 + 691 + /* Read NFC configuration from Reset Config Word */ 692 + retval = mpc5121_nfc_read_hw_config(mtd); 693 + if (retval) { 694 + dev_err(dev, "Unable to read NFC config!\n"); 695 + return retval; 696 + } 697 + 698 + prv->irq = irq_of_parse_and_map(dn, 0); 699 + if (prv->irq == NO_IRQ) { 700 + dev_err(dev, "Error mapping IRQ!\n"); 701 + return -EINVAL; 702 + } 703 + 704 + retval = of_address_to_resource(dn, 0, &res); 705 + if (retval) { 706 + dev_err(dev, "Error parsing memory region!\n"); 707 + return retval; 708 + } 709 + 710 + chips_no = of_get_property(dn, "chips", &len); 711 + if (!chips_no 
|| len != sizeof(*chips_no)) { 712 + dev_err(dev, "Invalid/missing 'chips' property!\n"); 713 + return -EINVAL; 714 + } 715 + 716 + regs_paddr = res.start; 717 + regs_size = res.end - res.start + 1; 718 + 719 + if (!devm_request_mem_region(dev, regs_paddr, regs_size, DRV_NAME)) { 720 + dev_err(dev, "Error requesting memory region!\n"); 721 + return -EBUSY; 722 + } 723 + 724 + prv->regs = devm_ioremap(dev, regs_paddr, regs_size); 725 + if (!prv->regs) { 726 + dev_err(dev, "Error mapping memory region!\n"); 727 + return -ENOMEM; 728 + } 729 + 730 + mtd->name = "MPC5121 NAND"; 731 + chip->dev_ready = mpc5121_nfc_dev_ready; 732 + chip->cmdfunc = mpc5121_nfc_command; 733 + chip->read_byte = mpc5121_nfc_read_byte; 734 + chip->read_word = mpc5121_nfc_read_word; 735 + chip->read_buf = mpc5121_nfc_read_buf; 736 + chip->write_buf = mpc5121_nfc_write_buf; 737 + chip->verify_buf = mpc5121_nfc_verify_buf; 738 + chip->select_chip = mpc5121_nfc_select_chip; 739 + chip->options = NAND_NO_AUTOINCR | NAND_USE_FLASH_BBT; 740 + chip->ecc.mode = NAND_ECC_SOFT; 741 + 742 + /* Support external chip-select logic on ADS5121 board */ 743 + rootnode = of_find_node_by_path("/"); 744 + if (of_device_is_compatible(rootnode, "fsl,mpc5121ads")) { 745 + retval = ads5121_chipselect_init(mtd); 746 + if (retval) { 747 + dev_err(dev, "Chipselect init error!\n"); 748 + of_node_put(rootnode); 749 + return retval; 750 + } 751 + 752 + chip->select_chip = ads5121_select_chip; 753 + } 754 + of_node_put(rootnode); 755 + 756 + /* Enable NFC clock */ 757 + prv->clk = clk_get(dev, "nfc_clk"); 758 + if (!prv->clk) { 759 + dev_err(dev, "Unable to acquire NFC clock!\n"); 760 + retval = -ENODEV; 761 + goto error; 762 + } 763 + 764 + clk_enable(prv->clk); 765 + 766 + /* Reset NAND Flash controller */ 767 + nfc_set(mtd, NFC_CONFIG1, NFC_RESET); 768 + while (nfc_read(mtd, NFC_CONFIG1) & NFC_RESET) { 769 + if (resettime++ >= NFC_RESET_TIMEOUT) { 770 + dev_err(dev, "Timeout while resetting NFC!\n"); 771 + retval = 
-EINVAL; 772 + goto error; 773 + } 774 + 775 + udelay(1); 776 + } 777 + 778 + /* Enable write to NFC memory */ 779 + nfc_write(mtd, NFC_CONFIG, NFC_BLS_UNLOCKED); 780 + 781 + /* Enable write to all NAND pages */ 782 + nfc_write(mtd, NFC_UNLOCKSTART_BLK0, 0x0000); 783 + nfc_write(mtd, NFC_UNLOCKEND_BLK0, 0xFFFF); 784 + nfc_write(mtd, NFC_WRPROT, NFC_WPC_UNLOCK); 785 + 786 + /* 787 + * Setup NFC: 788 + * - Big Endian transfers, 789 + * - Interrupt after full page read/write. 790 + */ 791 + nfc_write(mtd, NFC_CONFIG1, NFC_BIG_ENDIAN | NFC_INT_MASK | 792 + NFC_FULL_PAGE_INT); 793 + 794 + /* Set spare area size */ 795 + nfc_write(mtd, NFC_SPAS, mtd->oobsize >> 1); 796 + 797 + init_waitqueue_head(&prv->irq_waitq); 798 + retval = devm_request_irq(dev, prv->irq, &mpc5121_nfc_irq, 0, DRV_NAME, 799 + mtd); 800 + if (retval) { 801 + dev_err(dev, "Error requesting IRQ!\n"); 802 + goto error; 803 + } 804 + 805 + /* Detect NAND chips */ 806 + if (nand_scan(mtd, *chips_no)) { 807 + dev_err(dev, "NAND Flash not found !\n"); 808 + devm_free_irq(dev, prv->irq, mtd); 809 + retval = -ENXIO; 810 + goto error; 811 + } 812 + 813 + /* Set erase block size */ 814 + switch (mtd->erasesize / mtd->writesize) { 815 + case 32: 816 + nfc_set(mtd, NFC_CONFIG1, NFC_PPB_32); 817 + break; 818 + 819 + case 64: 820 + nfc_set(mtd, NFC_CONFIG1, NFC_PPB_64); 821 + break; 822 + 823 + case 128: 824 + nfc_set(mtd, NFC_CONFIG1, NFC_PPB_128); 825 + break; 826 + 827 + case 256: 828 + nfc_set(mtd, NFC_CONFIG1, NFC_PPB_256); 829 + break; 830 + 831 + default: 832 + dev_err(dev, "Unsupported NAND flash!\n"); 833 + devm_free_irq(dev, prv->irq, mtd); 834 + retval = -ENXIO; 835 + goto error; 836 + } 837 + 838 + dev_set_drvdata(dev, mtd); 839 + 840 + /* Register device in MTD */ 841 + #ifdef CONFIG_MTD_PARTITIONS 842 + retval = parse_mtd_partitions(mtd, mpc5121_nfc_pprobes, &parts, 0); 843 + #ifdef CONFIG_MTD_OF_PARTS 844 + if (retval == 0) 845 + retval = of_mtd_parse_partitions(dev, dn, &parts); 846 + #endif 847 + if 
(retval < 0) { 848 + dev_err(dev, "Error parsing MTD partitions!\n"); 849 + devm_free_irq(dev, prv->irq, mtd); 850 + retval = -EINVAL; 851 + goto error; 852 + } 853 + 854 + if (retval > 0) 855 + retval = add_mtd_partitions(mtd, parts, retval); 856 + else 857 + #endif 858 + retval = add_mtd_device(mtd); 859 + 860 + if (retval) { 861 + dev_err(dev, "Error adding MTD device!\n"); 862 + devm_free_irq(dev, prv->irq, mtd); 863 + goto error; 864 + } 865 + 866 + return 0; 867 + error: 868 + mpc5121_nfc_free(dev, mtd); 869 + return retval; 870 + } 871 + 872 + static int __devexit mpc5121_nfc_remove(struct of_device *op) 873 + { 874 + struct device *dev = &op->dev; 875 + struct mtd_info *mtd = dev_get_drvdata(dev); 876 + struct nand_chip *chip = mtd->priv; 877 + struct mpc5121_nfc_prv *prv = chip->priv; 878 + 879 + nand_release(mtd); 880 + devm_free_irq(dev, prv->irq, mtd); 881 + mpc5121_nfc_free(dev, mtd); 882 + 883 + return 0; 884 + } 885 + 886 + static struct of_device_id mpc5121_nfc_match[] __devinitdata = { 887 + { .compatible = "fsl,mpc5121-nfc", }, 888 + {}, 889 + }; 890 + 891 + static struct of_platform_driver mpc5121_nfc_driver = { 892 + .match_table = mpc5121_nfc_match, 893 + .probe = mpc5121_nfc_probe, 894 + .remove = __devexit_p(mpc5121_nfc_remove), 895 + .driver = { 896 + .name = DRV_NAME, 897 + .owner = THIS_MODULE, 898 + }, 899 + }; 900 + 901 + static int __init mpc5121_nfc_init(void) 902 + { 903 + return of_register_platform_driver(&mpc5121_nfc_driver); 904 + } 905 + 906 + module_init(mpc5121_nfc_init); 907 + 908 + static void __exit mpc5121_nfc_cleanup(void) 909 + { 910 + of_unregister_platform_driver(&mpc5121_nfc_driver); 911 + } 912 + 913 + module_exit(mpc5121_nfc_cleanup); 914 + 915 + MODULE_AUTHOR("Freescale Semiconductor, Inc."); 916 + MODULE_DESCRIPTION("MPC5121 NAND MTD driver"); 917 + MODULE_LICENSE("GPL");
+80 -66
drivers/mtd/nand/mxc_nand.c
··· 38 38 #define DRIVER_NAME "mxc_nand" 39 39 40 40 #define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35()) 41 - #define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27()) 41 + #define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21()) 42 42 43 43 /* Addresses for NFC registers */ 44 44 #define NFC_BUF_SIZE 0xE00 ··· 168 168 { 169 169 struct mxc_nand_host *host = dev_id; 170 170 171 - uint16_t tmp; 172 - 173 - tmp = readw(host->regs + NFC_CONFIG1); 174 - tmp |= NFC_INT_MSK; /* Disable interrupt */ 175 - writew(tmp, host->regs + NFC_CONFIG1); 171 + disable_irq_nosync(irq); 176 172 177 173 wake_up(&host->irq_waitq); 178 174 ··· 180 184 */ 181 185 static void wait_op_done(struct mxc_nand_host *host, int useirq) 182 186 { 183 - uint32_t tmp; 184 - int max_retries = 2000; 187 + uint16_t tmp; 188 + int max_retries = 8000; 185 189 186 190 if (useirq) { 187 191 if ((readw(host->regs + NFC_CONFIG2) & NFC_INT) == 0) { 188 192 189 - tmp = readw(host->regs + NFC_CONFIG1); 190 - tmp &= ~NFC_INT_MSK; /* Enable interrupt */ 191 - writew(tmp, host->regs + NFC_CONFIG1); 193 + enable_irq(host->irq); 192 194 193 195 wait_event(host->irq_waitq, 194 196 readw(host->regs + NFC_CONFIG2) & NFC_INT); ··· 220 226 writew(cmd, host->regs + NFC_FLASH_CMD); 221 227 writew(NFC_CMD, host->regs + NFC_CONFIG2); 222 228 223 - /* Wait for operation to complete */ 224 - wait_op_done(host, useirq); 229 + if (cpu_is_mx21() && (cmd == NAND_CMD_RESET)) { 230 + int max_retries = 100; 231 + /* Reset completion is indicated by NFC_CONFIG2 */ 232 + /* being set to 0 */ 233 + while (max_retries-- > 0) { 234 + if (readw(host->regs + NFC_CONFIG2) == 0) { 235 + break; 236 + } 237 + udelay(1); 238 + } 239 + if (max_retries < 0) 240 + DEBUG(MTD_DEBUG_LEVEL0, "%s: RESET failed\n", 241 + __func__); 242 + } else { 243 + /* Wait for operation to complete */ 244 + wait_op_done(host, useirq); 245 + } 225 246 } 226 247 227 248 /* This function sends an address (or partial address) to the ··· 551 542 } 552 543 } 553 
544 545 + static void preset(struct mtd_info *mtd) 546 + { 547 + struct nand_chip *nand_chip = mtd->priv; 548 + struct mxc_nand_host *host = nand_chip->priv; 549 + uint16_t tmp; 550 + 551 + /* enable interrupt, disable spare enable */ 552 + tmp = readw(host->regs + NFC_CONFIG1); 553 + tmp &= ~NFC_INT_MSK; 554 + tmp &= ~NFC_SP_EN; 555 + if (nand_chip->ecc.mode == NAND_ECC_HW) { 556 + tmp |= NFC_ECC_EN; 557 + } else { 558 + tmp &= ~NFC_ECC_EN; 559 + } 560 + writew(tmp, host->regs + NFC_CONFIG1); 561 + /* preset operation */ 562 + 563 + /* Unlock the internal RAM Buffer */ 564 + writew(0x2, host->regs + NFC_CONFIG); 565 + 566 + /* Blocks to be unlocked */ 567 + if (nfc_is_v21()) { 568 + writew(0x0, host->regs + NFC_V21_UNLOCKSTART_BLKADDR); 569 + writew(0xffff, host->regs + NFC_V21_UNLOCKEND_BLKADDR); 570 + } else if (nfc_is_v1()) { 571 + writew(0x0, host->regs + NFC_V1_UNLOCKSTART_BLKADDR); 572 + writew(0x4000, host->regs + NFC_V1_UNLOCKEND_BLKADDR); 573 + } else 574 + BUG(); 575 + 576 + /* Unlock Block Command for given address range */ 577 + writew(0x4, host->regs + NFC_WRPROT); 578 + } 579 + 554 580 /* Used by the upper layer to write command to NAND Flash for 555 581 * different operations to be carried out on NAND Flash */ 556 582 static void mxc_nand_command(struct mtd_info *mtd, unsigned command, ··· 603 559 604 560 /* Command pre-processing step */ 605 561 switch (command) { 562 + case NAND_CMD_RESET: 563 + send_cmd(host, command, false); 564 + preset(mtd); 565 + break; 606 566 607 567 case NAND_CMD_STATUS: 608 568 host->buf_start = 0; ··· 727 679 struct mxc_nand_platform_data *pdata = pdev->dev.platform_data; 728 680 struct mxc_nand_host *host; 729 681 struct resource *res; 730 - uint16_t tmp; 731 682 int err = 0, nr_parts = 0; 732 683 struct nand_ecclayout *oob_smallpage, *oob_largepage; 733 684 ··· 790 743 host->spare_len = 64; 791 744 oob_smallpage = &nandv2_hw_eccoob_smallpage; 792 745 oob_largepage = &nandv2_hw_eccoob_largepage; 746 + this->ecc.bytes = 
9; 793 747 } else if (nfc_is_v1()) { 794 748 host->regs = host->base; 795 749 host->spare0 = host->base + 0x800; 796 750 host->spare_len = 16; 797 751 oob_smallpage = &nandv1_hw_eccoob_smallpage; 798 752 oob_largepage = &nandv1_hw_eccoob_largepage; 799 - } else 800 - BUG(); 801 - 802 - /* disable interrupt and spare enable */ 803 - tmp = readw(host->regs + NFC_CONFIG1); 804 - tmp |= NFC_INT_MSK; 805 - tmp &= ~NFC_SP_EN; 806 - writew(tmp, host->regs + NFC_CONFIG1); 807 - 808 - init_waitqueue_head(&host->irq_waitq); 809 - 810 - host->irq = platform_get_irq(pdev, 0); 811 - 812 - err = request_irq(host->irq, mxc_nfc_irq, 0, DRIVER_NAME, host); 813 - if (err) 814 - goto eirq; 815 - 816 - /* Reset NAND */ 817 - this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); 818 - 819 - /* preset operation */ 820 - /* Unlock the internal RAM Buffer */ 821 - writew(0x2, host->regs + NFC_CONFIG); 822 - 823 - /* Blocks to be unlocked */ 824 - if (nfc_is_v21()) { 825 - writew(0x0, host->regs + NFC_V21_UNLOCKSTART_BLKADDR); 826 - writew(0xffff, host->regs + NFC_V21_UNLOCKEND_BLKADDR); 827 - this->ecc.bytes = 9; 828 - } else if (nfc_is_v1()) { 829 - writew(0x0, host->regs + NFC_V1_UNLOCKSTART_BLKADDR); 830 - writew(0x4000, host->regs + NFC_V1_UNLOCKEND_BLKADDR); 831 753 this->ecc.bytes = 3; 832 754 } else 833 755 BUG(); 834 - 835 - /* Unlock Block Command for given address range */ 836 - writew(0x4, host->regs + NFC_WRPROT); 837 756 838 757 this->ecc.size = 512; 839 758 this->ecc.layout = oob_smallpage; ··· 809 796 this->ecc.hwctl = mxc_nand_enable_hwecc; 810 797 this->ecc.correct = mxc_nand_correct_data; 811 798 this->ecc.mode = NAND_ECC_HW; 812 - tmp = readw(host->regs + NFC_CONFIG1); 813 - tmp |= NFC_ECC_EN; 814 - writew(tmp, host->regs + NFC_CONFIG1); 815 799 } else { 816 800 this->ecc.mode = NAND_ECC_SOFT; 817 - tmp = readw(host->regs + NFC_CONFIG1); 818 - tmp &= ~NFC_ECC_EN; 819 - writew(tmp, host->regs + NFC_CONFIG1); 820 801 } 821 802 822 803 /* NAND bus width determines access functions 
used by upper layer */ ··· 824 817 this->options |= NAND_USE_FLASH_BBT; 825 818 } 826 819 820 + init_waitqueue_head(&host->irq_waitq); 821 + 822 + host->irq = platform_get_irq(pdev, 0); 823 + 824 + err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host); 825 + if (err) 826 + goto eirq; 827 + 827 828 /* first scan to find the device and get the page size */ 828 - if (nand_scan_ident(mtd, 1)) { 829 + if (nand_scan_ident(mtd, 1, NULL)) { 829 830 err = -ENXIO; 830 831 goto escan; 831 832 } ··· 901 886 int ret = 0; 902 887 903 888 DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND suspend\n"); 904 - if (mtd) { 905 - ret = mtd->suspend(mtd); 906 - /* Disable the NFC clock */ 907 - clk_disable(host->clk); 908 - } 889 + 890 + ret = mtd->suspend(mtd); 891 + 892 + /* 893 + * nand_suspend locks the device for exclusive access, so 894 + * the clock must already be off. 895 + */ 896 + BUG_ON(!ret && host->clk_act); 909 897 910 898 return ret; 911 899 } ··· 922 904 923 905 DEBUG(MTD_DEBUG_LEVEL0, "MXC_ND : NAND resume\n"); 924 906 925 - if (mtd) { 926 - /* Enable the NFC clock */ 927 - clk_enable(host->clk); 928 - mtd->resume(mtd); 929 - } 907 + mtd->resume(mtd); 930 908 931 909 return ret; 932 910 }
+310 -77
drivers/mtd/nand/nand_base.c
··· 108 108 */ 109 109 DEFINE_LED_TRIGGER(nand_led_trigger); 110 110 111 + static int check_offs_len(struct mtd_info *mtd, 112 + loff_t ofs, uint64_t len) 113 + { 114 + struct nand_chip *chip = mtd->priv; 115 + int ret = 0; 116 + 117 + /* Start address must align on block boundary */ 118 + if (ofs & ((1 << chip->phys_erase_shift) - 1)) { 119 + DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__); 120 + ret = -EINVAL; 121 + } 122 + 123 + /* Length must align on block boundary */ 124 + if (len & ((1 << chip->phys_erase_shift) - 1)) { 125 + DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n", 126 + __func__); 127 + ret = -EINVAL; 128 + } 129 + 130 + /* Do not allow past end of device */ 131 + if (ofs + len > mtd->size) { 132 + DEBUG(MTD_DEBUG_LEVEL0, "%s: Past end of device\n", 133 + __func__); 134 + ret = -EINVAL; 135 + } 136 + 137 + return ret; 138 + } 139 + 111 140 /** 112 141 * nand_release_device - [GENERIC] release chip 113 142 * @mtd: MTD device structure ··· 347 318 struct nand_chip *chip = mtd->priv; 348 319 u16 bad; 349 320 321 + if (chip->options & NAND_BB_LAST_PAGE) 322 + ofs += mtd->erasesize - mtd->writesize; 323 + 350 324 page = (int)(ofs >> chip->page_shift) & chip->pagemask; 351 325 352 326 if (getchip) { ··· 367 335 bad = cpu_to_le16(chip->read_word(mtd)); 368 336 if (chip->badblockpos & 0x1) 369 337 bad >>= 8; 370 - if ((bad & 0xFF) != 0xff) 371 - res = 1; 338 + else 339 + bad &= 0xFF; 372 340 } else { 373 341 chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page); 374 - if (chip->read_byte(mtd) != 0xff) 375 - res = 1; 342 + bad = chip->read_byte(mtd); 376 343 } 344 + 345 + if (likely(chip->badblockbits == 8)) 346 + res = bad != 0xFF; 347 + else 348 + res = hweight8(bad) < chip->badblockbits; 377 349 378 350 if (getchip) 379 351 nand_release_device(mtd); ··· 398 362 struct nand_chip *chip = mtd->priv; 399 363 uint8_t buf[2] = { 0, 0 }; 400 364 int block, ret; 365 + 366 + if (chip->options & NAND_BB_LAST_PAGE) 367 + ofs += 
mtd->erasesize - mtd->writesize; 401 368 402 369 /* Get block number */ 403 370 block = (int)(ofs >> chip->bbt_erase_shift); ··· 440 401 static int nand_check_wp(struct mtd_info *mtd) 441 402 { 442 403 struct nand_chip *chip = mtd->priv; 404 + 405 + /* broken xD cards report WP despite being writable */ 406 + if (chip->options & NAND_BROKEN_XD) 407 + return 0; 408 + 443 409 /* Check the WP bit */ 444 410 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); 445 411 return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1; ··· 788 744 chip->state = FL_PM_SUSPENDED; 789 745 spin_unlock(lock); 790 746 return 0; 791 - } else { 792 - spin_unlock(lock); 793 - return -EAGAIN; 794 747 } 795 748 } 796 749 set_current_state(TASK_UNINTERRUPTIBLE); ··· 873 832 874 833 status = (int)chip->read_byte(mtd); 875 834 return status; 835 + } 836 + 837 + /** 838 + * __nand_unlock - [REPLACABLE] unlocks specified locked blockes 839 + * 840 + * @param mtd - mtd info 841 + * @param ofs - offset to start unlock from 842 + * @param len - length to unlock 843 + * @invert - when = 0, unlock the range of blocks within the lower and 844 + * upper boundary address 845 + * whne = 1, unlock the range of blocks outside the boundaries 846 + * of the lower and upper boundary address 847 + * 848 + * @return - unlock status 849 + */ 850 + static int __nand_unlock(struct mtd_info *mtd, loff_t ofs, 851 + uint64_t len, int invert) 852 + { 853 + int ret = 0; 854 + int status, page; 855 + struct nand_chip *chip = mtd->priv; 856 + 857 + /* Submit address of first page to unlock */ 858 + page = ofs >> chip->page_shift; 859 + chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask); 860 + 861 + /* Submit address of last page to unlock */ 862 + page = (ofs + len) >> chip->page_shift; 863 + chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1, 864 + (page | invert) & chip->pagemask); 865 + 866 + /* Call wait ready function */ 867 + status = chip->waitfunc(mtd, chip); 868 + udelay(1000); 869 + /* See if device thinks it succeeded 
*/ 870 + if (status & 0x01) { 871 + DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n", 872 + __func__, status); 873 + ret = -EIO; 874 + } 875 + 876 + return ret; 877 + } 878 + 879 + /** 880 + * nand_unlock - [REPLACABLE] unlocks specified locked blockes 881 + * 882 + * @param mtd - mtd info 883 + * @param ofs - offset to start unlock from 884 + * @param len - length to unlock 885 + * 886 + * @return - unlock status 887 + */ 888 + int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 889 + { 890 + int ret = 0; 891 + int chipnr; 892 + struct nand_chip *chip = mtd->priv; 893 + 894 + DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n", 895 + __func__, (unsigned long long)ofs, len); 896 + 897 + if (check_offs_len(mtd, ofs, len)) 898 + ret = -EINVAL; 899 + 900 + /* Align to last block address if size addresses end of the device */ 901 + if (ofs + len == mtd->size) 902 + len -= mtd->erasesize; 903 + 904 + nand_get_device(chip, mtd, FL_UNLOCKING); 905 + 906 + /* Shift to get chip number */ 907 + chipnr = ofs >> chip->chip_shift; 908 + 909 + chip->select_chip(mtd, chipnr); 910 + 911 + /* Check, if it is write protected */ 912 + if (nand_check_wp(mtd)) { 913 + DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n", 914 + __func__); 915 + ret = -EIO; 916 + goto out; 917 + } 918 + 919 + ret = __nand_unlock(mtd, ofs, len, 0); 920 + 921 + out: 922 + /* de-select the NAND device */ 923 + chip->select_chip(mtd, -1); 924 + 925 + nand_release_device(mtd); 926 + 927 + return ret; 928 + } 929 + 930 + /** 931 + * nand_lock - [REPLACABLE] locks all blockes present in the device 932 + * 933 + * @param mtd - mtd info 934 + * @param ofs - offset to start unlock from 935 + * @param len - length to unlock 936 + * 937 + * @return - lock status 938 + * 939 + * This feature is not support in many NAND parts. 'Micron' NAND parts 940 + * do have this feature, but it allows only to lock all blocks not for 941 + * specified range for block. 
942 + * 943 + * Implementing 'lock' feature by making use of 'unlock', for now. 944 + */ 945 + int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 946 + { 947 + int ret = 0; 948 + int chipnr, status, page; 949 + struct nand_chip *chip = mtd->priv; 950 + 951 + DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n", 952 + __func__, (unsigned long long)ofs, len); 953 + 954 + if (check_offs_len(mtd, ofs, len)) 955 + ret = -EINVAL; 956 + 957 + nand_get_device(chip, mtd, FL_LOCKING); 958 + 959 + /* Shift to get chip number */ 960 + chipnr = ofs >> chip->chip_shift; 961 + 962 + chip->select_chip(mtd, chipnr); 963 + 964 + /* Check, if it is write protected */ 965 + if (nand_check_wp(mtd)) { 966 + DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n", 967 + __func__); 968 + status = MTD_ERASE_FAILED; 969 + ret = -EIO; 970 + goto out; 971 + } 972 + 973 + /* Submit address of first page to lock */ 974 + page = ofs >> chip->page_shift; 975 + chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask); 976 + 977 + /* Call wait ready function */ 978 + status = chip->waitfunc(mtd, chip); 979 + udelay(1000); 980 + /* See if device thinks it succeeded */ 981 + if (status & 0x01) { 982 + DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n", 983 + __func__, status); 984 + ret = -EIO; 985 + goto out; 986 + } 987 + 988 + ret = __nand_unlock(mtd, ofs, len, 0x1); 989 + 990 + out: 991 + /* de-select the NAND device */ 992 + chip->select_chip(mtd, -1); 993 + 994 + nand_release_device(mtd); 995 + 996 + return ret; 876 997 } 877 998 878 999 /** ··· 1435 1232 int ret = 0; 1436 1233 uint32_t readlen = ops->len; 1437 1234 uint32_t oobreadlen = ops->ooblen; 1235 + uint32_t max_oobsize = ops->mode == MTD_OOB_AUTO ? 
1236 + mtd->oobavail : mtd->oobsize; 1237 + 1438 1238 uint8_t *bufpoi, *oob, *buf; 1439 1239 1440 1240 stats = mtd->ecc_stats; ··· 1488 1282 buf += bytes; 1489 1283 1490 1284 if (unlikely(oob)) { 1491 - /* Raw mode does data:oob:data:oob */ 1492 - if (ops->mode != MTD_OOB_RAW) { 1493 - int toread = min(oobreadlen, 1494 - chip->ecc.layout->oobavail); 1495 - if (toread) { 1496 - oob = nand_transfer_oob(chip, 1497 - oob, ops, toread); 1498 - oobreadlen -= toread; 1499 - } 1500 - } else 1501 - buf = nand_transfer_oob(chip, 1502 - buf, ops, mtd->oobsize); 1285 + 1286 + int toread = min(oobreadlen, max_oobsize); 1287 + 1288 + if (toread) { 1289 + oob = nand_transfer_oob(chip, 1290 + oob, ops, toread); 1291 + oobreadlen -= toread; 1292 + } 1503 1293 } 1504 1294 1505 1295 if (!(chip->options & NAND_NO_READRDY)) { ··· 2082 1880 * @oob: oob data buffer 2083 1881 * @ops: oob ops structure 2084 1882 */ 2085 - static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, 2086 - struct mtd_oob_ops *ops) 1883 + static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len, 1884 + struct mtd_oob_ops *ops) 2087 1885 { 2088 - size_t len = ops->ooblen; 2089 - 2090 1886 switch(ops->mode) { 2091 1887 2092 1888 case MTD_OOB_PLACE: ··· 2139 1939 int chipnr, realpage, page, blockmask, column; 2140 1940 struct nand_chip *chip = mtd->priv; 2141 1941 uint32_t writelen = ops->len; 1942 + 1943 + uint32_t oobwritelen = ops->ooblen; 1944 + uint32_t oobmaxlen = ops->mode == MTD_OOB_AUTO ? 
1945 + mtd->oobavail : mtd->oobsize; 1946 + 2142 1947 uint8_t *oob = ops->oobbuf; 2143 1948 uint8_t *buf = ops->datbuf; 2144 1949 int ret, subpage; ··· 2185 1980 if (likely(!oob)) 2186 1981 memset(chip->oob_poi, 0xff, mtd->oobsize); 2187 1982 1983 + /* Don't allow multipage oob writes with offset */ 1984 + if (ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) 1985 + return -EINVAL; 1986 + 2188 1987 while(1) { 2189 1988 int bytes = mtd->writesize; 2190 1989 int cached = writelen > bytes && page != blockmask; ··· 2204 1995 wbuf = chip->buffers->databuf; 2205 1996 } 2206 1997 2207 - if (unlikely(oob)) 2208 - oob = nand_fill_oob(chip, oob, ops); 1998 + if (unlikely(oob)) { 1999 + size_t len = min(oobwritelen, oobmaxlen); 2000 + oob = nand_fill_oob(chip, oob, len, ops); 2001 + oobwritelen -= len; 2002 + } 2209 2003 2210 2004 ret = chip->write_page(mtd, chip, wbuf, page, cached, 2211 2005 (ops->mode == MTD_OOB_RAW)); ··· 2382 2170 chip->pagebuf = -1; 2383 2171 2384 2172 memset(chip->oob_poi, 0xff, mtd->oobsize); 2385 - nand_fill_oob(chip, ops->oobbuf, ops); 2173 + nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops); 2386 2174 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask); 2387 2175 memset(chip->oob_poi, 0xff, mtd->oobsize); 2388 2176 ··· 2505 2293 __func__, (unsigned long long)instr->addr, 2506 2294 (unsigned long long)instr->len); 2507 2295 2508 - /* Start address must align on block boundary */ 2509 - if (instr->addr & ((1 << chip->phys_erase_shift) - 1)) { 2510 - DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__); 2296 + if (check_offs_len(mtd, instr->addr, instr->len)) 2511 2297 return -EINVAL; 2512 - } 2513 - 2514 - /* Length must align on block boundary */ 2515 - if (instr->len & ((1 << chip->phys_erase_shift) - 1)) { 2516 - DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n", 2517 - __func__); 2518 - return -EINVAL; 2519 - } 2520 - 2521 - /* Do not allow erase past end of device */ 2522 - if ((instr->len + instr->addr) > 
mtd->size) { 2523 - DEBUG(MTD_DEBUG_LEVEL0, "%s: Erase past end of device\n", 2524 - __func__); 2525 - return -EINVAL; 2526 - } 2527 2298 2528 2299 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; 2529 2300 ··· 2777 2582 */ 2778 2583 static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, 2779 2584 struct nand_chip *chip, 2780 - int busw, int *maf_id) 2585 + int busw, int *maf_id, 2586 + struct nand_flash_dev *type) 2781 2587 { 2782 - struct nand_flash_dev *type = NULL; 2783 2588 int i, dev_id, maf_idx; 2784 - int tmp_id, tmp_manf; 2589 + u8 id_data[8]; 2785 2590 2786 2591 /* Select the device */ 2787 2592 chip->select_chip(mtd, 0); ··· 2807 2612 2808 2613 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); 2809 2614 2810 - /* Read manufacturer and device IDs */ 2615 + /* Read entire ID string */ 2811 2616 2812 - tmp_manf = chip->read_byte(mtd); 2813 - tmp_id = chip->read_byte(mtd); 2617 + for (i = 0; i < 8; i++) 2618 + id_data[i] = chip->read_byte(mtd); 2814 2619 2815 - if (tmp_manf != *maf_id || tmp_id != dev_id) { 2620 + if (id_data[0] != *maf_id || id_data[1] != dev_id) { 2816 2621 printk(KERN_INFO "%s: second ID read did not match " 2817 2622 "%02x,%02x against %02x,%02x\n", __func__, 2818 - *maf_id, dev_id, tmp_manf, tmp_id); 2623 + *maf_id, dev_id, id_data[0], id_data[1]); 2819 2624 return ERR_PTR(-ENODEV); 2820 2625 } 2821 2626 2822 - /* Lookup the flash id */ 2823 - for (i = 0; nand_flash_ids[i].name != NULL; i++) { 2824 - if (dev_id == nand_flash_ids[i].id) { 2825 - type = &nand_flash_ids[i]; 2826 - break; 2827 - } 2828 - } 2829 - 2830 2627 if (!type) 2628 + type = nand_flash_ids; 2629 + 2630 + for (; type->name != NULL; type++) 2631 + if (dev_id == type->id) 2632 + break; 2633 + 2634 + if (!type->name) 2831 2635 return ERR_PTR(-ENODEV); 2832 2636 2833 2637 if (!mtd->name) ··· 2838 2644 if (!type->pagesize) { 2839 2645 int extid; 2840 2646 /* The 3rd id byte holds MLC / multichip data */ 2841 - chip->cellinfo = chip->read_byte(mtd); 2647 + 
chip->cellinfo = id_data[2]; 2842 2648 /* The 4th id byte is the important one */ 2843 - extid = chip->read_byte(mtd); 2844 - /* Calc pagesize */ 2845 - mtd->writesize = 1024 << (extid & 0x3); 2846 - extid >>= 2; 2847 - /* Calc oobsize */ 2848 - mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9); 2849 - extid >>= 2; 2850 - /* Calc blocksize. Blocksize is multiples of 64KiB */ 2851 - mtd->erasesize = (64 * 1024) << (extid & 0x03); 2852 - extid >>= 2; 2853 - /* Get buswidth information */ 2854 - busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0; 2649 + extid = id_data[3]; 2855 2650 2651 + /* 2652 + * Field definitions are in the following datasheets: 2653 + * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32) 2654 + * New style (6 byte ID): Samsung K9GAG08U0D (p.40) 2655 + * 2656 + * Check for wraparound + Samsung ID + nonzero 6th byte 2657 + * to decide what to do. 2658 + */ 2659 + if (id_data[0] == id_data[6] && id_data[1] == id_data[7] && 2660 + id_data[0] == NAND_MFR_SAMSUNG && 2661 + id_data[5] != 0x00) { 2662 + /* Calc pagesize */ 2663 + mtd->writesize = 2048 << (extid & 0x03); 2664 + extid >>= 2; 2665 + /* Calc oobsize */ 2666 + mtd->oobsize = (extid & 0x03) == 0x01 ? 128 : 218; 2667 + extid >>= 2; 2668 + /* Calc blocksize */ 2669 + mtd->erasesize = (128 * 1024) << 2670 + (((extid >> 1) & 0x04) | (extid & 0x03)); 2671 + busw = 0; 2672 + } else { 2673 + /* Calc pagesize */ 2674 + mtd->writesize = 1024 << (extid & 0x03); 2675 + extid >>= 2; 2676 + /* Calc oobsize */ 2677 + mtd->oobsize = (8 << (extid & 0x01)) * 2678 + (mtd->writesize >> 9); 2679 + extid >>= 2; 2680 + /* Calc blocksize. Blocksize is multiples of 64KiB */ 2681 + mtd->erasesize = (64 * 1024) << (extid & 0x03); 2682 + extid >>= 2; 2683 + /* Get buswidth information */ 2684 + busw = (extid & 0x01) ? 
NAND_BUSWIDTH_16 : 0; 2685 + } 2856 2686 } else { 2857 2687 /* 2858 2688 * Old devices have chip data hardcoded in the device id table ··· 2922 2704 /* Set the bad block position */ 2923 2705 chip->badblockpos = mtd->writesize > 512 ? 2924 2706 NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS; 2707 + chip->badblockbits = 8; 2925 2708 2926 2709 /* Get chip options, preserve non chip based options */ 2927 2710 chip->options &= ~NAND_CHIPOPTIONS_MSK; ··· 2938 2719 */ 2939 2720 if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize) 2940 2721 chip->options &= ~NAND_SAMSUNG_LP_OPTIONS; 2722 + 2723 + /* 2724 + * Bad block marker is stored in the last page of each block 2725 + * on Samsung and Hynix MLC devices 2726 + */ 2727 + if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) && 2728 + (*maf_id == NAND_MFR_SAMSUNG || 2729 + *maf_id == NAND_MFR_HYNIX)) 2730 + chip->options |= NAND_BB_LAST_PAGE; 2941 2731 2942 2732 /* Check for AND chips with 4 page planes */ 2943 2733 if (chip->options & NAND_4PAGE_ARRAY) ··· 2969 2741 * nand_scan_ident - [NAND Interface] Scan for the NAND device 2970 2742 * @mtd: MTD device structure 2971 2743 * @maxchips: Number of chips to scan for 2744 + * @table: Alternative NAND ID table 2972 2745 * 2973 2746 * This is the first phase of the normal nand_scan() function. It 2974 2747 * reads the flash ID and sets up MTD fields accordingly. 2975 2748 * 2976 2749 * The mtd->owner field must be set to the module of the caller. 
2977 2750 */ 2978 - int nand_scan_ident(struct mtd_info *mtd, int maxchips) 2751 + int nand_scan_ident(struct mtd_info *mtd, int maxchips, 2752 + struct nand_flash_dev *table) 2979 2753 { 2980 2754 int i, busw, nand_maf_id; 2981 2755 struct nand_chip *chip = mtd->priv; ··· 2989 2759 nand_set_defaults(chip, busw); 2990 2760 2991 2761 /* Read the flash type */ 2992 - type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id); 2762 + type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id, table); 2993 2763 2994 2764 if (IS_ERR(type)) { 2995 2765 if (!(chip->options & NAND_SCAN_SILENT_NODEV)) ··· 3219 2989 3220 2990 /* Fill in remaining MTD driver data */ 3221 2991 mtd->type = MTD_NANDFLASH; 3222 - mtd->flags = MTD_CAP_NANDFLASH; 2992 + mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM : 2993 + MTD_CAP_NANDFLASH; 3223 2994 mtd->erase = nand_erase; 3224 2995 mtd->point = NULL; 3225 2996 mtd->unpoint = NULL; ··· 3281 3050 BUG(); 3282 3051 } 3283 3052 3284 - ret = nand_scan_ident(mtd, maxchips); 3053 + ret = nand_scan_ident(mtd, maxchips, NULL); 3285 3054 if (!ret) 3286 3055 ret = nand_scan_tail(mtd); 3287 3056 return ret; ··· 3308 3077 kfree(chip->buffers); 3309 3078 } 3310 3079 3080 + EXPORT_SYMBOL_GPL(nand_lock); 3081 + EXPORT_SYMBOL_GPL(nand_unlock); 3311 3082 EXPORT_SYMBOL_GPL(nand_scan); 3312 3083 EXPORT_SYMBOL_GPL(nand_scan_ident); 3313 3084 EXPORT_SYMBOL_GPL(nand_scan_tail);
+25 -4
drivers/mtd/nand/nand_bbt.c
··· 237 237 size_t len) 238 238 { 239 239 struct mtd_oob_ops ops; 240 + int res; 240 241 241 242 ops.mode = MTD_OOB_RAW; 242 243 ops.ooboffs = 0; 243 244 ops.ooblen = mtd->oobsize; 244 - ops.oobbuf = buf; 245 - ops.datbuf = buf; 246 - ops.len = len; 247 245 248 - return mtd->read_oob(mtd, offs, &ops); 246 + 247 + while (len > 0) { 248 + if (len <= mtd->writesize) { 249 + ops.oobbuf = buf + len; 250 + ops.datbuf = buf; 251 + ops.len = len; 252 + return mtd->read_oob(mtd, offs, &ops); 253 + } else { 254 + ops.oobbuf = buf + mtd->writesize; 255 + ops.datbuf = buf; 256 + ops.len = mtd->writesize; 257 + res = mtd->read_oob(mtd, offs, &ops); 258 + 259 + if (res) 260 + return res; 261 + } 262 + 263 + buf += mtd->oobsize + mtd->writesize; 264 + len -= mtd->writesize; 265 + } 266 + return 0; 249 267 } 250 268 251 269 /* ··· 431 413 numblocks += startblock; 432 414 from = (loff_t)startblock << (this->bbt_erase_shift - 1); 433 415 } 416 + 417 + if (this->options & NAND_BB_LAST_PAGE) 418 + from += mtd->erasesize - (mtd->writesize * len); 434 419 435 420 for (i = startblock; i < numblocks;) { 436 421 int ret;
+28 -49
drivers/mtd/nand/nand_bcm_umi.h
··· 167 167 int numToRead = 16; /* There are 16 bytes per sector in the OOB */ 168 168 169 169 /* ECC is already paused when this function is called */ 170 + if (pageSize != NAND_DATA_ACCESS_SIZE) { 171 + /* skip BI */ 172 + #if defined(__KERNEL__) && !defined(STANDALONE) 173 + *oobp++ = REG_NAND_DATA8; 174 + #else 175 + REG_NAND_DATA8; 176 + #endif 177 + numToRead--; 178 + } 179 + 180 + while (numToRead > numEccBytes) { 181 + /* skip free oob region */ 182 + #if defined(__KERNEL__) && !defined(STANDALONE) 183 + *oobp++ = REG_NAND_DATA8; 184 + #else 185 + REG_NAND_DATA8; 186 + #endif 187 + numToRead--; 188 + } 170 189 171 190 if (pageSize == NAND_DATA_ACCESS_SIZE) { 172 - while (numToRead > numEccBytes) { 173 - /* skip free oob region */ 174 - #if defined(__KERNEL__) && !defined(STANDALONE) 175 - *oobp++ = REG_NAND_DATA8; 176 - #else 177 - REG_NAND_DATA8; 178 - #endif 179 - numToRead--; 180 - } 181 - 182 191 /* read ECC bytes before BI */ 183 192 nand_bcm_umi_bch_resume_read_ecc_calc(); 184 193 ··· 199 190 #else 200 191 eccCalc[eccPos++] = REG_NAND_DATA8; 201 192 #endif 193 + numToRead--; 202 194 } 203 195 204 196 nand_bcm_umi_bch_pause_read_ecc_calc(); ··· 214 204 numToRead--; 215 205 } 216 206 217 - /* read ECC bytes */ 218 - nand_bcm_umi_bch_resume_read_ecc_calc(); 219 - while (numToRead) { 207 + } 208 + /* read ECC bytes */ 209 + nand_bcm_umi_bch_resume_read_ecc_calc(); 210 + while (numToRead) { 220 211 #if defined(__KERNEL__) && !defined(STANDALONE) 221 - *oobp = REG_NAND_DATA8; 222 - eccCalc[eccPos++] = *oobp; 223 - oobp++; 212 + *oobp = REG_NAND_DATA8; 213 + eccCalc[eccPos++] = *oobp; 214 + oobp++; 224 215 #else 225 - eccCalc[eccPos++] = REG_NAND_DATA8; 226 - #endif 227 - numToRead--; 228 - } 229 - } else { 230 - /* skip BI */ 231 - #if defined(__KERNEL__) && !defined(STANDALONE) 232 - *oobp++ = REG_NAND_DATA8; 233 - #else 234 - REG_NAND_DATA8; 216 + eccCalc[eccPos++] = REG_NAND_DATA8; 235 217 #endif 236 218 numToRead--; 237 - 238 - while (numToRead > 
numEccBytes) { 239 - /* skip free oob region */ 240 - #if defined(__KERNEL__) && !defined(STANDALONE) 241 - *oobp++ = REG_NAND_DATA8; 242 - #else 243 - REG_NAND_DATA8; 244 - #endif 245 - numToRead--; 246 - } 247 - 248 - /* read ECC bytes */ 249 - nand_bcm_umi_bch_resume_read_ecc_calc(); 250 - while (numToRead) { 251 - #if defined(__KERNEL__) && !defined(STANDALONE) 252 - *oobp = REG_NAND_DATA8; 253 - eccCalc[eccPos++] = *oobp; 254 - oobp++; 255 - #else 256 - eccCalc[eccPos++] = REG_NAND_DATA8; 257 - #endif 258 - numToRead--; 259 - } 260 219 } 261 220 } 262 221
+1
drivers/mtd/nand/nand_ids.c
··· 82 82 /* 1 Gigabit */ 83 83 {"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, LP_OPTIONS}, 84 84 {"NAND 128MiB 3,3V 8-bit", 0xF1, 0, 128, 0, LP_OPTIONS}, 85 + {"NAND 128MiB 3,3V 8-bit", 0xD1, 0, 128, 0, LP_OPTIONS}, 85 86 {"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, LP_OPTIONS16}, 86 87 {"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, LP_OPTIONS16}, 87 88
+10 -7
drivers/mtd/nand/nandsim.c
··· 80 80 #ifndef CONFIG_NANDSIM_DBG 81 81 #define CONFIG_NANDSIM_DBG 0 82 82 #endif 83 + #ifndef CONFIG_NANDSIM_MAX_PARTS 84 + #define CONFIG_NANDSIM_MAX_PARTS 32 85 + #endif 83 86 84 87 static uint first_id_byte = CONFIG_NANDSIM_FIRST_ID_BYTE; 85 88 static uint second_id_byte = CONFIG_NANDSIM_SECOND_ID_BYTE; ··· 97 94 static uint do_delays = CONFIG_NANDSIM_DO_DELAYS; 98 95 static uint log = CONFIG_NANDSIM_LOG; 99 96 static uint dbg = CONFIG_NANDSIM_DBG; 100 - static unsigned long parts[MAX_MTD_DEVICES]; 97 + static unsigned long parts[CONFIG_NANDSIM_MAX_PARTS]; 101 98 static unsigned int parts_num; 102 99 static char *badblocks = NULL; 103 100 static char *weakblocks = NULL; ··· 138 135 MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)"); 139 136 MODULE_PARM_DESC(programm_delay, "Page programm delay (microseconds"); 140 137 MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)"); 141 - MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanodeconds)"); 142 - MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanodeconds)"); 138 + MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanoseconds)"); 139 + MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanoseconds)"); 143 140 MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)"); 144 141 MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero"); 145 142 MODULE_PARM_DESC(log, "Perform logging if not zero"); ··· 291 288 * The structure which describes all the internal simulator data. 
292 289 */ 293 290 struct nandsim { 294 - struct mtd_partition partitions[MAX_MTD_DEVICES]; 291 + struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS]; 295 292 unsigned int nbparts; 296 293 297 294 uint busw; /* flash chip bus width (8 or 16) */ ··· 315 312 union ns_mem buf; 316 313 317 314 /* NAND flash "geometry" */ 318 - struct nandsin_geometry { 315 + struct { 319 316 uint64_t totsz; /* total flash size, bytes */ 320 317 uint32_t secsz; /* flash sector (erase block) size, bytes */ 321 318 uint pgsz; /* NAND flash page size, bytes */ ··· 334 331 } geom; 335 332 336 333 /* NAND flash internal registers */ 337 - struct nandsim_regs { 334 + struct { 338 335 unsigned command; /* the command register */ 339 336 u_char status; /* the status register */ 340 337 uint row; /* the page number */ ··· 345 342 } regs; 346 343 347 344 /* NAND flash lines state */ 348 - struct ns_lines_status { 345 + struct { 349 346 int ce; /* chip Enable */ 350 347 int cle; /* command Latch Enable */ 351 348 int ale; /* address Latch Enable */
+3 -3
drivers/mtd/nand/nomadik_nand.c
··· 105 105 ret = -EIO; 106 106 goto err_unmap; 107 107 } 108 - host->addr_va = ioremap(res->start, res->end - res->start + 1); 108 + host->addr_va = ioremap(res->start, resource_size(res)); 109 109 110 110 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data"); 111 111 if (!res) { 112 112 ret = -EIO; 113 113 goto err_unmap; 114 114 } 115 - host->data_va = ioremap(res->start, res->end - res->start + 1); 115 + host->data_va = ioremap(res->start, resource_size(res)); 116 116 117 117 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd"); 118 118 if (!res) { 119 119 ret = -EIO; 120 120 goto err_unmap; 121 121 } 122 - host->cmd_va = ioremap(res->start, res->end - res->start + 1); 122 + host->cmd_va = ioremap(res->start, resource_size(res)); 123 123 124 124 if (!host->addr_va || !host->data_va || !host->cmd_va) { 125 125 ret = -ENOMEM;
+10 -6
drivers/mtd/nand/omap2.c
··· 292 292 u32 *p = (u32 *)buf; 293 293 294 294 /* take care of subpage reads */ 295 - for (; len % 4 != 0; ) { 296 - *buf++ = __raw_readb(info->nand.IO_ADDR_R); 297 - len--; 295 + if (len % 4) { 296 + if (info->nand.options & NAND_BUSWIDTH_16) 297 + omap_read_buf16(mtd, buf, len % 4); 298 + else 299 + omap_read_buf8(mtd, buf, len % 4); 300 + p = (u32 *) (buf + len % 4); 301 + len -= len % 4; 298 302 } 299 - p = (u32 *) buf; 300 303 301 304 /* configure and start prefetch transfer */ 302 305 ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0); ··· 505 502 omap_write_buf_pref(mtd, buf, len); 506 503 else 507 504 /* start transfer in DMA mode */ 508 - omap_nand_dma_transfer(mtd, buf, len, 0x1); 505 + omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1); 509 506 } 510 507 511 508 /** ··· 1031 1028 static int omap_nand_remove(struct platform_device *pdev) 1032 1029 { 1033 1030 struct mtd_info *mtd = platform_get_drvdata(pdev); 1034 - struct omap_nand_info *info = mtd->priv; 1031 + struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 1032 + mtd); 1035 1033 1036 1034 platform_set_drvdata(pdev, NULL); 1037 1035 if (use_dma)
+11 -2
drivers/mtd/nand/orion_nand.c
··· 80 80 struct mtd_info *mtd; 81 81 struct nand_chip *nc; 82 82 struct orion_nand_data *board; 83 + struct resource *res; 83 84 void __iomem *io_base; 84 85 int ret = 0; 85 86 #ifdef CONFIG_MTD_PARTITIONS ··· 96 95 } 97 96 mtd = (struct mtd_info *)(nc + 1); 98 97 99 - io_base = ioremap(pdev->resource[0].start, 100 - pdev->resource[0].end - pdev->resource[0].start + 1); 98 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 99 + if (!res) { 100 + ret = -ENODEV; 101 + goto no_res; 102 + } 103 + 104 + io_base = ioremap(res->start, resource_size(res)); 101 105 if (!io_base) { 102 106 printk(KERN_ERR "orion_nand: ioremap failed\n"); 103 107 ret = -EIO; ··· 125 119 126 120 if (board->width == 16) 127 121 nc->options |= NAND_BUSWIDTH_16; 122 + 123 + if (board->dev_ready) 124 + nc->dev_ready = board->dev_ready; 128 125 129 126 platform_set_drvdata(pdev, mtd); 130 127
+1 -1
drivers/mtd/nand/pasemi_nand.c
··· 209 209 return 0; 210 210 } 211 211 212 - static struct of_device_id pasemi_nand_match[] = 212 + static const struct of_device_id pasemi_nand_match[] = 213 213 { 214 214 { 215 215 .compatible = "pasemi,localbus-nand",
+11
drivers/mtd/nand/pxa3xx_nand.c
··· 1320 1320 goto fail_free_irq; 1321 1321 } 1322 1322 1323 + if (mtd_has_cmdlinepart()) { 1324 + static const char *probes[] = { "cmdlinepart", NULL }; 1325 + struct mtd_partition *parts; 1326 + int nr_parts; 1327 + 1328 + nr_parts = parse_mtd_partitions(mtd, probes, &parts, 0); 1329 + 1330 + if (nr_parts) 1331 + return add_mtd_partitions(mtd, parts, nr_parts); 1332 + } 1333 + 1323 1334 return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts); 1324 1335 1325 1336 fail_free_irq:
+1140
drivers/mtd/nand/r852.c
··· 1 + /* 2 + * Copyright © 2009 - Maxim Levitsky 3 + * driver for Ricoh xD readers 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License version 2 as 7 + * published by the Free Software Foundation. 8 + */ 9 + 10 + #include <linux/kernel.h> 11 + #include <linux/module.h> 12 + #include <linux/jiffies.h> 13 + #include <linux/workqueue.h> 14 + #include <linux/interrupt.h> 15 + #include <linux/pci.h> 16 + #include <linux/pci_ids.h> 17 + #include <linux/delay.h> 18 + #include <linux/slab.h> 19 + #include <asm/byteorder.h> 20 + #include <linux/sched.h> 21 + #include "sm_common.h" 22 + #include "r852.h" 23 + 24 + 25 + static int r852_enable_dma = 1; 26 + module_param(r852_enable_dma, bool, S_IRUGO); 27 + MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)"); 28 + 29 + static int debug; 30 + module_param(debug, int, S_IRUGO | S_IWUSR); 31 + MODULE_PARM_DESC(debug, "Debug level (0-2)"); 32 + 33 + /* read register */ 34 + static inline uint8_t r852_read_reg(struct r852_device *dev, int address) 35 + { 36 + uint8_t reg = readb(dev->mmio + address); 37 + return reg; 38 + } 39 + 40 + /* write register */ 41 + static inline void r852_write_reg(struct r852_device *dev, 42 + int address, uint8_t value) 43 + { 44 + writeb(value, dev->mmio + address); 45 + mmiowb(); 46 + } 47 + 48 + 49 + /* read dword sized register */ 50 + static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address) 51 + { 52 + uint32_t reg = le32_to_cpu(readl(dev->mmio + address)); 53 + return reg; 54 + } 55 + 56 + /* write dword sized register */ 57 + static inline void r852_write_reg_dword(struct r852_device *dev, 58 + int address, uint32_t value) 59 + { 60 + writel(cpu_to_le32(value), dev->mmio + address); 61 + mmiowb(); 62 + } 63 + 64 + /* returns pointer to our private structure */ 65 + static inline struct r852_device *r852_get_dev(struct mtd_info *mtd) 66 + { 67 + struct nand_chip 
*chip = (struct nand_chip *)mtd->priv; 68 + return (struct r852_device *)chip->priv; 69 + } 70 + 71 + 72 + /* check if controller supports dma */ 73 + static void r852_dma_test(struct r852_device *dev) 74 + { 75 + dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) & 76 + (R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2); 77 + 78 + if (!dev->dma_usable) 79 + message("Non dma capable device detected, dma disabled"); 80 + 81 + if (!r852_enable_dma) { 82 + message("disabling dma on user request"); 83 + dev->dma_usable = 0; 84 + } 85 + } 86 + 87 + /* 88 + * Enable dma. Enables ether first or second stage of the DMA, 89 + * Expects dev->dma_dir and dev->dma_state be set 90 + */ 91 + static void r852_dma_enable(struct r852_device *dev) 92 + { 93 + uint8_t dma_reg, dma_irq_reg; 94 + 95 + /* Set up dma settings */ 96 + dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS); 97 + dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY); 98 + 99 + if (dev->dma_dir) 100 + dma_reg |= R852_DMA_READ; 101 + 102 + if (dev->dma_state == DMA_INTERNAL) { 103 + dma_reg |= R852_DMA_INTERNAL; 104 + /* Precaution to make sure HW doesn't write */ 105 + /* to random kernel memory */ 106 + r852_write_reg_dword(dev, R852_DMA_ADDR, 107 + cpu_to_le32(dev->phys_bounce_buffer)); 108 + } else { 109 + dma_reg |= R852_DMA_MEMORY; 110 + r852_write_reg_dword(dev, R852_DMA_ADDR, 111 + cpu_to_le32(dev->phys_dma_addr)); 112 + } 113 + 114 + /* Precaution: make sure write reached the device */ 115 + r852_read_reg_dword(dev, R852_DMA_ADDR); 116 + 117 + r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg); 118 + 119 + /* Set dma irq */ 120 + dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE); 121 + r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 122 + dma_irq_reg | 123 + R852_DMA_IRQ_INTERNAL | 124 + R852_DMA_IRQ_ERROR | 125 + R852_DMA_IRQ_MEMORY); 126 + } 127 + 128 + /* 129 + * Disable dma, called from the interrupt handler, which specifies 130 + * success of the operation via 'error' 
argument 131 + */ 132 + static void r852_dma_done(struct r852_device *dev, int error) 133 + { 134 + WARN_ON(dev->dma_stage == 0); 135 + 136 + r852_write_reg_dword(dev, R852_DMA_IRQ_STA, 137 + r852_read_reg_dword(dev, R852_DMA_IRQ_STA)); 138 + 139 + r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0); 140 + r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0); 141 + 142 + /* Precaution to make sure HW doesn't write to random kernel memory */ 143 + r852_write_reg_dword(dev, R852_DMA_ADDR, 144 + cpu_to_le32(dev->phys_bounce_buffer)); 145 + r852_read_reg_dword(dev, R852_DMA_ADDR); 146 + 147 + dev->dma_error = error; 148 + dev->dma_stage = 0; 149 + 150 + if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer) 151 + pci_unmap_single(dev->pci_dev, dev->phys_dma_addr, R852_DMA_LEN, 152 + dev->dma_dir ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE); 153 + complete(&dev->dma_done); 154 + } 155 + 156 + /* 157 + * Wait, till dma is done, which includes both phases of it 158 + */ 159 + static int r852_dma_wait(struct r852_device *dev) 160 + { 161 + long timeout = wait_for_completion_timeout(&dev->dma_done, 162 + msecs_to_jiffies(1000)); 163 + if (!timeout) { 164 + dbg("timeout waiting for DMA interrupt"); 165 + return -ETIMEDOUT; 166 + } 167 + 168 + return 0; 169 + } 170 + 171 + /* 172 + * Read/Write one page using dma. Only pages can be read (512 bytes) 173 + */ 174 + static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read) 175 + { 176 + int bounce = 0; 177 + unsigned long flags; 178 + int error; 179 + 180 + dev->dma_error = 0; 181 + 182 + /* Set dma direction */ 183 + dev->dma_dir = do_read; 184 + dev->dma_stage = 1; 185 + 186 + dbg_verbose("doing dma %s ", do_read ? "read" : "write"); 187 + 188 + /* Set intial dma state: for reading first fill on board buffer, 189 + from device, for writes first fill the buffer from memory*/ 190 + dev->dma_state = do_read ? 
DMA_INTERNAL : DMA_MEMORY; 191 + 192 + /* if incoming buffer is not page aligned, we should do bounce */ 193 + if ((unsigned long)buf & (R852_DMA_LEN-1)) 194 + bounce = 1; 195 + 196 + if (!bounce) { 197 + dev->phys_dma_addr = pci_map_single(dev->pci_dev, (void *)buf, 198 + R852_DMA_LEN, 199 + (do_read ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE)); 200 + 201 + if (pci_dma_mapping_error(dev->pci_dev, dev->phys_dma_addr)) 202 + bounce = 1; 203 + } 204 + 205 + if (bounce) { 206 + dbg_verbose("dma: using bounce buffer"); 207 + dev->phys_dma_addr = dev->phys_bounce_buffer; 208 + if (!do_read) 209 + memcpy(dev->bounce_buffer, buf, R852_DMA_LEN); 210 + } 211 + 212 + /* Enable DMA */ 213 + spin_lock_irqsave(&dev->irqlock, flags); 214 + r852_dma_enable(dev); 215 + spin_unlock_irqrestore(&dev->irqlock, flags); 216 + 217 + /* Wait till complete */ 218 + error = r852_dma_wait(dev); 219 + 220 + if (error) { 221 + r852_dma_done(dev, error); 222 + return; 223 + } 224 + 225 + if (do_read && bounce) 226 + memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN); 227 + } 228 + 229 + /* 230 + * Program data lines of the nand chip to send data to it 231 + */ 232 + void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 233 + { 234 + struct r852_device *dev = r852_get_dev(mtd); 235 + uint32_t reg; 236 + 237 + /* Don't allow any access to hardware if we suspect card removal */ 238 + if (dev->card_unstable) 239 + return; 240 + 241 + /* Special case for whole sector read */ 242 + if (len == R852_DMA_LEN && dev->dma_usable) { 243 + r852_do_dma(dev, (uint8_t *)buf, 0); 244 + return; 245 + } 246 + 247 + /* write DWORD chinks - faster */ 248 + while (len) { 249 + reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24; 250 + r852_write_reg_dword(dev, R852_DATALINE, reg); 251 + buf += 4; 252 + len -= 4; 253 + 254 + } 255 + 256 + /* write rest */ 257 + while (len) 258 + r852_write_reg(dev, R852_DATALINE, *buf++); 259 + } 260 + 261 + /* 262 + * Read data lines of the nand chip to 
retrieve data 263 + */ 264 + void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) 265 + { 266 + struct r852_device *dev = r852_get_dev(mtd); 267 + uint32_t reg; 268 + 269 + if (dev->card_unstable) { 270 + /* since we can't signal error here, at least, return 271 + predictable buffer */ 272 + memset(buf, 0, len); 273 + return; 274 + } 275 + 276 + /* special case for whole sector read */ 277 + if (len == R852_DMA_LEN && dev->dma_usable) { 278 + r852_do_dma(dev, buf, 1); 279 + return; 280 + } 281 + 282 + /* read in dword sized chunks */ 283 + while (len >= 4) { 284 + 285 + reg = r852_read_reg_dword(dev, R852_DATALINE); 286 + *buf++ = reg & 0xFF; 287 + *buf++ = (reg >> 8) & 0xFF; 288 + *buf++ = (reg >> 16) & 0xFF; 289 + *buf++ = (reg >> 24) & 0xFF; 290 + len -= 4; 291 + } 292 + 293 + /* read the reset by bytes */ 294 + while (len--) 295 + *buf++ = r852_read_reg(dev, R852_DATALINE); 296 + } 297 + 298 + /* 299 + * Read one byte from nand chip 300 + */ 301 + static uint8_t r852_read_byte(struct mtd_info *mtd) 302 + { 303 + struct r852_device *dev = r852_get_dev(mtd); 304 + 305 + /* Same problem as in r852_read_buf.... */ 306 + if (dev->card_unstable) 307 + return 0; 308 + 309 + return r852_read_reg(dev, R852_DATALINE); 310 + } 311 + 312 + 313 + /* 314 + * Readback the buffer to verify it 315 + */ 316 + int r852_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 317 + { 318 + struct r852_device *dev = r852_get_dev(mtd); 319 + 320 + /* We can't be sure about anything here... 
*/ 321 + if (dev->card_unstable) 322 + return -1; 323 + 324 + /* This will never happen, unless you wired up a nand chip 325 + with > 512 bytes page size to the reader */ 326 + if (len > SM_SECTOR_SIZE) 327 + return 0; 328 + 329 + r852_read_buf(mtd, dev->tmp_buffer, len); 330 + return memcmp(buf, dev->tmp_buffer, len); 331 + } 332 + 333 + /* 334 + * Control several chip lines & send commands 335 + */ 336 + void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl) 337 + { 338 + struct r852_device *dev = r852_get_dev(mtd); 339 + 340 + if (dev->card_unstable) 341 + return; 342 + 343 + if (ctrl & NAND_CTRL_CHANGE) { 344 + 345 + dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND | 346 + R852_CTL_ON | R852_CTL_CARDENABLE); 347 + 348 + if (ctrl & NAND_ALE) 349 + dev->ctlreg |= R852_CTL_DATA; 350 + 351 + if (ctrl & NAND_CLE) 352 + dev->ctlreg |= R852_CTL_COMMAND; 353 + 354 + if (ctrl & NAND_NCE) 355 + dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON); 356 + else 357 + dev->ctlreg &= ~R852_CTL_WRITE; 358 + 359 + /* when write is stareted, enable write access */ 360 + if (dat == NAND_CMD_ERASE1) 361 + dev->ctlreg |= R852_CTL_WRITE; 362 + 363 + r852_write_reg(dev, R852_CTL, dev->ctlreg); 364 + } 365 + 366 + /* HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we need 367 + to set write mode */ 368 + if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) { 369 + dev->ctlreg |= R852_CTL_WRITE; 370 + r852_write_reg(dev, R852_CTL, dev->ctlreg); 371 + } 372 + 373 + if (dat != NAND_CMD_NONE) 374 + r852_write_reg(dev, R852_DATALINE, dat); 375 + } 376 + 377 + /* 378 + * Wait till card is ready. 379 + * based on nand_wait, but returns errors on DMA error 380 + */ 381 + int r852_wait(struct mtd_info *mtd, struct nand_chip *chip) 382 + { 383 + struct r852_device *dev = (struct r852_device *)chip->priv; 384 + 385 + unsigned long timeout; 386 + int status; 387 + 388 + timeout = jiffies + (chip->state == FL_ERASING ? 
389 + msecs_to_jiffies(400) : msecs_to_jiffies(20)); 390 + 391 + while (time_before(jiffies, timeout)) 392 + if (chip->dev_ready(mtd)) 393 + break; 394 + 395 + chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); 396 + status = (int)chip->read_byte(mtd); 397 + 398 + /* Unfortunelly, no way to send detailed error status... */ 399 + if (dev->dma_error) { 400 + status |= NAND_STATUS_FAIL; 401 + dev->dma_error = 0; 402 + } 403 + return status; 404 + } 405 + 406 + /* 407 + * Check if card is ready 408 + */ 409 + 410 + int r852_ready(struct mtd_info *mtd) 411 + { 412 + struct r852_device *dev = r852_get_dev(mtd); 413 + return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY); 414 + } 415 + 416 + 417 + /* 418 + * Set ECC engine mode 419 + */ 420 + 421 + void r852_ecc_hwctl(struct mtd_info *mtd, int mode) 422 + { 423 + struct r852_device *dev = r852_get_dev(mtd); 424 + 425 + if (dev->card_unstable) 426 + return; 427 + 428 + switch (mode) { 429 + case NAND_ECC_READ: 430 + case NAND_ECC_WRITE: 431 + /* enable ecc generation/check*/ 432 + dev->ctlreg |= R852_CTL_ECC_ENABLE; 433 + 434 + /* flush ecc buffer */ 435 + r852_write_reg(dev, R852_CTL, 436 + dev->ctlreg | R852_CTL_ECC_ACCESS); 437 + 438 + r852_read_reg_dword(dev, R852_DATALINE); 439 + r852_write_reg(dev, R852_CTL, dev->ctlreg); 440 + return; 441 + 442 + case NAND_ECC_READSYN: 443 + /* disable ecc generation */ 444 + dev->ctlreg &= ~R852_CTL_ECC_ENABLE; 445 + r852_write_reg(dev, R852_CTL, dev->ctlreg); 446 + } 447 + } 448 + 449 + /* 450 + * Calculate ECC, only used for writes 451 + */ 452 + 453 + int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat, 454 + uint8_t *ecc_code) 455 + { 456 + struct r852_device *dev = r852_get_dev(mtd); 457 + struct sm_oob *oob = (struct sm_oob *)ecc_code; 458 + uint32_t ecc1, ecc2; 459 + 460 + if (dev->card_unstable) 461 + return 0; 462 + 463 + dev->ctlreg &= ~R852_CTL_ECC_ENABLE; 464 + r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS); 465 + 466 + ecc1 = 
r852_read_reg_dword(dev, R852_DATALINE); 467 + ecc2 = r852_read_reg_dword(dev, R852_DATALINE); 468 + 469 + oob->ecc1[0] = (ecc1) & 0xFF; 470 + oob->ecc1[1] = (ecc1 >> 8) & 0xFF; 471 + oob->ecc1[2] = (ecc1 >> 16) & 0xFF; 472 + 473 + oob->ecc2[0] = (ecc2) & 0xFF; 474 + oob->ecc2[1] = (ecc2 >> 8) & 0xFF; 475 + oob->ecc2[2] = (ecc2 >> 16) & 0xFF; 476 + 477 + r852_write_reg(dev, R852_CTL, dev->ctlreg); 478 + return 0; 479 + } 480 + 481 + /* 482 + * Correct the data using ECC, hw did almost everything for us 483 + */ 484 + 485 + int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat, 486 + uint8_t *read_ecc, uint8_t *calc_ecc) 487 + { 488 + uint16_t ecc_reg; 489 + uint8_t ecc_status, err_byte; 490 + int i, error = 0; 491 + 492 + struct r852_device *dev = r852_get_dev(mtd); 493 + 494 + if (dev->card_unstable) 495 + return 0; 496 + 497 + r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS); 498 + ecc_reg = r852_read_reg_dword(dev, R852_DATALINE); 499 + r852_write_reg(dev, R852_CTL, dev->ctlreg); 500 + 501 + for (i = 0 ; i <= 1 ; i++) { 502 + 503 + ecc_status = (ecc_reg >> 8) & 0xFF; 504 + 505 + /* ecc uncorrectable error */ 506 + if (ecc_status & R852_ECC_FAIL) { 507 + dbg("ecc: unrecoverable error, in half %d", i); 508 + error = -1; 509 + goto exit; 510 + } 511 + 512 + /* correctable error */ 513 + if (ecc_status & R852_ECC_CORRECTABLE) { 514 + 515 + err_byte = ecc_reg & 0xFF; 516 + dbg("ecc: recoverable error, " 517 + "in half %d, byte %d, bit %d", i, 518 + err_byte, ecc_status & R852_ECC_ERR_BIT_MSK); 519 + 520 + dat[err_byte] ^= 521 + 1 << (ecc_status & R852_ECC_ERR_BIT_MSK); 522 + error++; 523 + } 524 + 525 + dat += 256; 526 + ecc_reg >>= 16; 527 + } 528 + exit: 529 + return error; 530 + } 531 + 532 + /* 533 + * This is copy of nand_read_oob_std 534 + * nand_read_oob_syndrome assumes we can send column address - we can't 535 + */ 536 + static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 537 + int page, int sndcmd) 538 + { 539 + if 
(sndcmd) { 540 + chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); 541 + sndcmd = 0; 542 + } 543 + chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 544 + return sndcmd; 545 + } 546 + 547 + /* 548 + * Start the nand engine 549 + */ 550 + 551 + void r852_engine_enable(struct r852_device *dev) 552 + { 553 + if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) { 554 + r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON); 555 + r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED); 556 + } else { 557 + r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED); 558 + r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON); 559 + } 560 + msleep(300); 561 + r852_write_reg(dev, R852_CTL, 0); 562 + } 563 + 564 + 565 + /* 566 + * Stop the nand engine 567 + */ 568 + 569 + void r852_engine_disable(struct r852_device *dev) 570 + { 571 + r852_write_reg_dword(dev, R852_HW, 0); 572 + r852_write_reg(dev, R852_CTL, R852_CTL_RESET); 573 + } 574 + 575 + /* 576 + * Test if card is present 577 + */ 578 + 579 + void r852_card_update_present(struct r852_device *dev) 580 + { 581 + unsigned long flags; 582 + uint8_t reg; 583 + 584 + spin_lock_irqsave(&dev->irqlock, flags); 585 + reg = r852_read_reg(dev, R852_CARD_STA); 586 + dev->card_detected = !!(reg & R852_CARD_STA_PRESENT); 587 + spin_unlock_irqrestore(&dev->irqlock, flags); 588 + } 589 + 590 + /* 591 + * Update card detection IRQ state according to current card state 592 + * which is read in r852_card_update_present 593 + */ 594 + void r852_update_card_detect(struct r852_device *dev) 595 + { 596 + int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE); 597 + dev->card_unstable = 0; 598 + 599 + card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT); 600 + card_detect_reg |= R852_CARD_IRQ_GENABLE; 601 + 602 + card_detect_reg |= dev->card_detected ? 
603 + R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT; 604 + 605 + r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg); 606 + } 607 + 608 + ssize_t r852_media_type_show(struct device *sys_dev, 609 + struct device_attribute *attr, char *buf) 610 + { 611 + struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev); 612 + struct r852_device *dev = r852_get_dev(mtd); 613 + char *data = dev->sm ? "smartmedia" : "xd"; 614 + 615 + strcpy(buf, data); 616 + return strlen(data); 617 + } 618 + 619 + DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL); 620 + 621 + 622 + /* Detect properties of card in slot */ 623 + void r852_update_media_status(struct r852_device *dev) 624 + { 625 + uint8_t reg; 626 + unsigned long flags; 627 + int readonly; 628 + 629 + spin_lock_irqsave(&dev->irqlock, flags); 630 + if (!dev->card_detected) { 631 + message("card removed"); 632 + spin_unlock_irqrestore(&dev->irqlock, flags); 633 + return ; 634 + } 635 + 636 + readonly = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO; 637 + reg = r852_read_reg(dev, R852_DMA_CAP); 638 + dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT); 639 + 640 + message("detected %s %s card in slot", 641 + dev->sm ? "SmartMedia" : "xD", 642 + readonly ? 
"readonly" : "writeable"); 643 + 644 + dev->readonly = readonly; 645 + spin_unlock_irqrestore(&dev->irqlock, flags); 646 + } 647 + 648 + /* 649 + * Register the nand device 650 + * Called when the card is detected 651 + */ 652 + int r852_register_nand_device(struct r852_device *dev) 653 + { 654 + dev->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL); 655 + 656 + if (!dev->mtd) 657 + goto error1; 658 + 659 + WARN_ON(dev->card_registred); 660 + 661 + dev->mtd->owner = THIS_MODULE; 662 + dev->mtd->priv = dev->chip; 663 + dev->mtd->dev.parent = &dev->pci_dev->dev; 664 + 665 + if (dev->readonly) 666 + dev->chip->options |= NAND_ROM; 667 + 668 + r852_engine_enable(dev); 669 + 670 + if (sm_register_device(dev->mtd, dev->sm)) 671 + goto error2; 672 + 673 + if (device_create_file(&dev->mtd->dev, &dev_attr_media_type)) 674 + message("can't create media type sysfs attribute"); 675 + 676 + dev->card_registred = 1; 677 + return 0; 678 + error2: 679 + kfree(dev->mtd); 680 + error1: 681 + /* Force card redetect */ 682 + dev->card_detected = 0; 683 + return -1; 684 + } 685 + 686 + /* 687 + * Unregister the card 688 + */ 689 + 690 + void r852_unregister_nand_device(struct r852_device *dev) 691 + { 692 + if (!dev->card_registred) 693 + return; 694 + 695 + device_remove_file(&dev->mtd->dev, &dev_attr_media_type); 696 + nand_release(dev->mtd); 697 + r852_engine_disable(dev); 698 + dev->card_registred = 0; 699 + kfree(dev->mtd); 700 + dev->mtd = NULL; 701 + } 702 + 703 + /* Card state updater */ 704 + void r852_card_detect_work(struct work_struct *work) 705 + { 706 + struct r852_device *dev = 707 + container_of(work, struct r852_device, card_detect_work.work); 708 + 709 + r852_card_update_present(dev); 710 + dev->card_unstable = 0; 711 + 712 + /* False alarm */ 713 + if (dev->card_detected == dev->card_registred) 714 + goto exit; 715 + 716 + /* Read media properties */ 717 + r852_update_media_status(dev); 718 + 719 + /* Register the card */ 720 + if (dev->card_detected) 721 + 
r852_register_nand_device(dev); 722 + else 723 + r852_unregister_nand_device(dev); 724 + exit: 725 + /* Update detection logic */ 726 + r852_update_card_detect(dev); 727 + } 728 + 729 + /* Ack + disable IRQ generation */ 730 + static void r852_disable_irqs(struct r852_device *dev) 731 + { 732 + uint8_t reg; 733 + reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE); 734 + r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK); 735 + 736 + reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE); 737 + r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 738 + reg & ~R852_DMA_IRQ_MASK); 739 + 740 + r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK); 741 + r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK); 742 + } 743 + 744 + /* Interrupt handler */ 745 + static irqreturn_t r852_irq(int irq, void *data) 746 + { 747 + struct r852_device *dev = (struct r852_device *)data; 748 + 749 + uint8_t card_status, dma_status; 750 + unsigned long flags; 751 + irqreturn_t ret = IRQ_NONE; 752 + 753 + spin_lock_irqsave(&dev->irqlock, flags); 754 + 755 + /* We can recieve shared interrupt while pci is suspended 756 + in that case reads will return 0xFFFFFFFF.... 
*/ 757 + if (dev->insuspend) 758 + goto out; 759 + 760 + /* handle card detection interrupts first */ 761 + card_status = r852_read_reg(dev, R852_CARD_IRQ_STA); 762 + r852_write_reg(dev, R852_CARD_IRQ_STA, card_status); 763 + 764 + if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) { 765 + 766 + ret = IRQ_HANDLED; 767 + dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT); 768 + 769 + /* we shouldn't recieve any interrupts if we wait for card 770 + to settle */ 771 + WARN_ON(dev->card_unstable); 772 + 773 + /* disable irqs while card is unstable */ 774 + /* this will timeout DMA if active, but better that garbage */ 775 + r852_disable_irqs(dev); 776 + 777 + if (dev->card_unstable) 778 + goto out; 779 + 780 + /* let, card state to settle a bit, and then do the work */ 781 + dev->card_unstable = 1; 782 + queue_delayed_work(dev->card_workqueue, 783 + &dev->card_detect_work, msecs_to_jiffies(100)); 784 + goto out; 785 + } 786 + 787 + 788 + /* Handle dma interrupts */ 789 + dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA); 790 + r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status); 791 + 792 + if (dma_status & R852_DMA_IRQ_MASK) { 793 + 794 + ret = IRQ_HANDLED; 795 + 796 + if (dma_status & R852_DMA_IRQ_ERROR) { 797 + dbg("recieved dma error IRQ"); 798 + r852_dma_done(dev, -EIO); 799 + goto out; 800 + } 801 + 802 + /* recieved DMA interrupt out of nowhere? 
*/ 803 + WARN_ON_ONCE(dev->dma_stage == 0); 804 + 805 + if (dev->dma_stage == 0) 806 + goto out; 807 + 808 + /* done device access */ 809 + if (dev->dma_state == DMA_INTERNAL && 810 + (dma_status & R852_DMA_IRQ_INTERNAL)) { 811 + 812 + dev->dma_state = DMA_MEMORY; 813 + dev->dma_stage++; 814 + } 815 + 816 + /* done memory DMA */ 817 + if (dev->dma_state == DMA_MEMORY && 818 + (dma_status & R852_DMA_IRQ_MEMORY)) { 819 + dev->dma_state = DMA_INTERNAL; 820 + dev->dma_stage++; 821 + } 822 + 823 + /* Enable 2nd half of dma dance */ 824 + if (dev->dma_stage == 2) 825 + r852_dma_enable(dev); 826 + 827 + /* Operation done */ 828 + if (dev->dma_stage == 3) 829 + r852_dma_done(dev, 0); 830 + goto out; 831 + } 832 + 833 + /* Handle unknown interrupts */ 834 + if (dma_status) 835 + dbg("bad dma IRQ status = %x", dma_status); 836 + 837 + if (card_status & ~R852_CARD_STA_CD) 838 + dbg("strange card status = %x", card_status); 839 + 840 + out: 841 + spin_unlock_irqrestore(&dev->irqlock, flags); 842 + return ret; 843 + } 844 + 845 + int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) 846 + { 847 + int error; 848 + struct nand_chip *chip; 849 + struct r852_device *dev; 850 + 851 + /* pci initialization */ 852 + error = pci_enable_device(pci_dev); 853 + 854 + if (error) 855 + goto error1; 856 + 857 + pci_set_master(pci_dev); 858 + 859 + error = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)); 860 + if (error) 861 + goto error2; 862 + 863 + error = pci_request_regions(pci_dev, DRV_NAME); 864 + 865 + if (error) 866 + goto error3; 867 + 868 + error = -ENOMEM; 869 + 870 + /* init nand chip, but register it only on card insert */ 871 + chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL); 872 + 873 + if (!chip) 874 + goto error4; 875 + 876 + /* commands */ 877 + chip->cmd_ctrl = r852_cmdctl; 878 + chip->waitfunc = r852_wait; 879 + chip->dev_ready = r852_ready; 880 + 881 + /* I/O */ 882 + chip->read_byte = r852_read_byte; 883 + chip->read_buf = r852_read_buf; 884 + 
chip->write_buf = r852_write_buf; 885 + chip->verify_buf = r852_verify_buf; 886 + 887 + /* ecc */ 888 + chip->ecc.mode = NAND_ECC_HW_SYNDROME; 889 + chip->ecc.size = R852_DMA_LEN; 890 + chip->ecc.bytes = SM_OOB_SIZE; 891 + chip->ecc.hwctl = r852_ecc_hwctl; 892 + chip->ecc.calculate = r852_ecc_calculate; 893 + chip->ecc.correct = r852_ecc_correct; 894 + 895 + /* TODO: hack */ 896 + chip->ecc.read_oob = r852_read_oob; 897 + 898 + /* init our device structure */ 899 + dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL); 900 + 901 + if (!dev) 902 + goto error5; 903 + 904 + chip->priv = dev; 905 + dev->chip = chip; 906 + dev->pci_dev = pci_dev; 907 + pci_set_drvdata(pci_dev, dev); 908 + 909 + dev->bounce_buffer = pci_alloc_consistent(pci_dev, R852_DMA_LEN, 910 + &dev->phys_bounce_buffer); 911 + 912 + if (!dev->bounce_buffer) 913 + goto error6; 914 + 915 + 916 + error = -ENODEV; 917 + dev->mmio = pci_ioremap_bar(pci_dev, 0); 918 + 919 + if (!dev->mmio) 920 + goto error7; 921 + 922 + error = -ENOMEM; 923 + dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL); 924 + 925 + if (!dev->tmp_buffer) 926 + goto error8; 927 + 928 + init_completion(&dev->dma_done); 929 + 930 + dev->card_workqueue = create_freezeable_workqueue(DRV_NAME); 931 + 932 + if (!dev->card_workqueue) 933 + goto error9; 934 + 935 + INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work); 936 + 937 + /* shutdown everything - precation */ 938 + r852_engine_disable(dev); 939 + r852_disable_irqs(dev); 940 + 941 + r852_dma_test(dev); 942 + 943 + /*register irq handler*/ 944 + error = -ENODEV; 945 + if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED, 946 + DRV_NAME, dev)) 947 + goto error10; 948 + 949 + dev->irq = pci_dev->irq; 950 + spin_lock_init(&dev->irqlock); 951 + 952 + /* kick initial present test */ 953 + dev->card_detected = 0; 954 + r852_card_update_present(dev); 955 + queue_delayed_work(dev->card_workqueue, 956 + &dev->card_detect_work, 0); 957 + 958 + 959 + printk(KERN_NOTICE DRV_NAME ": 
driver loaded succesfully\n"); 960 + return 0; 961 + 962 + error10: 963 + destroy_workqueue(dev->card_workqueue); 964 + error9: 965 + kfree(dev->tmp_buffer); 966 + error8: 967 + pci_iounmap(pci_dev, dev->mmio); 968 + error7: 969 + pci_free_consistent(pci_dev, R852_DMA_LEN, 970 + dev->bounce_buffer, dev->phys_bounce_buffer); 971 + error6: 972 + kfree(dev); 973 + error5: 974 + kfree(chip); 975 + error4: 976 + pci_release_regions(pci_dev); 977 + error3: 978 + error2: 979 + pci_disable_device(pci_dev); 980 + error1: 981 + return error; 982 + } 983 + 984 + void r852_remove(struct pci_dev *pci_dev) 985 + { 986 + struct r852_device *dev = pci_get_drvdata(pci_dev); 987 + 988 + /* Stop detect workqueue - 989 + we are going to unregister the device anyway*/ 990 + cancel_delayed_work_sync(&dev->card_detect_work); 991 + destroy_workqueue(dev->card_workqueue); 992 + 993 + /* Unregister the device, this might make more IO */ 994 + r852_unregister_nand_device(dev); 995 + 996 + /* Stop interrupts */ 997 + r852_disable_irqs(dev); 998 + synchronize_irq(dev->irq); 999 + free_irq(dev->irq, dev); 1000 + 1001 + /* Cleanup */ 1002 + kfree(dev->tmp_buffer); 1003 + pci_iounmap(pci_dev, dev->mmio); 1004 + pci_free_consistent(pci_dev, R852_DMA_LEN, 1005 + dev->bounce_buffer, dev->phys_bounce_buffer); 1006 + 1007 + kfree(dev->chip); 1008 + kfree(dev); 1009 + 1010 + /* Shutdown the PCI device */ 1011 + pci_release_regions(pci_dev); 1012 + pci_disable_device(pci_dev); 1013 + } 1014 + 1015 + void r852_shutdown(struct pci_dev *pci_dev) 1016 + { 1017 + struct r852_device *dev = pci_get_drvdata(pci_dev); 1018 + 1019 + cancel_delayed_work_sync(&dev->card_detect_work); 1020 + r852_disable_irqs(dev); 1021 + synchronize_irq(dev->irq); 1022 + pci_disable_device(pci_dev); 1023 + } 1024 + 1025 + #ifdef CONFIG_PM 1026 + int r852_suspend(struct device *device) 1027 + { 1028 + struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); 1029 + unsigned long flags; 1030 + 1031 + if (dev->ctlreg & 
R852_CTL_CARDENABLE) 1032 + return -EBUSY; 1033 + 1034 + /* First make sure the detect work is gone */ 1035 + cancel_delayed_work_sync(&dev->card_detect_work); 1036 + 1037 + /* Turn off the interrupts and stop the device */ 1038 + r852_disable_irqs(dev); 1039 + r852_engine_disable(dev); 1040 + 1041 + spin_lock_irqsave(&dev->irqlock, flags); 1042 + dev->insuspend = 1; 1043 + spin_unlock_irqrestore(&dev->irqlock, flags); 1044 + 1045 + /* At that point, even if interrupt handler is running, it will quit */ 1046 + /* So wait for this to happen explictly */ 1047 + synchronize_irq(dev->irq); 1048 + 1049 + /* If card was pulled off just during the suspend, which is very 1050 + unlikely, we will remove it on resume, it too late now 1051 + anyway... */ 1052 + dev->card_unstable = 0; 1053 + 1054 + pci_save_state(to_pci_dev(device)); 1055 + return pci_prepare_to_sleep(to_pci_dev(device)); 1056 + } 1057 + 1058 + int r852_resume(struct device *device) 1059 + { 1060 + struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); 1061 + unsigned long flags; 1062 + 1063 + /* Turn on the hardware */ 1064 + pci_back_from_sleep(to_pci_dev(device)); 1065 + pci_restore_state(to_pci_dev(device)); 1066 + 1067 + r852_disable_irqs(dev); 1068 + r852_card_update_present(dev); 1069 + r852_engine_disable(dev); 1070 + 1071 + 1072 + /* Now its safe for IRQ to run */ 1073 + spin_lock_irqsave(&dev->irqlock, flags); 1074 + dev->insuspend = 0; 1075 + spin_unlock_irqrestore(&dev->irqlock, flags); 1076 + 1077 + 1078 + /* If card status changed, just do the work */ 1079 + if (dev->card_detected != dev->card_registred) { 1080 + dbg("card was %s during low power state", 1081 + dev->card_detected ? 
"added" : "removed"); 1082 + 1083 + queue_delayed_work(dev->card_workqueue, 1084 + &dev->card_detect_work, 1000); 1085 + return 0; 1086 + } 1087 + 1088 + /* Otherwise, initialize the card */ 1089 + if (dev->card_registred) { 1090 + r852_engine_enable(dev); 1091 + dev->chip->select_chip(dev->mtd, 0); 1092 + dev->chip->cmdfunc(dev->mtd, NAND_CMD_RESET, -1, -1); 1093 + dev->chip->select_chip(dev->mtd, -1); 1094 + } 1095 + 1096 + /* Program card detection IRQ */ 1097 + r852_update_card_detect(dev); 1098 + return 0; 1099 + } 1100 + #else 1101 + #define r852_suspend NULL 1102 + #define r852_resume NULL 1103 + #endif 1104 + 1105 + static const struct pci_device_id r852_pci_id_tbl[] = { 1106 + 1107 + { PCI_VDEVICE(RICOH, 0x0852), }, 1108 + { }, 1109 + }; 1110 + 1111 + MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl); 1112 + 1113 + SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume); 1114 + 1115 + 1116 + static struct pci_driver r852_pci_driver = { 1117 + .name = DRV_NAME, 1118 + .id_table = r852_pci_id_tbl, 1119 + .probe = r852_probe, 1120 + .remove = r852_remove, 1121 + .shutdown = r852_shutdown, 1122 + .driver.pm = &r852_pm_ops, 1123 + }; 1124 + 1125 + static __init int r852_module_init(void) 1126 + { 1127 + return pci_register_driver(&r852_pci_driver); 1128 + } 1129 + 1130 + static void __exit r852_module_exit(void) 1131 + { 1132 + pci_unregister_driver(&r852_pci_driver); 1133 + } 1134 + 1135 + module_init(r852_module_init); 1136 + module_exit(r852_module_exit); 1137 + 1138 + MODULE_LICENSE("GPL"); 1139 + MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>"); 1140 + MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver");
+163
drivers/mtd/nand/r852.h
··· 1 + /* 2 + * Copyright © 2009 - Maxim Levitsky 3 + * driver for Ricoh xD readers 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License version 2 as 7 + * published by the Free Software Foundation. 8 + */ 9 + 10 + #include <linux/pci.h> 11 + #include <linux/completion.h> 12 + #include <linux/workqueue.h> 13 + #include <linux/mtd/nand.h> 14 + #include <linux/spinlock.h> 15 + 16 + 17 + /* nand interface + ecc 18 + byte write/read does one cycle on nand data lines. 19 + dword write/read does 4 cycles 20 + if R852_CTL_ECC_ACCESS is set in R852_CTL, then dword read reads 21 + results of ecc correction, if DMA read was done before. 22 + If write was done two dword reads read generated ecc checksums 23 + */ 24 + #define R852_DATALINE 0x00 25 + 26 + /* control register */ 27 + #define R852_CTL 0x04 28 + #define R852_CTL_COMMAND 0x01 /* send command (#CLE)*/ 29 + #define R852_CTL_DATA 0x02 /* read/write data (#ALE)*/ 30 + #define R852_CTL_ON 0x04 /* only seem to controls the hd led, */ 31 + /* but has to be set on start...*/ 32 + #define R852_CTL_RESET 0x08 /* unknown, set only on start once*/ 33 + #define R852_CTL_CARDENABLE 0x10 /* probably (#CE) - always set*/ 34 + #define R852_CTL_ECC_ENABLE 0x20 /* enable ecc engine */ 35 + #define R852_CTL_ECC_ACCESS 0x40 /* read/write ecc via reg #0*/ 36 + #define R852_CTL_WRITE 0x80 /* set when performing writes (#WP) */ 37 + 38 + /* card detection status */ 39 + #define R852_CARD_STA 0x05 40 + 41 + #define R852_CARD_STA_CD 0x01 /* state of #CD line, same as 0x04 */ 42 + #define R852_CARD_STA_RO 0x02 /* card is readonly */ 43 + #define R852_CARD_STA_PRESENT 0x04 /* card is present (#CD) */ 44 + #define R852_CARD_STA_ABSENT 0x08 /* card is absent */ 45 + #define R852_CARD_STA_BUSY 0x80 /* card is busy - (#R/B) */ 46 + 47 + /* card detection irq status & enable*/ 48 + #define R852_CARD_IRQ_STA 0x06 /* IRQ status */ 49 + #define 
R852_CARD_IRQ_ENABLE 0x07 /* IRQ enable */ 50 + 51 + #define R852_CARD_IRQ_CD 0x01 /* fire when #CD lights, same as 0x04*/ 52 + #define R852_CARD_IRQ_REMOVE 0x04 /* detect card removal */ 53 + #define R852_CARD_IRQ_INSERT 0x08 /* detect card insert */ 54 + #define R852_CARD_IRQ_UNK1 0x10 /* unknown */ 55 + #define R852_CARD_IRQ_GENABLE 0x80 /* general enable */ 56 + #define R852_CARD_IRQ_MASK 0x1D 57 + 58 + 59 + 60 + /* hardware enable */ 61 + #define R852_HW 0x08 62 + #define R852_HW_ENABLED 0x01 /* hw enabled */ 63 + #define R852_HW_UNKNOWN 0x80 64 + 65 + 66 + /* dma capabilities */ 67 + #define R852_DMA_CAP 0x09 68 + #define R852_SMBIT 0x20 /* if set with bit #6 or bit #7, then */ 69 + /* hw is smartmedia */ 70 + #define R852_DMA1 0x40 /* if set w/bit #7, dma is supported */ 71 + #define R852_DMA2 0x80 /* if set w/bit #6, dma is supported */ 72 + 73 + 74 + /* physical DMA address - 32 bit value*/ 75 + #define R852_DMA_ADDR 0x0C 76 + 77 + 78 + /* dma settings */ 79 + #define R852_DMA_SETTINGS 0x10 80 + #define R852_DMA_MEMORY 0x01 /* (memory <-> internal hw buffer) */ 81 + #define R852_DMA_READ 0x02 /* 0 = write, 1 = read */ 82 + #define R852_DMA_INTERNAL 0x04 /* (internal hw buffer <-> card) */ 83 + 84 + /* dma IRQ status */ 85 + #define R852_DMA_IRQ_STA 0x14 86 + 87 + /* dma IRQ enable */ 88 + #define R852_DMA_IRQ_ENABLE 0x18 89 + 90 + #define R852_DMA_IRQ_MEMORY 0x01 /* (memory <-> internal hw buffer) */ 91 + #define R852_DMA_IRQ_ERROR 0x02 /* error did happen */ 92 + #define R852_DMA_IRQ_INTERNAL 0x04 /* (internal hw buffer <-> card) */ 93 + #define R852_DMA_IRQ_MASK 0x07 /* mask of all IRQ bits */ 94 + 95 + 96 + /* ECC syndrome format - read from reg #0 will return two copies of these for 97 + each half of the page. 
98 + first byte is error byte location, and second, bit location + flags */ 99 + #define R852_ECC_ERR_BIT_MSK 0x07 /* error bit location */ 100 + #define R852_ECC_CORRECT 0x10 /* no errors - (guessed) */ 101 + #define R852_ECC_CORRECTABLE 0x20 /* correctable error exist */ 102 + #define R852_ECC_FAIL 0x40 /* non correctable error detected */ 103 + 104 + #define R852_DMA_LEN 512 105 + 106 + #define DMA_INTERNAL 0 107 + #define DMA_MEMORY 1 108 + 109 + struct r852_device { 110 + void __iomem *mmio; /* mmio */ 111 + struct mtd_info *mtd; /* mtd backpointer */ 112 + struct nand_chip *chip; /* nand chip backpointer */ 113 + struct pci_dev *pci_dev; /* pci backpointer */ 114 + 115 + /* dma area */ 116 + dma_addr_t phys_dma_addr; /* bus address of buffer*/ 117 + struct completion dma_done; /* data transfer done */ 118 + 119 + dma_addr_t phys_bounce_buffer; /* bus address of bounce buffer */ 120 + uint8_t *bounce_buffer; /* virtual address of bounce buffer */ 121 + 122 + int dma_dir; /* 1 = read, 0 = write */ 123 + int dma_stage; /* 0 - idle, 1 - first step, 124 + 2 - second step */ 125 + 126 + int dma_state; /* 0 = internal, 1 = memory */ 127 + int dma_error; /* dma errors */ 128 + int dma_usable; /* is it possible to use dma */ 129 + 130 + /* card status area */ 131 + struct delayed_work card_detect_work; 132 + struct workqueue_struct *card_workqueue; 133 + int card_registred; /* card registered with mtd */ 134 + int card_detected; /* card detected in slot */ 135 + int card_unstable; /* whenever the card is inserted, 136 + is not known yet */ 137 + int readonly; /* card is readonly */ 138 + int sm; /* Is card smartmedia */ 139 + 140 + /* interrupt handling */ 141 + spinlock_t irqlock; /* IRQ protecting lock */ 142 + int irq; /* irq num */ 143 + int insuspend; /* device is suspended */ 144 + 145 + /* misc */ 146 + void *tmp_buffer; /* temporary buffer */ 147 + uint8_t ctlreg; /* cached contents of control reg */ 148 + }; 149 + 150 + #define DRV_NAME "r852" 151 + 152 + 153 
+ #define dbg(format, ...) \ 154 + if (debug) \ 155 + printk(KERN_DEBUG DRV_NAME ": " format "\n", ## __VA_ARGS__) 156 + 157 + #define dbg_verbose(format, ...) \ 158 + if (debug > 1) \ 159 + printk(KERN_DEBUG DRV_NAME ": " format "\n", ## __VA_ARGS__) 160 + 161 + 162 + #define message(format, ...) \ 163 + printk(KERN_INFO DRV_NAME ": " format "\n", ## __VA_ARGS__)
+5 -7
drivers/mtd/nand/s3c2410.c
··· 929 929 930 930 pr_debug("s3c2410_nand_probe(%p)\n", pdev); 931 931 932 - info = kmalloc(sizeof(*info), GFP_KERNEL); 932 + info = kzalloc(sizeof(*info), GFP_KERNEL); 933 933 if (info == NULL) { 934 934 dev_err(&pdev->dev, "no memory for flash info\n"); 935 935 err = -ENOMEM; 936 936 goto exit_error; 937 937 } 938 938 939 - memset(info, 0, sizeof(*info)); 940 939 platform_set_drvdata(pdev, info); 941 940 942 941 spin_lock_init(&info->controller.lock); ··· 956 957 957 958 /* currently we assume we have the one resource */ 958 959 res = pdev->resource; 959 - size = res->end - res->start + 1; 960 + size = resource_size(res); 960 961 961 962 info->area = request_mem_region(res->start, size, pdev->name); 962 963 ··· 993 994 /* allocate our information */ 994 995 995 996 size = nr_sets * sizeof(*info->mtds); 996 - info->mtds = kmalloc(size, GFP_KERNEL); 997 + info->mtds = kzalloc(size, GFP_KERNEL); 997 998 if (info->mtds == NULL) { 998 999 dev_err(&pdev->dev, "failed to allocate mtd storage\n"); 999 1000 err = -ENOMEM; 1000 1001 goto exit_error; 1001 1002 } 1002 - 1003 - memset(info->mtds, 0, size); 1004 1003 1005 1004 /* initialise all possible chips */ 1006 1005 ··· 1010 1013 s3c2410_nand_init_chip(info, nmtd, sets); 1011 1014 1012 1015 nmtd->scan_res = nand_scan_ident(&nmtd->mtd, 1013 - (sets) ? sets->nr_chips : 1); 1016 + (sets) ? sets->nr_chips : 1, 1017 + NULL); 1014 1018 1015 1019 if (nmtd->scan_res == 0) { 1016 1020 s3c2410_nand_update_chip(info, nmtd);
+1 -1
drivers/mtd/nand/sh_flctl.c
··· 855 855 nand->read_word = flctl_read_word; 856 856 } 857 857 858 - ret = nand_scan_ident(flctl_mtd, 1); 858 + ret = nand_scan_ident(flctl_mtd, 1, NULL); 859 859 if (ret) 860 860 goto err; 861 861
+148
drivers/mtd/nand/sm_common.c
··· 1 + /* 2 + * Copyright © 2009 - Maxim Levitsky 3 + * Common routines & support for xD format 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License version 2 as 7 + * published by the Free Software Foundation. 8 + */ 9 + #include <linux/kernel.h> 10 + #include <linux/mtd/nand.h> 11 + #include "sm_common.h" 12 + 13 + static struct nand_ecclayout nand_oob_sm = { 14 + .eccbytes = 6, 15 + .eccpos = {8, 9, 10, 13, 14, 15}, 16 + .oobfree = { 17 + {.offset = 0 , .length = 4}, /* reserved */ 18 + {.offset = 6 , .length = 2}, /* LBA1 */ 19 + {.offset = 11, .length = 2} /* LBA2 */ 20 + } 21 + }; 22 + 23 + /* NOTE: This layout is is not compatabable with SmartMedia, */ 24 + /* because the 256 byte devices have page depenent oob layout */ 25 + /* However it does preserve the bad block markers */ 26 + /* If you use smftl, it will bypass this and work correctly */ 27 + /* If you not, then you break SmartMedia compliance anyway */ 28 + 29 + static struct nand_ecclayout nand_oob_sm_small = { 30 + .eccbytes = 3, 31 + .eccpos = {0, 1, 2}, 32 + .oobfree = { 33 + {.offset = 3 , .length = 2}, /* reserved */ 34 + {.offset = 6 , .length = 2}, /* LBA1 */ 35 + } 36 + }; 37 + 38 + 39 + static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs) 40 + { 41 + struct mtd_oob_ops ops; 42 + struct sm_oob oob; 43 + int ret, error = 0; 44 + 45 + memset(&oob, -1, SM_OOB_SIZE); 46 + oob.block_status = 0x0F; 47 + 48 + /* As long as this function is called on erase block boundaries 49 + it will work correctly for 256 byte nand */ 50 + ops.mode = MTD_OOB_PLACE; 51 + ops.ooboffs = 0; 52 + ops.ooblen = mtd->oobsize; 53 + ops.oobbuf = (void *)&oob; 54 + ops.datbuf = NULL; 55 + 56 + 57 + ret = mtd->write_oob(mtd, ofs, &ops); 58 + if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) { 59 + printk(KERN_NOTICE 60 + "sm_common: can't mark sector at %i as bad\n", 61 + (int)ofs); 62 + error = -EIO; 63 + } else 64 + 
mtd->ecc_stats.badblocks++; 65 + 66 + return error; 67 + } 68 + 69 + 70 + static struct nand_flash_dev nand_smartmedia_flash_ids[] = { 71 + {"SmartMedia 1MiB 5V", 0x6e, 256, 1, 0x1000, 0}, 72 + {"SmartMedia 1MiB 3,3V", 0xe8, 256, 1, 0x1000, 0}, 73 + {"SmartMedia 1MiB 3,3V", 0xec, 256, 1, 0x1000, 0}, 74 + {"SmartMedia 2MiB 3,3V", 0xea, 256, 2, 0x1000, 0}, 75 + {"SmartMedia 2MiB 5V", 0x64, 256, 2, 0x1000, 0}, 76 + {"SmartMedia 2MiB 3,3V ROM", 0x5d, 512, 2, 0x2000, NAND_ROM}, 77 + {"SmartMedia 4MiB 3,3V", 0xe3, 512, 4, 0x2000, 0}, 78 + {"SmartMedia 4MiB 3,3/5V", 0xe5, 512, 4, 0x2000, 0}, 79 + {"SmartMedia 4MiB 5V", 0x6b, 512, 4, 0x2000, 0}, 80 + {"SmartMedia 4MiB 3,3V ROM", 0xd5, 512, 4, 0x2000, NAND_ROM}, 81 + {"SmartMedia 8MiB 3,3V", 0xe6, 512, 8, 0x2000, 0}, 82 + {"SmartMedia 8MiB 3,3V ROM", 0xd6, 512, 8, 0x2000, NAND_ROM}, 83 + {"SmartMedia 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0}, 84 + {"SmartMedia 16MiB 3,3V ROM", 0x57, 512, 16, 0x4000, NAND_ROM}, 85 + {"SmartMedia 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0}, 86 + {"SmartMedia 32MiB 3,3V ROM", 0x58, 512, 32, 0x4000, NAND_ROM}, 87 + {"SmartMedia 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0}, 88 + {"SmartMedia 64MiB 3,3V ROM", 0xd9, 512, 64, 0x4000, NAND_ROM}, 89 + {"SmartMedia 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0}, 90 + {"SmartMedia 128MiB 3,3V ROM", 0xda, 512, 128, 0x4000, NAND_ROM}, 91 + {"SmartMedia 256MiB 3,3V", 0x71, 512, 256, 0x4000 }, 92 + {"SmartMedia 256MiB 3,3V ROM", 0x5b, 512, 256, 0x4000, NAND_ROM}, 93 + {NULL,} 94 + }; 95 + 96 + #define XD_TYPEM (NAND_NO_AUTOINCR | NAND_BROKEN_XD) 97 + static struct nand_flash_dev nand_xd_flash_ids[] = { 98 + 99 + {"xD 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0}, 100 + {"xD 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0}, 101 + {"xD 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0}, 102 + {"xD 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0}, 103 + {"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, XD_TYPEM}, 104 + {"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, XD_TYPEM}, 105 + {"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, 
XD_TYPEM}, 106 + {"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, XD_TYPEM}, 107 + {NULL,} 108 + }; 109 + 110 + int sm_register_device(struct mtd_info *mtd, int smartmedia) 111 + { 112 + struct nand_chip *chip = (struct nand_chip *)mtd->priv; 113 + int ret; 114 + 115 + chip->options |= NAND_SKIP_BBTSCAN; 116 + 117 + /* Scan for card properties */ 118 + ret = nand_scan_ident(mtd, 1, smartmedia ? 119 + nand_smartmedia_flash_ids : nand_xd_flash_ids); 120 + 121 + if (ret) 122 + return ret; 123 + 124 + /* Bad block marker postion */ 125 + chip->badblockpos = 0x05; 126 + chip->badblockbits = 7; 127 + chip->block_markbad = sm_block_markbad; 128 + 129 + /* ECC layout */ 130 + if (mtd->writesize == SM_SECTOR_SIZE) 131 + chip->ecc.layout = &nand_oob_sm; 132 + else if (mtd->writesize == SM_SMALL_PAGE) 133 + chip->ecc.layout = &nand_oob_sm_small; 134 + else 135 + return -ENODEV; 136 + 137 + ret = nand_scan_tail(mtd); 138 + 139 + if (ret) 140 + return ret; 141 + 142 + return add_mtd_device(mtd); 143 + } 144 + EXPORT_SYMBOL_GPL(sm_register_device); 145 + 146 + MODULE_LICENSE("GPL"); 147 + MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>"); 148 + MODULE_DESCRIPTION("Common SmartMedia/xD functions");
+61
drivers/mtd/nand/sm_common.h
··· 1 + /* 2 + * Copyright © 2009 - Maxim Levitsky 3 + * Common routines & support for SmartMedia/xD format 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License version 2 as 7 + * published by the Free Software Foundation. 8 + */ 9 + #include <linux/bitops.h> 10 + #include <linux/mtd/mtd.h> 11 + 12 + /* Full oob structure as written on the flash */ 13 + struct sm_oob { 14 + uint32_t reserved; 15 + uint8_t data_status; 16 + uint8_t block_status; 17 + uint8_t lba_copy1[2]; 18 + uint8_t ecc2[3]; 19 + uint8_t lba_copy2[2]; 20 + uint8_t ecc1[3]; 21 + } __attribute__((packed)); 22 + 23 + 24 + /* one sector is always 512 bytes, but it can consist of two nand pages */ 25 + #define SM_SECTOR_SIZE 512 26 + 27 + /* oob area is also 16 bytes, but might be from two pages */ 28 + #define SM_OOB_SIZE 16 29 + 30 + /* This is maximum zone size, and all devices that have more that one zone 31 + have this size */ 32 + #define SM_MAX_ZONE_SIZE 1024 33 + 34 + /* support for small page nand */ 35 + #define SM_SMALL_PAGE 256 36 + #define SM_SMALL_OOB_SIZE 8 37 + 38 + 39 + extern int sm_register_device(struct mtd_info *mtd, int smartmedia); 40 + 41 + 42 + static inline int sm_sector_valid(struct sm_oob *oob) 43 + { 44 + return hweight16(oob->data_status) >= 5; 45 + } 46 + 47 + static inline int sm_block_valid(struct sm_oob *oob) 48 + { 49 + return hweight16(oob->block_status) >= 7; 50 + } 51 + 52 + static inline int sm_block_erased(struct sm_oob *oob) 53 + { 54 + static const uint32_t erased_pattern[4] = { 55 + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }; 56 + 57 + /* First test for erased block */ 58 + if (!memcmp(oob, erased_pattern, sizeof(*oob))) 59 + return 1; 60 + return 0; 61 + }
+2 -2
drivers/mtd/nand/socrates_nand.c
··· 220 220 dev_set_drvdata(&ofdev->dev, host); 221 221 222 222 /* first scan to find the device and get the page size */ 223 - if (nand_scan_ident(mtd, 1)) { 223 + if (nand_scan_ident(mtd, 1, NULL)) { 224 224 res = -ENXIO; 225 225 goto out; 226 226 } ··· 290 290 return 0; 291 291 } 292 292 293 - static struct of_device_id socrates_nand_match[] = 293 + static const struct of_device_id socrates_nand_match[] = 294 294 { 295 295 { 296 296 .compatible = "abb,socrates-nand",
+7 -7
drivers/mtd/nand/tmio_nand.c
··· 319 319 320 320 static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio) 321 321 { 322 - struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 322 + struct mfd_cell *cell = dev_get_platdata(&dev->dev); 323 323 int ret; 324 324 325 325 if (cell->enable) { ··· 363 363 364 364 static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio) 365 365 { 366 - struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 366 + struct mfd_cell *cell = dev_get_platdata(&dev->dev); 367 367 368 368 tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE); 369 369 if (cell->disable) ··· 372 372 373 373 static int tmio_probe(struct platform_device *dev) 374 374 { 375 - struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 375 + struct mfd_cell *cell = dev_get_platdata(&dev->dev); 376 376 struct tmio_nand_data *data = cell->driver_data; 377 377 struct resource *fcr = platform_get_resource(dev, 378 378 IORESOURCE_MEM, 0); ··· 405 405 mtd->priv = nand_chip; 406 406 mtd->name = "tmio-nand"; 407 407 408 - tmio->ccr = ioremap(ccr->start, ccr->end - ccr->start + 1); 408 + tmio->ccr = ioremap(ccr->start, resource_size(ccr)); 409 409 if (!tmio->ccr) { 410 410 retval = -EIO; 411 411 goto err_iomap_ccr; 412 412 } 413 413 414 414 tmio->fcr_base = fcr->start & 0xfffff; 415 - tmio->fcr = ioremap(fcr->start, fcr->end - fcr->start + 1); 415 + tmio->fcr = ioremap(fcr->start, resource_size(fcr)); 416 416 if (!tmio->fcr) { 417 417 retval = -EIO; 418 418 goto err_iomap_fcr; ··· 516 516 #ifdef CONFIG_PM 517 517 static int tmio_suspend(struct platform_device *dev, pm_message_t state) 518 518 { 519 - struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data; 519 + struct mfd_cell *cell = dev_get_platdata(&dev->dev); 520 520 521 521 if (cell->suspend) 522 522 cell->suspend(dev); ··· 527 527 528 528 static int tmio_resume(struct platform_device *dev) 529 529 { 530 - struct mfd_cell *cell = (struct mfd_cell 
*)dev->dev.platform_data; 530 + struct mfd_cell *cell = dev_get_platdata(&dev->dev); 531 531 532 532 /* FIXME - is this required or merely another attack of the broken 533 533 * SHARP platform? Looks suspicious.
-207
drivers/mtd/nand/ts7250.c
··· 1 - /* 2 - * drivers/mtd/nand/ts7250.c 3 - * 4 - * Copyright (C) 2004 Technologic Systems (support@embeddedARM.com) 5 - * 6 - * Derived from drivers/mtd/nand/edb7312.c 7 - * Copyright (C) 2004 Marius Gröger (mag@sysgo.de) 8 - * 9 - * Derived from drivers/mtd/nand/autcpu12.c 10 - * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de) 11 - * 12 - * This program is free software; you can redistribute it and/or modify 13 - * it under the terms of the GNU General Public License version 2 as 14 - * published by the Free Software Foundation. 15 - * 16 - * Overview: 17 - * This is a device driver for the NAND flash device found on the 18 - * TS-7250 board which utilizes a Samsung 32 Mbyte part. 19 - */ 20 - 21 - #include <linux/slab.h> 22 - #include <linux/module.h> 23 - #include <linux/init.h> 24 - #include <linux/mtd/mtd.h> 25 - #include <linux/mtd/nand.h> 26 - #include <linux/mtd/partitions.h> 27 - #include <linux/io.h> 28 - 29 - #include <mach/hardware.h> 30 - #include <mach/ts72xx.h> 31 - 32 - #include <asm/sizes.h> 33 - #include <asm/mach-types.h> 34 - 35 - /* 36 - * MTD structure for TS7250 board 37 - */ 38 - static struct mtd_info *ts7250_mtd = NULL; 39 - 40 - #ifdef CONFIG_MTD_PARTITIONS 41 - static const char *part_probes[] = { "cmdlinepart", NULL }; 42 - 43 - #define NUM_PARTITIONS 3 44 - 45 - /* 46 - * Define static partitions for flash device 47 - */ 48 - static struct mtd_partition partition_info32[] = { 49 - { 50 - .name = "TS-BOOTROM", 51 - .offset = 0x00000000, 52 - .size = 0x00004000, 53 - }, { 54 - .name = "Linux", 55 - .offset = 0x00004000, 56 - .size = 0x01d00000, 57 - }, { 58 - .name = "RedBoot", 59 - .offset = 0x01d04000, 60 - .size = 0x002fc000, 61 - }, 62 - }; 63 - 64 - /* 65 - * Define static partitions for flash device 66 - */ 67 - static struct mtd_partition partition_info128[] = { 68 - { 69 - .name = "TS-BOOTROM", 70 - .offset = 0x00000000, 71 - .size = 0x00004000, 72 - }, { 73 - .name = "Linux", 74 - .offset = 0x00004000, 75 - .size = 
0x07d00000, 76 - }, { 77 - .name = "RedBoot", 78 - .offset = 0x07d04000, 79 - .size = 0x002fc000, 80 - }, 81 - }; 82 - #endif 83 - 84 - 85 - /* 86 - * hardware specific access to control-lines 87 - * 88 - * ctrl: 89 - * NAND_NCE: bit 0 -> bit 2 90 - * NAND_CLE: bit 1 -> bit 1 91 - * NAND_ALE: bit 2 -> bit 0 92 - */ 93 - static void ts7250_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) 94 - { 95 - struct nand_chip *chip = mtd->priv; 96 - 97 - if (ctrl & NAND_CTRL_CHANGE) { 98 - unsigned long addr = TS72XX_NAND_CONTROL_VIRT_BASE; 99 - unsigned char bits; 100 - 101 - bits = (ctrl & NAND_NCE) << 2; 102 - bits |= ctrl & NAND_CLE; 103 - bits |= (ctrl & NAND_ALE) >> 2; 104 - 105 - __raw_writeb((__raw_readb(addr) & ~0x7) | bits, addr); 106 - } 107 - 108 - if (cmd != NAND_CMD_NONE) 109 - writeb(cmd, chip->IO_ADDR_W); 110 - } 111 - 112 - /* 113 - * read device ready pin 114 - */ 115 - static int ts7250_device_ready(struct mtd_info *mtd) 116 - { 117 - return __raw_readb(TS72XX_NAND_BUSY_VIRT_BASE) & 0x20; 118 - } 119 - 120 - /* 121 - * Main initialization routine 122 - */ 123 - static int __init ts7250_init(void) 124 - { 125 - struct nand_chip *this; 126 - const char *part_type = 0; 127 - int mtd_parts_nb = 0; 128 - struct mtd_partition *mtd_parts = 0; 129 - 130 - if (!machine_is_ts72xx() || board_is_ts7200()) 131 - return -ENXIO; 132 - 133 - /* Allocate memory for MTD device structure and private data */ 134 - ts7250_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); 135 - if (!ts7250_mtd) { 136 - printk("Unable to allocate TS7250 NAND MTD device structure.\n"); 137 - return -ENOMEM; 138 - } 139 - 140 - /* Get pointer to private data */ 141 - this = (struct nand_chip *)(&ts7250_mtd[1]); 142 - 143 - /* Initialize structures */ 144 - memset(ts7250_mtd, 0, sizeof(struct mtd_info)); 145 - memset(this, 0, sizeof(struct nand_chip)); 146 - 147 - /* Link the private data with the MTD structure */ 148 - ts7250_mtd->priv = this; 149 - 
ts7250_mtd->owner = THIS_MODULE; 150 - 151 - /* insert callbacks */ 152 - this->IO_ADDR_R = (void *)TS72XX_NAND_DATA_VIRT_BASE; 153 - this->IO_ADDR_W = (void *)TS72XX_NAND_DATA_VIRT_BASE; 154 - this->cmd_ctrl = ts7250_hwcontrol; 155 - this->dev_ready = ts7250_device_ready; 156 - this->chip_delay = 15; 157 - this->ecc.mode = NAND_ECC_SOFT; 158 - 159 - printk("Searching for NAND flash...\n"); 160 - /* Scan to find existence of the device */ 161 - if (nand_scan(ts7250_mtd, 1)) { 162 - kfree(ts7250_mtd); 163 - return -ENXIO; 164 - } 165 - #ifdef CONFIG_MTD_PARTITIONS 166 - ts7250_mtd->name = "ts7250-nand"; 167 - mtd_parts_nb = parse_mtd_partitions(ts7250_mtd, part_probes, &mtd_parts, 0); 168 - if (mtd_parts_nb > 0) 169 - part_type = "command line"; 170 - else 171 - mtd_parts_nb = 0; 172 - #endif 173 - if (mtd_parts_nb == 0) { 174 - mtd_parts = partition_info32; 175 - if (ts7250_mtd->size >= (128 * 0x100000)) 176 - mtd_parts = partition_info128; 177 - mtd_parts_nb = NUM_PARTITIONS; 178 - part_type = "static"; 179 - } 180 - 181 - /* Register the partitions */ 182 - printk(KERN_NOTICE "Using %s partition definition\n", part_type); 183 - add_mtd_partitions(ts7250_mtd, mtd_parts, mtd_parts_nb); 184 - 185 - /* Return happy */ 186 - return 0; 187 - } 188 - 189 - module_init(ts7250_init); 190 - 191 - /* 192 - * Clean up routine 193 - */ 194 - static void __exit ts7250_cleanup(void) 195 - { 196 - /* Unregister the device */ 197 - del_mtd_device(ts7250_mtd); 198 - 199 - /* Free the MTD device structure */ 200 - kfree(ts7250_mtd); 201 - } 202 - 203 - module_exit(ts7250_cleanup); 204 - 205 - MODULE_LICENSE("GPL"); 206 - MODULE_AUTHOR("Jesse Off <joff@embeddedARM.com>"); 207 - MODULE_DESCRIPTION("MTD map driver for Technologic Systems TS-7250 board");
+1 -1
drivers/mtd/nand/txx9ndfmc.c
··· 274 274 struct nand_chip *chip = mtd->priv; 275 275 int ret; 276 276 277 - ret = nand_scan_ident(mtd, 1); 277 + ret = nand_scan_ident(mtd, 1, NULL); 278 278 if (!ret) { 279 279 if (mtd->writesize >= 512) { 280 280 chip->ecc.size = mtd->writesize;
+71 -71
drivers/mtd/nand/w90p910_nand.c drivers/mtd/nand/nuc900_nand.c
··· 1 1 /* 2 - * Copyright (c) 2009 Nuvoton technology corporation. 2 + * Copyright © 2009 Nuvoton technology corporation. 3 3 * 4 4 * Wan ZongShun <mcuos.com@gmail.com> 5 5 * ··· 55 55 #define write_addr_reg(dev, val) \ 56 56 __raw_writel((val), (dev)->reg + REG_SMADDR) 57 57 58 - struct w90p910_nand { 58 + struct nuc900_nand { 59 59 struct mtd_info mtd; 60 60 struct nand_chip chip; 61 61 void __iomem *reg; ··· 76 76 } 77 77 }; 78 78 79 - static unsigned char w90p910_nand_read_byte(struct mtd_info *mtd) 79 + static unsigned char nuc900_nand_read_byte(struct mtd_info *mtd) 80 80 { 81 81 unsigned char ret; 82 - struct w90p910_nand *nand; 82 + struct nuc900_nand *nand; 83 83 84 - nand = container_of(mtd, struct w90p910_nand, mtd); 84 + nand = container_of(mtd, struct nuc900_nand, mtd); 85 85 86 86 ret = (unsigned char)read_data_reg(nand); 87 87 88 88 return ret; 89 89 } 90 90 91 - static void w90p910_nand_read_buf(struct mtd_info *mtd, 92 - unsigned char *buf, int len) 91 + static void nuc900_nand_read_buf(struct mtd_info *mtd, 92 + unsigned char *buf, int len) 93 93 { 94 94 int i; 95 - struct w90p910_nand *nand; 95 + struct nuc900_nand *nand; 96 96 97 - nand = container_of(mtd, struct w90p910_nand, mtd); 97 + nand = container_of(mtd, struct nuc900_nand, mtd); 98 98 99 99 for (i = 0; i < len; i++) 100 100 buf[i] = (unsigned char)read_data_reg(nand); 101 101 } 102 102 103 - static void w90p910_nand_write_buf(struct mtd_info *mtd, 104 - const unsigned char *buf, int len) 103 + static void nuc900_nand_write_buf(struct mtd_info *mtd, 104 + const unsigned char *buf, int len) 105 105 { 106 106 int i; 107 - struct w90p910_nand *nand; 107 + struct nuc900_nand *nand; 108 108 109 - nand = container_of(mtd, struct w90p910_nand, mtd); 109 + nand = container_of(mtd, struct nuc900_nand, mtd); 110 110 111 111 for (i = 0; i < len; i++) 112 112 write_data_reg(nand, buf[i]); 113 113 } 114 114 115 - static int w90p910_verify_buf(struct mtd_info *mtd, 116 - const unsigned char *buf, int 
len) 115 + static int nuc900_verify_buf(struct mtd_info *mtd, 116 + const unsigned char *buf, int len) 117 117 { 118 118 int i; 119 - struct w90p910_nand *nand; 119 + struct nuc900_nand *nand; 120 120 121 - nand = container_of(mtd, struct w90p910_nand, mtd); 121 + nand = container_of(mtd, struct nuc900_nand, mtd); 122 122 123 123 for (i = 0; i < len; i++) { 124 124 if (buf[i] != (unsigned char)read_data_reg(nand)) ··· 128 128 return 0; 129 129 } 130 130 131 - static int w90p910_check_rb(struct w90p910_nand *nand) 131 + static int nuc900_check_rb(struct nuc900_nand *nand) 132 132 { 133 133 unsigned int val; 134 134 spin_lock(&nand->lock); ··· 139 139 return val; 140 140 } 141 141 142 - static int w90p910_nand_devready(struct mtd_info *mtd) 142 + static int nuc900_nand_devready(struct mtd_info *mtd) 143 143 { 144 - struct w90p910_nand *nand; 144 + struct nuc900_nand *nand; 145 145 int ready; 146 146 147 - nand = container_of(mtd, struct w90p910_nand, mtd); 147 + nand = container_of(mtd, struct nuc900_nand, mtd); 148 148 149 - ready = (w90p910_check_rb(nand)) ? 1 : 0; 149 + ready = (nuc900_check_rb(nand)) ? 
1 : 0; 150 150 return ready; 151 151 } 152 152 153 - static void w90p910_nand_command_lp(struct mtd_info *mtd, 154 - unsigned int command, int column, int page_addr) 153 + static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command, 154 + int column, int page_addr) 155 155 { 156 156 register struct nand_chip *chip = mtd->priv; 157 - struct w90p910_nand *nand; 157 + struct nuc900_nand *nand; 158 158 159 - nand = container_of(mtd, struct w90p910_nand, mtd); 159 + nand = container_of(mtd, struct nuc900_nand, mtd); 160 160 161 161 if (command == NAND_CMD_READOOB) { 162 162 column += mtd->writesize; ··· 212 212 write_cmd_reg(nand, NAND_CMD_STATUS); 213 213 write_cmd_reg(nand, command); 214 214 215 - while (!w90p910_check_rb(nand)) 215 + while (!nuc900_check_rb(nand)) 216 216 ; 217 217 218 218 return; ··· 241 241 } 242 242 243 243 244 - static void w90p910_nand_enable(struct w90p910_nand *nand) 244 + static void nuc900_nand_enable(struct nuc900_nand *nand) 245 245 { 246 246 unsigned int val; 247 247 spin_lock(&nand->lock); ··· 262 262 spin_unlock(&nand->lock); 263 263 } 264 264 265 - static int __devinit w90p910_nand_probe(struct platform_device *pdev) 265 + static int __devinit nuc900_nand_probe(struct platform_device *pdev) 266 266 { 267 - struct w90p910_nand *w90p910_nand; 267 + struct nuc900_nand *nuc900_nand; 268 268 struct nand_chip *chip; 269 269 int retval; 270 270 struct resource *res; 271 271 272 272 retval = 0; 273 273 274 - w90p910_nand = kzalloc(sizeof(struct w90p910_nand), GFP_KERNEL); 275 - if (!w90p910_nand) 274 + nuc900_nand = kzalloc(sizeof(struct nuc900_nand), GFP_KERNEL); 275 + if (!nuc900_nand) 276 276 return -ENOMEM; 277 - chip = &(w90p910_nand->chip); 277 + chip = &(nuc900_nand->chip); 278 278 279 - w90p910_nand->mtd.priv = chip; 280 - w90p910_nand->mtd.owner = THIS_MODULE; 281 - spin_lock_init(&w90p910_nand->lock); 279 + nuc900_nand->mtd.priv = chip; 280 + nuc900_nand->mtd.owner = THIS_MODULE; 281 + 
spin_lock_init(&nuc900_nand->lock); 282 282 283 - w90p910_nand->clk = clk_get(&pdev->dev, NULL); 284 - if (IS_ERR(w90p910_nand->clk)) { 283 + nuc900_nand->clk = clk_get(&pdev->dev, NULL); 284 + if (IS_ERR(nuc900_nand->clk)) { 285 285 retval = -ENOENT; 286 286 goto fail1; 287 287 } 288 - clk_enable(w90p910_nand->clk); 288 + clk_enable(nuc900_nand->clk); 289 289 290 - chip->cmdfunc = w90p910_nand_command_lp; 291 - chip->dev_ready = w90p910_nand_devready; 292 - chip->read_byte = w90p910_nand_read_byte; 293 - chip->write_buf = w90p910_nand_write_buf; 294 - chip->read_buf = w90p910_nand_read_buf; 295 - chip->verify_buf = w90p910_verify_buf; 290 + chip->cmdfunc = nuc900_nand_command_lp; 291 + chip->dev_ready = nuc900_nand_devready; 292 + chip->read_byte = nuc900_nand_read_byte; 293 + chip->write_buf = nuc900_nand_write_buf; 294 + chip->read_buf = nuc900_nand_read_buf; 295 + chip->verify_buf = nuc900_verify_buf; 296 296 chip->chip_delay = 50; 297 297 chip->options = 0; 298 298 chip->ecc.mode = NAND_ECC_SOFT; ··· 308 308 goto fail1; 309 309 } 310 310 311 - w90p910_nand->reg = ioremap(res->start, resource_size(res)); 312 - if (!w90p910_nand->reg) { 311 + nuc900_nand->reg = ioremap(res->start, resource_size(res)); 312 + if (!nuc900_nand->reg) { 313 313 retval = -ENOMEM; 314 314 goto fail2; 315 315 } 316 316 317 - w90p910_nand_enable(w90p910_nand); 317 + nuc900_nand_enable(nuc900_nand); 318 318 319 - if (nand_scan(&(w90p910_nand->mtd), 1)) { 319 + if (nand_scan(&(nuc900_nand->mtd), 1)) { 320 320 retval = -ENXIO; 321 321 goto fail3; 322 322 } 323 323 324 - add_mtd_partitions(&(w90p910_nand->mtd), partitions, 324 + add_mtd_partitions(&(nuc900_nand->mtd), partitions, 325 325 ARRAY_SIZE(partitions)); 326 326 327 - platform_set_drvdata(pdev, w90p910_nand); 327 + platform_set_drvdata(pdev, nuc900_nand); 328 328 329 329 return retval; 330 330 331 - fail3: iounmap(w90p910_nand->reg); 331 + fail3: iounmap(nuc900_nand->reg); 332 332 fail2: release_mem_region(res->start, 
resource_size(res)); 333 - fail1: kfree(w90p910_nand); 333 + fail1: kfree(nuc900_nand); 334 334 return retval; 335 335 } 336 336 337 - static int __devexit w90p910_nand_remove(struct platform_device *pdev) 337 + static int __devexit nuc900_nand_remove(struct platform_device *pdev) 338 338 { 339 - struct w90p910_nand *w90p910_nand = platform_get_drvdata(pdev); 339 + struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev); 340 340 struct resource *res; 341 341 342 - iounmap(w90p910_nand->reg); 342 + iounmap(nuc900_nand->reg); 343 343 344 344 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 345 345 release_mem_region(res->start, resource_size(res)); 346 346 347 - clk_disable(w90p910_nand->clk); 348 - clk_put(w90p910_nand->clk); 347 + clk_disable(nuc900_nand->clk); 348 + clk_put(nuc900_nand->clk); 349 349 350 - kfree(w90p910_nand); 350 + kfree(nuc900_nand); 351 351 352 352 platform_set_drvdata(pdev, NULL); 353 353 354 354 return 0; 355 355 } 356 356 357 - static struct platform_driver w90p910_nand_driver = { 358 - .probe = w90p910_nand_probe, 359 - .remove = __devexit_p(w90p910_nand_remove), 357 + static struct platform_driver nuc900_nand_driver = { 358 + .probe = nuc900_nand_probe, 359 + .remove = __devexit_p(nuc900_nand_remove), 360 360 .driver = { 361 - .name = "w90p910-fmi", 361 + .name = "nuc900-fmi", 362 362 .owner = THIS_MODULE, 363 363 }, 364 364 }; 365 365 366 - static int __init w90p910_nand_init(void) 366 + static int __init nuc900_nand_init(void) 367 367 { 368 - return platform_driver_register(&w90p910_nand_driver); 368 + return platform_driver_register(&nuc900_nand_driver); 369 369 } 370 370 371 - static void __exit w90p910_nand_exit(void) 371 + static void __exit nuc900_nand_exit(void) 372 372 { 373 - platform_driver_unregister(&w90p910_nand_driver); 373 + platform_driver_unregister(&nuc900_nand_driver); 374 374 } 375 375 376 - module_init(w90p910_nand_init); 377 - module_exit(w90p910_nand_exit); 376 + module_init(nuc900_nand_init); 377 + 
module_exit(nuc900_nand_exit); 378 378 379 379 MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); 380 - MODULE_DESCRIPTION("w90p910 nand driver!"); 380 + MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!"); 381 381 MODULE_LICENSE("GPL"); 382 - MODULE_ALIAS("platform:w90p910-fmi"); 382 + MODULE_ALIAS("platform:nuc900-fmi");
-1
drivers/mtd/nftlcore.c
··· 126 126 del_mtd_blktrans_dev(dev); 127 127 kfree(nftl->ReplUnitTable); 128 128 kfree(nftl->EUNtable); 129 - kfree(nftl); 130 129 } 131 130 132 131 /*
+7
drivers/mtd/onenand/Kconfig
··· 30 30 Support for a OneNAND flash device connected to an OMAP2/OMAP3 CPU 31 31 via the GPMC memory controller. 32 32 33 + config MTD_ONENAND_SAMSUNG 34 + tristate "OneNAND on Samsung SOC controller support" 35 + depends on MTD_ONENAND && (ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210) 36 + help 37 + Support for a OneNAND flash device connected to an Samsung SOC 38 + S3C64XX/S5PC1XX controller. 39 + 33 40 config MTD_ONENAND_OTP 34 41 bool "OneNAND OTP Support" 35 42 select HAVE_MTD_OTP
+1
drivers/mtd/onenand/Makefile
··· 8 8 # Board specific. 9 9 obj-$(CONFIG_MTD_ONENAND_GENERIC) += generic.o 10 10 obj-$(CONFIG_MTD_ONENAND_OMAP2) += omap2.o 11 + obj-$(CONFIG_MTD_ONENAND_SAMSUNG) += samsung.o 11 12 12 13 # Simulator 13 14 obj-$(CONFIG_MTD_ONENAND_SIM) += onenand_sim.o
+6 -6
drivers/mtd/onenand/omap2.c
··· 309 309 goto out_copy; 310 310 311 311 /* panic_write() may be in an interrupt context */ 312 - if (in_interrupt()) 312 + if (in_interrupt() || oops_in_progress) 313 313 goto out_copy; 314 314 315 315 if (buf >= high_memory) { ··· 386 386 goto out_copy; 387 387 388 388 /* panic_write() may be in an interrupt context */ 389 - if (in_interrupt()) 389 + if (in_interrupt() || oops_in_progress) 390 390 goto out_copy; 391 391 392 392 if (buf >= high_memory) { ··· 403 403 404 404 dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE); 405 405 dma_dst = c->phys_base + bram_offset; 406 - if (dma_mapping_error(&c->pdev->dev, dma_dst)) { 406 + if (dma_mapping_error(&c->pdev->dev, dma_src)) { 407 407 dev_err(&c->pdev->dev, 408 408 "Couldn't DMA map a %d byte buffer\n", 409 409 count); ··· 426 426 if (*done) 427 427 break; 428 428 429 - dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE); 429 + dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE); 430 430 431 431 if (!*done) { 432 432 dev_err(&c->pdev->dev, "timeout waiting for DMA\n"); ··· 521 521 dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count, 522 522 DMA_TO_DEVICE); 523 523 dma_dst = c->phys_base + bram_offset; 524 - if (dma_mapping_error(&c->pdev->dev, dma_dst)) { 524 + if (dma_mapping_error(&c->pdev->dev, dma_src)) { 525 525 dev_err(&c->pdev->dev, 526 526 "Couldn't DMA map a %d byte buffer\n", 527 527 count); ··· 539 539 omap_start_dma(c->dma_channel); 540 540 wait_for_completion(&c->dma_done); 541 541 542 - dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_TO_DEVICE); 542 + dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE); 543 543 544 544 return 0; 545 545 }
+42 -21
drivers/mtd/onenand/onenand_base.c
··· 397 397 value = onenand_bufferram_address(this, block); 398 398 this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2); 399 399 400 - if (ONENAND_IS_MLC(this) || ONENAND_IS_2PLANE(this)) 400 + if (ONENAND_IS_MLC(this) || ONENAND_IS_2PLANE(this) || 401 + ONENAND_IS_4KB_PAGE(this)) 401 402 /* It is always BufferRAM0 */ 402 403 ONENAND_SET_BUFFERRAM0(this); 403 404 else ··· 427 426 case FLEXONENAND_CMD_RECOVER_LSB: 428 427 case ONENAND_CMD_READ: 429 428 case ONENAND_CMD_READOOB: 430 - if (ONENAND_IS_MLC(this)) 429 + if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this)) 431 430 /* It is always BufferRAM0 */ 432 431 dataram = ONENAND_SET_BUFFERRAM0(this); 433 432 else ··· 467 466 { 468 467 int ecc, i, result = 0; 469 468 470 - if (!FLEXONENAND(this)) 469 + if (!FLEXONENAND(this) && !ONENAND_IS_4KB_PAGE(this)) 471 470 return this->read_word(this->base + ONENAND_REG_ECC_STATUS); 472 471 473 472 for (i = 0; i < 4; i++) { 474 - ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS + i); 473 + ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS + i*2); 475 474 if (likely(!ecc)) 476 475 continue; 477 476 if (ecc & FLEXONENAND_UNCORRECTABLE_ERROR) ··· 1426 1425 int ret; 1427 1426 1428 1427 onenand_get_device(mtd, FL_READING); 1429 - ret = ONENAND_IS_MLC(this) ? 1428 + ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ? 1430 1429 onenand_mlc_read_ops_nolock(mtd, from, &ops) : 1431 1430 onenand_read_ops_nolock(mtd, from, &ops); 1432 1431 onenand_release_device(mtd); ··· 1461 1460 1462 1461 onenand_get_device(mtd, FL_READING); 1463 1462 if (ops->datbuf) 1464 - ret = ONENAND_IS_MLC(this) ? 1463 + ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ? 
1465 1464 onenand_mlc_read_ops_nolock(mtd, from, ops) : 1466 1465 onenand_read_ops_nolock(mtd, from, ops); 1467 1466 else ··· 1635 1634 static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, size_t len) 1636 1635 { 1637 1636 struct onenand_chip *this = mtd->priv; 1638 - void __iomem *dataram; 1639 1637 int ret = 0; 1640 1638 int thislen, column; 1641 1639 ··· 1654 1654 1655 1655 onenand_update_bufferram(mtd, addr, 1); 1656 1656 1657 - dataram = this->base + ONENAND_DATARAM; 1658 - dataram += onenand_bufferram_offset(mtd, ONENAND_DATARAM); 1657 + this->read_bufferram(mtd, ONENAND_DATARAM, this->verify_buf, 0, mtd->writesize); 1659 1658 1660 - if (memcmp(buf, dataram + column, thislen)) 1659 + if (memcmp(buf, this->verify_buf, thislen)) 1661 1660 return -EBADMSG; 1662 1661 1663 1662 len -= thislen; ··· 1925 1926 * 2 PLANE, MLC, and Flex-OneNAND do not support 1926 1927 * write-while-program feature. 1927 1928 */ 1928 - if (!ONENAND_IS_2PLANE(this) && !first) { 1929 + if (!ONENAND_IS_2PLANE(this) && !ONENAND_IS_4KB_PAGE(this) && !first) { 1929 1930 ONENAND_SET_PREV_BUFFERRAM(this); 1930 1931 1931 1932 ret = this->wait(mtd, FL_WRITING); ··· 1956 1957 /* 1957 1958 * 2 PLANE, MLC, and Flex-OneNAND wait here 1958 1959 */ 1959 - if (ONENAND_IS_2PLANE(this)) { 1960 + if (ONENAND_IS_2PLANE(this) || ONENAND_IS_4KB_PAGE(this)) { 1960 1961 ret = this->wait(mtd, FL_WRITING); 1961 1962 1962 1963 /* In partial page write we don't update bufferram */ ··· 2083 2084 memcpy(oobbuf + column, buf, thislen); 2084 2085 this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize); 2085 2086 2086 - if (ONENAND_IS_MLC(this)) { 2087 + if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this)) { 2087 2088 /* Set main area of DataRAM to 0xff*/ 2088 2089 memset(this->page_buf, 0xff, mtd->writesize); 2089 2090 this->write_bufferram(mtd, ONENAND_DATARAM, ··· 3026 3027 this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0); 3027 3028 this->wait(mtd, FL_OTPING); 3028 3029 
3029 - ret = ONENAND_IS_MLC(this) ? 3030 + ret = ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this) ? 3030 3031 onenand_mlc_read_ops_nolock(mtd, from, &ops) : 3031 3032 onenand_read_ops_nolock(mtd, from, &ops); 3032 3033 ··· 3371 3372 /* Lock scheme */ 3372 3373 switch (density) { 3373 3374 case ONENAND_DEVICE_DENSITY_4Gb: 3374 - this->options |= ONENAND_HAS_2PLANE; 3375 + if (ONENAND_IS_DDP(this)) 3376 + this->options |= ONENAND_HAS_2PLANE; 3377 + else 3378 + this->options |= ONENAND_HAS_4KB_PAGE; 3375 3379 3376 3380 case ONENAND_DEVICE_DENSITY_2Gb: 3377 3381 /* 2Gb DDP does not have 2 plane */ ··· 3395 3393 break; 3396 3394 } 3397 3395 3398 - if (ONENAND_IS_MLC(this)) 3396 + if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this)) 3399 3397 this->options &= ~ONENAND_HAS_2PLANE; 3400 3398 3401 3399 if (FLEXONENAND(this)) { ··· 3409 3407 printk(KERN_DEBUG "Chip support all block unlock\n"); 3410 3408 if (this->options & ONENAND_HAS_2PLANE) 3411 3409 printk(KERN_DEBUG "Chip has 2 plane\n"); 3410 + if (this->options & ONENAND_HAS_4KB_PAGE) 3411 + printk(KERN_DEBUG "Chip has 4KiB pagesize\n"); 3412 3412 } 3413 3413 3414 3414 /** ··· 3763 3759 /* Restore system configuration 1 */ 3764 3760 this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1); 3765 3761 3762 + /* Workaround */ 3763 + if (syscfg & ONENAND_SYS_CFG1_SYNC_WRITE) { 3764 + bram_maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID); 3765 + bram_dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID); 3766 + } 3767 + 3766 3768 /* Check manufacturer ID */ 3767 3769 if (onenand_check_maf(bram_maf_id)) 3768 3770 return -ENXIO; ··· 3787 3777 onenand_print_device_info(dev_id, ver_id); 3788 3778 this->device_id = dev_id; 3789 3779 this->version_id = ver_id; 3780 + 3781 + /* Check OneNAND features */ 3782 + onenand_check_features(mtd); 3790 3783 3791 3784 density = onenand_get_density(dev_id); 3792 3785 if (FLEXONENAND(this)) { ··· 3812 3799 /* The data buffer size is equal to page size */ 3813 
3800 mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE); 3814 3801 /* We use the full BufferRAM */ 3815 - if (ONENAND_IS_MLC(this)) 3802 + if (ONENAND_IS_MLC(this) || ONENAND_IS_4KB_PAGE(this)) 3816 3803 mtd->writesize <<= 1; 3817 3804 3818 3805 mtd->oobsize = mtd->writesize >> 5; ··· 3841 3828 flexonenand_get_size(mtd); 3842 3829 else 3843 3830 mtd->size = this->chipsize; 3844 - 3845 - /* Check OneNAND features */ 3846 - onenand_check_features(mtd); 3847 3831 3848 3832 /* 3849 3833 * We emulate the 4KiB page and 256KiB erase block size ··· 3936 3926 __func__); 3937 3927 return -ENOMEM; 3938 3928 } 3929 + #ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE 3930 + this->verify_buf = kzalloc(mtd->writesize, GFP_KERNEL); 3931 + if (!this->verify_buf) { 3932 + kfree(this->page_buf); 3933 + return -ENOMEM; 3934 + } 3935 + #endif 3939 3936 this->options |= ONENAND_PAGEBUF_ALLOC; 3940 3937 } 3941 3938 if (!this->oob_buf) { ··· 4070 4053 kfree(this->bbm); 4071 4054 } 4072 4055 /* Buffers allocated by onenand_scan */ 4073 - if (this->options & ONENAND_PAGEBUF_ALLOC) 4056 + if (this->options & ONENAND_PAGEBUF_ALLOC) { 4074 4057 kfree(this->page_buf); 4058 + #ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE 4059 + kfree(this->verify_buf); 4060 + #endif 4061 + } 4075 4062 if (this->options & ONENAND_OOBBUF_ALLOC) 4076 4063 kfree(this->oob_buf); 4077 4064 kfree(mtd->eraseregions);
+1071
drivers/mtd/onenand/samsung.c
··· 1 + /* 2 + * Samsung S3C64XX/S5PC1XX OneNAND driver 3 + * 4 + * Copyright © 2008-2010 Samsung Electronics 5 + * Kyungmin Park <kyungmin.park@samsung.com> 6 + * Marek Szyprowski <m.szyprowski@samsung.com> 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License version 2 as 10 + * published by the Free Software Foundation. 11 + * 12 + * Implementation: 13 + * S3C64XX and S5PC100: emulate the pseudo BufferRAM 14 + * S5PC110: use DMA 15 + */ 16 + 17 + #include <linux/module.h> 18 + #include <linux/platform_device.h> 19 + #include <linux/sched.h> 20 + #include <linux/slab.h> 21 + #include <linux/mtd/mtd.h> 22 + #include <linux/mtd/onenand.h> 23 + #include <linux/mtd/partitions.h> 24 + #include <linux/dma-mapping.h> 25 + 26 + #include <asm/mach/flash.h> 27 + #include <plat/regs-onenand.h> 28 + 29 + #include <linux/io.h> 30 + 31 + enum soc_type { 32 + TYPE_S3C6400, 33 + TYPE_S3C6410, 34 + TYPE_S5PC100, 35 + TYPE_S5PC110, 36 + }; 37 + 38 + #define ONENAND_ERASE_STATUS 0x00 39 + #define ONENAND_MULTI_ERASE_SET 0x01 40 + #define ONENAND_ERASE_START 0x03 41 + #define ONENAND_UNLOCK_START 0x08 42 + #define ONENAND_UNLOCK_END 0x09 43 + #define ONENAND_LOCK_START 0x0A 44 + #define ONENAND_LOCK_END 0x0B 45 + #define ONENAND_LOCK_TIGHT_START 0x0C 46 + #define ONENAND_LOCK_TIGHT_END 0x0D 47 + #define ONENAND_UNLOCK_ALL 0x0E 48 + #define ONENAND_OTP_ACCESS 0x12 49 + #define ONENAND_SPARE_ACCESS_ONLY 0x13 50 + #define ONENAND_MAIN_ACCESS_ONLY 0x14 51 + #define ONENAND_ERASE_VERIFY 0x15 52 + #define ONENAND_MAIN_SPARE_ACCESS 0x16 53 + #define ONENAND_PIPELINE_READ 0x4000 54 + 55 + #define MAP_00 (0x0) 56 + #define MAP_01 (0x1) 57 + #define MAP_10 (0x2) 58 + #define MAP_11 (0x3) 59 + 60 + #define S3C64XX_CMD_MAP_SHIFT 24 61 + #define S5PC1XX_CMD_MAP_SHIFT 26 62 + 63 + #define S3C6400_FBA_SHIFT 10 64 + #define S3C6400_FPA_SHIFT 4 65 + #define S3C6400_FSA_SHIFT 2 66 + 67 + #define S3C6410_FBA_SHIFT 12 
68 + #define S3C6410_FPA_SHIFT 6 69 + #define S3C6410_FSA_SHIFT 4 70 + 71 + #define S5PC100_FBA_SHIFT 13 72 + #define S5PC100_FPA_SHIFT 7 73 + #define S5PC100_FSA_SHIFT 5 74 + 75 + /* S5PC110 specific definitions */ 76 + #define S5PC110_DMA_SRC_ADDR 0x400 77 + #define S5PC110_DMA_SRC_CFG 0x404 78 + #define S5PC110_DMA_DST_ADDR 0x408 79 + #define S5PC110_DMA_DST_CFG 0x40C 80 + #define S5PC110_DMA_TRANS_SIZE 0x414 81 + #define S5PC110_DMA_TRANS_CMD 0x418 82 + #define S5PC110_DMA_TRANS_STATUS 0x41C 83 + #define S5PC110_DMA_TRANS_DIR 0x420 84 + 85 + #define S5PC110_DMA_CFG_SINGLE (0x0 << 16) 86 + #define S5PC110_DMA_CFG_4BURST (0x2 << 16) 87 + #define S5PC110_DMA_CFG_8BURST (0x3 << 16) 88 + #define S5PC110_DMA_CFG_16BURST (0x4 << 16) 89 + 90 + #define S5PC110_DMA_CFG_INC (0x0 << 8) 91 + #define S5PC110_DMA_CFG_CNT (0x1 << 8) 92 + 93 + #define S5PC110_DMA_CFG_8BIT (0x0 << 0) 94 + #define S5PC110_DMA_CFG_16BIT (0x1 << 0) 95 + #define S5PC110_DMA_CFG_32BIT (0x2 << 0) 96 + 97 + #define S5PC110_DMA_SRC_CFG_READ (S5PC110_DMA_CFG_16BURST | \ 98 + S5PC110_DMA_CFG_INC | \ 99 + S5PC110_DMA_CFG_16BIT) 100 + #define S5PC110_DMA_DST_CFG_READ (S5PC110_DMA_CFG_16BURST | \ 101 + S5PC110_DMA_CFG_INC | \ 102 + S5PC110_DMA_CFG_32BIT) 103 + #define S5PC110_DMA_SRC_CFG_WRITE (S5PC110_DMA_CFG_16BURST | \ 104 + S5PC110_DMA_CFG_INC | \ 105 + S5PC110_DMA_CFG_32BIT) 106 + #define S5PC110_DMA_DST_CFG_WRITE (S5PC110_DMA_CFG_16BURST | \ 107 + S5PC110_DMA_CFG_INC | \ 108 + S5PC110_DMA_CFG_16BIT) 109 + 110 + #define S5PC110_DMA_TRANS_CMD_TDC (0x1 << 18) 111 + #define S5PC110_DMA_TRANS_CMD_TEC (0x1 << 16) 112 + #define S5PC110_DMA_TRANS_CMD_TR (0x1 << 0) 113 + 114 + #define S5PC110_DMA_TRANS_STATUS_TD (0x1 << 18) 115 + #define S5PC110_DMA_TRANS_STATUS_TB (0x1 << 17) 116 + #define S5PC110_DMA_TRANS_STATUS_TE (0x1 << 16) 117 + 118 + #define S5PC110_DMA_DIR_READ 0x0 119 + #define S5PC110_DMA_DIR_WRITE 0x1 120 + 121 + struct s3c_onenand { 122 + struct mtd_info *mtd; 123 + struct platform_device *pdev; 
124 + enum soc_type type; 125 + void __iomem *base; 126 + struct resource *base_res; 127 + void __iomem *ahb_addr; 128 + struct resource *ahb_res; 129 + int bootram_command; 130 + void __iomem *page_buf; 131 + void __iomem *oob_buf; 132 + unsigned int (*mem_addr)(int fba, int fpa, int fsa); 133 + unsigned int (*cmd_map)(unsigned int type, unsigned int val); 134 + void __iomem *dma_addr; 135 + struct resource *dma_res; 136 + unsigned long phys_base; 137 + #ifdef CONFIG_MTD_PARTITIONS 138 + struct mtd_partition *parts; 139 + #endif 140 + }; 141 + 142 + #define CMD_MAP_00(dev, addr) (dev->cmd_map(MAP_00, ((addr) << 1))) 143 + #define CMD_MAP_01(dev, mem_addr) (dev->cmd_map(MAP_01, (mem_addr))) 144 + #define CMD_MAP_10(dev, mem_addr) (dev->cmd_map(MAP_10, (mem_addr))) 145 + #define CMD_MAP_11(dev, addr) (dev->cmd_map(MAP_11, ((addr) << 2))) 146 + 147 + static struct s3c_onenand *onenand; 148 + 149 + #ifdef CONFIG_MTD_PARTITIONS 150 + static const char *part_probes[] = { "cmdlinepart", NULL, }; 151 + #endif 152 + 153 + static inline int s3c_read_reg(int offset) 154 + { 155 + return readl(onenand->base + offset); 156 + } 157 + 158 + static inline void s3c_write_reg(int value, int offset) 159 + { 160 + writel(value, onenand->base + offset); 161 + } 162 + 163 + static inline int s3c_read_cmd(unsigned int cmd) 164 + { 165 + return readl(onenand->ahb_addr + cmd); 166 + } 167 + 168 + static inline void s3c_write_cmd(int value, unsigned int cmd) 169 + { 170 + writel(value, onenand->ahb_addr + cmd); 171 + } 172 + 173 + #ifdef SAMSUNG_DEBUG 174 + static void s3c_dump_reg(void) 175 + { 176 + int i; 177 + 178 + for (i = 0; i < 0x400; i += 0x40) { 179 + printk(KERN_INFO "0x%08X: 0x%08x 0x%08x 0x%08x 0x%08x\n", 180 + (unsigned int) onenand->base + i, 181 + s3c_read_reg(i), s3c_read_reg(i + 0x10), 182 + s3c_read_reg(i + 0x20), s3c_read_reg(i + 0x30)); 183 + } 184 + } 185 + #endif 186 + 187 + static unsigned int s3c64xx_cmd_map(unsigned type, unsigned val) 188 + { 189 + return (type 
<< S3C64XX_CMD_MAP_SHIFT) | val; 190 + } 191 + 192 + static unsigned int s5pc1xx_cmd_map(unsigned type, unsigned val) 193 + { 194 + return (type << S5PC1XX_CMD_MAP_SHIFT) | val; 195 + } 196 + 197 + static unsigned int s3c6400_mem_addr(int fba, int fpa, int fsa) 198 + { 199 + return (fba << S3C6400_FBA_SHIFT) | (fpa << S3C6400_FPA_SHIFT) | 200 + (fsa << S3C6400_FSA_SHIFT); 201 + } 202 + 203 + static unsigned int s3c6410_mem_addr(int fba, int fpa, int fsa) 204 + { 205 + return (fba << S3C6410_FBA_SHIFT) | (fpa << S3C6410_FPA_SHIFT) | 206 + (fsa << S3C6410_FSA_SHIFT); 207 + } 208 + 209 + static unsigned int s5pc100_mem_addr(int fba, int fpa, int fsa) 210 + { 211 + return (fba << S5PC100_FBA_SHIFT) | (fpa << S5PC100_FPA_SHIFT) | 212 + (fsa << S5PC100_FSA_SHIFT); 213 + } 214 + 215 + static void s3c_onenand_reset(void) 216 + { 217 + unsigned long timeout = 0x10000; 218 + int stat; 219 + 220 + s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET); 221 + while (1 && timeout--) { 222 + stat = s3c_read_reg(INT_ERR_STAT_OFFSET); 223 + if (stat & RST_CMP) 224 + break; 225 + } 226 + stat = s3c_read_reg(INT_ERR_STAT_OFFSET); 227 + s3c_write_reg(stat, INT_ERR_ACK_OFFSET); 228 + 229 + /* Clear interrupt */ 230 + s3c_write_reg(0x0, INT_ERR_ACK_OFFSET); 231 + /* Clear the ECC status */ 232 + s3c_write_reg(0x0, ECC_ERR_STAT_OFFSET); 233 + } 234 + 235 + static unsigned short s3c_onenand_readw(void __iomem *addr) 236 + { 237 + struct onenand_chip *this = onenand->mtd->priv; 238 + struct device *dev = &onenand->pdev->dev; 239 + int reg = addr - this->base; 240 + int word_addr = reg >> 1; 241 + int value; 242 + 243 + /* It's used for probing time */ 244 + switch (reg) { 245 + case ONENAND_REG_MANUFACTURER_ID: 246 + return s3c_read_reg(MANUFACT_ID_OFFSET); 247 + case ONENAND_REG_DEVICE_ID: 248 + return s3c_read_reg(DEVICE_ID_OFFSET); 249 + case ONENAND_REG_VERSION_ID: 250 + return s3c_read_reg(FLASH_VER_ID_OFFSET); 251 + case ONENAND_REG_DATA_BUFFER_SIZE: 252 + return 
s3c_read_reg(DATA_BUF_SIZE_OFFSET); 253 + case ONENAND_REG_TECHNOLOGY: 254 + return s3c_read_reg(TECH_OFFSET); 255 + case ONENAND_REG_SYS_CFG1: 256 + return s3c_read_reg(MEM_CFG_OFFSET); 257 + 258 + /* Used at unlock all status */ 259 + case ONENAND_REG_CTRL_STATUS: 260 + return 0; 261 + 262 + case ONENAND_REG_WP_STATUS: 263 + return ONENAND_WP_US; 264 + 265 + default: 266 + break; 267 + } 268 + 269 + /* BootRAM access control */ 270 + if ((unsigned int) addr < ONENAND_DATARAM && onenand->bootram_command) { 271 + if (word_addr == 0) 272 + return s3c_read_reg(MANUFACT_ID_OFFSET); 273 + if (word_addr == 1) 274 + return s3c_read_reg(DEVICE_ID_OFFSET); 275 + if (word_addr == 2) 276 + return s3c_read_reg(FLASH_VER_ID_OFFSET); 277 + } 278 + 279 + value = s3c_read_cmd(CMD_MAP_11(onenand, word_addr)) & 0xffff; 280 + dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__, 281 + word_addr, value); 282 + return value; 283 + } 284 + 285 + static void s3c_onenand_writew(unsigned short value, void __iomem *addr) 286 + { 287 + struct onenand_chip *this = onenand->mtd->priv; 288 + struct device *dev = &onenand->pdev->dev; 289 + unsigned int reg = addr - this->base; 290 + unsigned int word_addr = reg >> 1; 291 + 292 + /* It's used for probing time */ 293 + switch (reg) { 294 + case ONENAND_REG_SYS_CFG1: 295 + s3c_write_reg(value, MEM_CFG_OFFSET); 296 + return; 297 + 298 + case ONENAND_REG_START_ADDRESS1: 299 + case ONENAND_REG_START_ADDRESS2: 300 + return; 301 + 302 + /* Lock/lock-tight/unlock/unlock_all */ 303 + case ONENAND_REG_START_BLOCK_ADDRESS: 304 + return; 305 + 306 + default: 307 + break; 308 + } 309 + 310 + /* BootRAM access control */ 311 + if ((unsigned int)addr < ONENAND_DATARAM) { 312 + if (value == ONENAND_CMD_READID) { 313 + onenand->bootram_command = 1; 314 + return; 315 + } 316 + if (value == ONENAND_CMD_RESET) { 317 + s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET); 318 + onenand->bootram_command = 0; 319 + return; 320 + } 321 + } 322 + 
323 + dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__, 324 + word_addr, value); 325 + 326 + s3c_write_cmd(value, CMD_MAP_11(onenand, word_addr)); 327 + } 328 + 329 + static int s3c_onenand_wait(struct mtd_info *mtd, int state) 330 + { 331 + struct device *dev = &onenand->pdev->dev; 332 + unsigned int flags = INT_ACT; 333 + unsigned int stat, ecc; 334 + unsigned long timeout; 335 + 336 + switch (state) { 337 + case FL_READING: 338 + flags |= BLK_RW_CMP | LOAD_CMP; 339 + break; 340 + case FL_WRITING: 341 + flags |= BLK_RW_CMP | PGM_CMP; 342 + break; 343 + case FL_ERASING: 344 + flags |= BLK_RW_CMP | ERS_CMP; 345 + break; 346 + case FL_LOCKING: 347 + flags |= BLK_RW_CMP; 348 + break; 349 + default: 350 + break; 351 + } 352 + 353 + /* The 20 msec is enough */ 354 + timeout = jiffies + msecs_to_jiffies(20); 355 + while (time_before(jiffies, timeout)) { 356 + stat = s3c_read_reg(INT_ERR_STAT_OFFSET); 357 + if (stat & flags) 358 + break; 359 + 360 + if (state != FL_READING) 361 + cond_resched(); 362 + } 363 + /* To get correct interrupt status in timeout case */ 364 + stat = s3c_read_reg(INT_ERR_STAT_OFFSET); 365 + s3c_write_reg(stat, INT_ERR_ACK_OFFSET); 366 + 367 + /* 368 + * In the Spec. 
it checks the controller status first 369 + * However if you get the correct information in case of 370 + * power off recovery (POR) test, it should read ECC status first 371 + */ 372 + if (stat & LOAD_CMP) { 373 + ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET); 374 + if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) { 375 + dev_info(dev, "%s: ECC error = 0x%04x\n", __func__, 376 + ecc); 377 + mtd->ecc_stats.failed++; 378 + return -EBADMSG; 379 + } 380 + } 381 + 382 + if (stat & (LOCKED_BLK | ERS_FAIL | PGM_FAIL | LD_FAIL_ECC_ERR)) { 383 + dev_info(dev, "%s: controller error = 0x%04x\n", __func__, 384 + stat); 385 + if (stat & LOCKED_BLK) 386 + dev_info(dev, "%s: it's locked error = 0x%04x\n", 387 + __func__, stat); 388 + 389 + return -EIO; 390 + } 391 + 392 + return 0; 393 + } 394 + 395 + static int s3c_onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, 396 + size_t len) 397 + { 398 + struct onenand_chip *this = mtd->priv; 399 + unsigned int *m, *s; 400 + int fba, fpa, fsa = 0; 401 + unsigned int mem_addr, cmd_map_01, cmd_map_10; 402 + int i, mcount, scount; 403 + int index; 404 + 405 + fba = (int) (addr >> this->erase_shift); 406 + fpa = (int) (addr >> this->page_shift); 407 + fpa &= this->page_mask; 408 + 409 + mem_addr = onenand->mem_addr(fba, fpa, fsa); 410 + cmd_map_01 = CMD_MAP_01(onenand, mem_addr); 411 + cmd_map_10 = CMD_MAP_10(onenand, mem_addr); 412 + 413 + switch (cmd) { 414 + case ONENAND_CMD_READ: 415 + case ONENAND_CMD_READOOB: 416 + case ONENAND_CMD_BUFFERRAM: 417 + ONENAND_SET_NEXT_BUFFERRAM(this); 418 + default: 419 + break; 420 + } 421 + 422 + index = ONENAND_CURRENT_BUFFERRAM(this); 423 + 424 + /* 425 + * Emulate Two BufferRAMs and access with 4 bytes pointer 426 + */ 427 + m = (unsigned int *) onenand->page_buf; 428 + s = (unsigned int *) onenand->oob_buf; 429 + 430 + if (index) { 431 + m += (this->writesize >> 2); 432 + s += (mtd->oobsize >> 2); 433 + } 434 + 435 + mcount = mtd->writesize >> 2; 436 + scount = mtd->oobsize >> 2; 437 + 438 + switch 
(cmd) { 439 + case ONENAND_CMD_READ: 440 + /* Main */ 441 + for (i = 0; i < mcount; i++) 442 + *m++ = s3c_read_cmd(cmd_map_01); 443 + return 0; 444 + 445 + case ONENAND_CMD_READOOB: 446 + s3c_write_reg(TSRF, TRANS_SPARE_OFFSET); 447 + /* Main */ 448 + for (i = 0; i < mcount; i++) 449 + *m++ = s3c_read_cmd(cmd_map_01); 450 + 451 + /* Spare */ 452 + for (i = 0; i < scount; i++) 453 + *s++ = s3c_read_cmd(cmd_map_01); 454 + 455 + s3c_write_reg(0, TRANS_SPARE_OFFSET); 456 + return 0; 457 + 458 + case ONENAND_CMD_PROG: 459 + /* Main */ 460 + for (i = 0; i < mcount; i++) 461 + s3c_write_cmd(*m++, cmd_map_01); 462 + return 0; 463 + 464 + case ONENAND_CMD_PROGOOB: 465 + s3c_write_reg(TSRF, TRANS_SPARE_OFFSET); 466 + 467 + /* Main - dummy write */ 468 + for (i = 0; i < mcount; i++) 469 + s3c_write_cmd(0xffffffff, cmd_map_01); 470 + 471 + /* Spare */ 472 + for (i = 0; i < scount; i++) 473 + s3c_write_cmd(*s++, cmd_map_01); 474 + 475 + s3c_write_reg(0, TRANS_SPARE_OFFSET); 476 + return 0; 477 + 478 + case ONENAND_CMD_UNLOCK_ALL: 479 + s3c_write_cmd(ONENAND_UNLOCK_ALL, cmd_map_10); 480 + return 0; 481 + 482 + case ONENAND_CMD_ERASE: 483 + s3c_write_cmd(ONENAND_ERASE_START, cmd_map_10); 484 + return 0; 485 + 486 + default: 487 + break; 488 + } 489 + 490 + return 0; 491 + } 492 + 493 + static unsigned char *s3c_get_bufferram(struct mtd_info *mtd, int area) 494 + { 495 + struct onenand_chip *this = mtd->priv; 496 + int index = ONENAND_CURRENT_BUFFERRAM(this); 497 + unsigned char *p; 498 + 499 + if (area == ONENAND_DATARAM) { 500 + p = (unsigned char *) onenand->page_buf; 501 + if (index == 1) 502 + p += this->writesize; 503 + } else { 504 + p = (unsigned char *) onenand->oob_buf; 505 + if (index == 1) 506 + p += mtd->oobsize; 507 + } 508 + 509 + return p; 510 + } 511 + 512 + static int onenand_read_bufferram(struct mtd_info *mtd, int area, 513 + unsigned char *buffer, int offset, 514 + size_t count) 515 + { 516 + unsigned char *p; 517 + 518 + p = s3c_get_bufferram(mtd, area); 519 
+ memcpy(buffer, p + offset, count); 520 + return 0; 521 + } 522 + 523 + static int onenand_write_bufferram(struct mtd_info *mtd, int area, 524 + const unsigned char *buffer, int offset, 525 + size_t count) 526 + { 527 + unsigned char *p; 528 + 529 + p = s3c_get_bufferram(mtd, area); 530 + memcpy(p + offset, buffer, count); 531 + return 0; 532 + } 533 + 534 + static int s5pc110_dma_ops(void *dst, void *src, size_t count, int direction) 535 + { 536 + void __iomem *base = onenand->dma_addr; 537 + int status; 538 + 539 + writel(src, base + S5PC110_DMA_SRC_ADDR); 540 + writel(dst, base + S5PC110_DMA_DST_ADDR); 541 + 542 + if (direction == S5PC110_DMA_DIR_READ) { 543 + writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG); 544 + writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG); 545 + } else { 546 + writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG); 547 + writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG); 548 + } 549 + 550 + writel(count, base + S5PC110_DMA_TRANS_SIZE); 551 + writel(direction, base + S5PC110_DMA_TRANS_DIR); 552 + 553 + writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD); 554 + 555 + do { 556 + status = readl(base + S5PC110_DMA_TRANS_STATUS); 557 + } while (!(status & S5PC110_DMA_TRANS_STATUS_TD)); 558 + 559 + if (status & S5PC110_DMA_TRANS_STATUS_TE) { 560 + writel(S5PC110_DMA_TRANS_CMD_TEC, base + S5PC110_DMA_TRANS_CMD); 561 + writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD); 562 + return -EIO; 563 + } 564 + 565 + writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD); 566 + 567 + return 0; 568 + } 569 + 570 + static int s5pc110_read_bufferram(struct mtd_info *mtd, int area, 571 + unsigned char *buffer, int offset, size_t count) 572 + { 573 + struct onenand_chip *this = mtd->priv; 574 + void __iomem *bufferram; 575 + void __iomem *p; 576 + void *buf = (void *) buffer; 577 + dma_addr_t dma_src, dma_dst; 578 + int err; 579 + 580 + p = bufferram = this->base + area; 581 + if 
(ONENAND_CURRENT_BUFFERRAM(this)) { 582 + if (area == ONENAND_DATARAM) 583 + p += this->writesize; 584 + else 585 + p += mtd->oobsize; 586 + } 587 + 588 + if (offset & 3 || (size_t) buf & 3 || 589 + !onenand->dma_addr || count != mtd->writesize) 590 + goto normal; 591 + 592 + /* Handle vmalloc address */ 593 + if (buf >= high_memory) { 594 + struct page *page; 595 + 596 + if (((size_t) buf & PAGE_MASK) != 597 + ((size_t) (buf + count - 1) & PAGE_MASK)) 598 + goto normal; 599 + page = vmalloc_to_page(buf); 600 + if (!page) 601 + goto normal; 602 + buf = page_address(page) + ((size_t) buf & ~PAGE_MASK); 603 + } 604 + 605 + /* DMA routine */ 606 + dma_src = onenand->phys_base + (p - this->base); 607 + dma_dst = dma_map_single(&onenand->pdev->dev, 608 + buf, count, DMA_FROM_DEVICE); 609 + if (dma_mapping_error(&onenand->pdev->dev, dma_dst)) { 610 + dev_err(&onenand->pdev->dev, 611 + "Couldn't map a %d byte buffer for DMA\n", count); 612 + goto normal; 613 + } 614 + err = s5pc110_dma_ops((void *) dma_dst, (void *) dma_src, 615 + count, S5PC110_DMA_DIR_READ); 616 + dma_unmap_single(&onenand->pdev->dev, dma_dst, count, DMA_FROM_DEVICE); 617 + 618 + if (!err) 619 + return 0; 620 + 621 + normal: 622 + if (count != mtd->writesize) { 623 + /* Copy the bufferram to memory to prevent unaligned access */ 624 + memcpy(this->page_buf, bufferram, mtd->writesize); 625 + p = this->page_buf + offset; 626 + } 627 + 628 + memcpy(buffer, p, count); 629 + 630 + return 0; 631 + } 632 + 633 + static int s3c_onenand_bbt_wait(struct mtd_info *mtd, int state) 634 + { 635 + unsigned int flags = INT_ACT | LOAD_CMP; 636 + unsigned int stat; 637 + unsigned long timeout; 638 + 639 + /* The 20 msec is enough */ 640 + timeout = jiffies + msecs_to_jiffies(20); 641 + while (time_before(jiffies, timeout)) { 642 + stat = s3c_read_reg(INT_ERR_STAT_OFFSET); 643 + if (stat & flags) 644 + break; 645 + } 646 + /* To get correct interrupt status in timeout case */ 647 + stat = 
s3c_read_reg(INT_ERR_STAT_OFFSET); 648 + s3c_write_reg(stat, INT_ERR_ACK_OFFSET); 649 + 650 + if (stat & LD_FAIL_ECC_ERR) { 651 + s3c_onenand_reset(); 652 + return ONENAND_BBT_READ_ERROR; 653 + } 654 + 655 + if (stat & LOAD_CMP) { 656 + int ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET); 657 + if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) { 658 + s3c_onenand_reset(); 659 + return ONENAND_BBT_READ_ERROR; 660 + } 661 + } 662 + 663 + return 0; 664 + } 665 + 666 + static void s3c_onenand_check_lock_status(struct mtd_info *mtd) 667 + { 668 + struct onenand_chip *this = mtd->priv; 669 + struct device *dev = &onenand->pdev->dev; 670 + unsigned int block, end; 671 + int tmp; 672 + 673 + end = this->chipsize >> this->erase_shift; 674 + 675 + for (block = 0; block < end; block++) { 676 + unsigned int mem_addr = onenand->mem_addr(block, 0, 0); 677 + tmp = s3c_read_cmd(CMD_MAP_01(onenand, mem_addr)); 678 + 679 + if (s3c_read_reg(INT_ERR_STAT_OFFSET) & LOCKED_BLK) { 680 + dev_err(dev, "block %d is write-protected!\n", block); 681 + s3c_write_reg(LOCKED_BLK, INT_ERR_ACK_OFFSET); 682 + } 683 + } 684 + } 685 + 686 + static void s3c_onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, 687 + size_t len, int cmd) 688 + { 689 + struct onenand_chip *this = mtd->priv; 690 + int start, end, start_mem_addr, end_mem_addr; 691 + 692 + start = ofs >> this->erase_shift; 693 + start_mem_addr = onenand->mem_addr(start, 0, 0); 694 + end = start + (len >> this->erase_shift) - 1; 695 + end_mem_addr = onenand->mem_addr(end, 0, 0); 696 + 697 + if (cmd == ONENAND_CMD_LOCK) { 698 + s3c_write_cmd(ONENAND_LOCK_START, CMD_MAP_10(onenand, 699 + start_mem_addr)); 700 + s3c_write_cmd(ONENAND_LOCK_END, CMD_MAP_10(onenand, 701 + end_mem_addr)); 702 + } else { 703 + s3c_write_cmd(ONENAND_UNLOCK_START, CMD_MAP_10(onenand, 704 + start_mem_addr)); 705 + s3c_write_cmd(ONENAND_UNLOCK_END, CMD_MAP_10(onenand, 706 + end_mem_addr)); 707 + } 708 + 709 + this->wait(mtd, FL_LOCKING); 710 + } 711 + 712 + static void 
s3c_unlock_all(struct mtd_info *mtd) 713 + { 714 + struct onenand_chip *this = mtd->priv; 715 + loff_t ofs = 0; 716 + size_t len = this->chipsize; 717 + 718 + if (this->options & ONENAND_HAS_UNLOCK_ALL) { 719 + /* Write unlock command */ 720 + this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0); 721 + 722 + /* No need to check return value */ 723 + this->wait(mtd, FL_LOCKING); 724 + 725 + /* Workaround for all block unlock in DDP */ 726 + if (!ONENAND_IS_DDP(this)) { 727 + s3c_onenand_check_lock_status(mtd); 728 + return; 729 + } 730 + 731 + /* All blocks on another chip */ 732 + ofs = this->chipsize >> 1; 733 + len = this->chipsize >> 1; 734 + } 735 + 736 + s3c_onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK); 737 + 738 + s3c_onenand_check_lock_status(mtd); 739 + } 740 + 741 + static void s3c_onenand_setup(struct mtd_info *mtd) 742 + { 743 + struct onenand_chip *this = mtd->priv; 744 + 745 + onenand->mtd = mtd; 746 + 747 + if (onenand->type == TYPE_S3C6400) { 748 + onenand->mem_addr = s3c6400_mem_addr; 749 + onenand->cmd_map = s3c64xx_cmd_map; 750 + } else if (onenand->type == TYPE_S3C6410) { 751 + onenand->mem_addr = s3c6410_mem_addr; 752 + onenand->cmd_map = s3c64xx_cmd_map; 753 + } else if (onenand->type == TYPE_S5PC100) { 754 + onenand->mem_addr = s5pc100_mem_addr; 755 + onenand->cmd_map = s5pc1xx_cmd_map; 756 + } else if (onenand->type == TYPE_S5PC110) { 757 + /* Use generic onenand functions */ 758 + onenand->cmd_map = s5pc1xx_cmd_map; 759 + this->read_bufferram = s5pc110_read_bufferram; 760 + return; 761 + } else { 762 + BUG(); 763 + } 764 + 765 + this->read_word = s3c_onenand_readw; 766 + this->write_word = s3c_onenand_writew; 767 + 768 + this->wait = s3c_onenand_wait; 769 + this->bbt_wait = s3c_onenand_bbt_wait; 770 + this->unlock_all = s3c_unlock_all; 771 + this->command = s3c_onenand_command; 772 + 773 + this->read_bufferram = onenand_read_bufferram; 774 + this->write_bufferram = onenand_write_bufferram; 775 + } 776 + 777 + static int 
s3c_onenand_probe(struct platform_device *pdev) 778 + { 779 + struct onenand_platform_data *pdata; 780 + struct onenand_chip *this; 781 + struct mtd_info *mtd; 782 + struct resource *r; 783 + int size, err; 784 + unsigned long onenand_ctrl_cfg = 0; 785 + 786 + pdata = pdev->dev.platform_data; 787 + /* No need to check pdata. the platform data is optional */ 788 + 789 + size = sizeof(struct mtd_info) + sizeof(struct onenand_chip); 790 + mtd = kzalloc(size, GFP_KERNEL); 791 + if (!mtd) { 792 + dev_err(&pdev->dev, "failed to allocate memory\n"); 793 + return -ENOMEM; 794 + } 795 + 796 + onenand = kzalloc(sizeof(struct s3c_onenand), GFP_KERNEL); 797 + if (!onenand) { 798 + err = -ENOMEM; 799 + goto onenand_fail; 800 + } 801 + 802 + this = (struct onenand_chip *) &mtd[1]; 803 + mtd->priv = this; 804 + mtd->dev.parent = &pdev->dev; 805 + mtd->owner = THIS_MODULE; 806 + onenand->pdev = pdev; 807 + onenand->type = platform_get_device_id(pdev)->driver_data; 808 + 809 + s3c_onenand_setup(mtd); 810 + 811 + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 812 + if (!r) { 813 + dev_err(&pdev->dev, "no memory resource defined\n"); 814 + return -ENOENT; 815 + goto ahb_resource_failed; 816 + } 817 + 818 + onenand->base_res = request_mem_region(r->start, resource_size(r), 819 + pdev->name); 820 + if (!onenand->base_res) { 821 + dev_err(&pdev->dev, "failed to request memory resource\n"); 822 + err = -EBUSY; 823 + goto resource_failed; 824 + } 825 + 826 + onenand->base = ioremap(r->start, resource_size(r)); 827 + if (!onenand->base) { 828 + dev_err(&pdev->dev, "failed to map memory resource\n"); 829 + err = -EFAULT; 830 + goto ioremap_failed; 831 + } 832 + /* Set onenand_chip also */ 833 + this->base = onenand->base; 834 + 835 + /* Use runtime badblock check */ 836 + this->options |= ONENAND_SKIP_UNLOCK_CHECK; 837 + 838 + if (onenand->type != TYPE_S5PC110) { 839 + r = platform_get_resource(pdev, IORESOURCE_MEM, 1); 840 + if (!r) { 841 + dev_err(&pdev->dev, "no buffer memory 
resource defined\n"); 842 + return -ENOENT; 843 + goto ahb_resource_failed; 844 + } 845 + 846 + onenand->ahb_res = request_mem_region(r->start, resource_size(r), 847 + pdev->name); 848 + if (!onenand->ahb_res) { 849 + dev_err(&pdev->dev, "failed to request buffer memory resource\n"); 850 + err = -EBUSY; 851 + goto ahb_resource_failed; 852 + } 853 + 854 + onenand->ahb_addr = ioremap(r->start, resource_size(r)); 855 + if (!onenand->ahb_addr) { 856 + dev_err(&pdev->dev, "failed to map buffer memory resource\n"); 857 + err = -EINVAL; 858 + goto ahb_ioremap_failed; 859 + } 860 + 861 + /* Allocate 4KiB BufferRAM */ 862 + onenand->page_buf = kzalloc(SZ_4K, GFP_KERNEL); 863 + if (!onenand->page_buf) { 864 + err = -ENOMEM; 865 + goto page_buf_fail; 866 + } 867 + 868 + /* Allocate 128 SpareRAM */ 869 + onenand->oob_buf = kzalloc(128, GFP_KERNEL); 870 + if (!onenand->oob_buf) { 871 + err = -ENOMEM; 872 + goto oob_buf_fail; 873 + } 874 + 875 + /* S3C doesn't handle subpage write */ 876 + mtd->subpage_sft = 0; 877 + this->subpagesize = mtd->writesize; 878 + 879 + } else { /* S5PC110 */ 880 + r = platform_get_resource(pdev, IORESOURCE_MEM, 1); 881 + if (!r) { 882 + dev_err(&pdev->dev, "no dma memory resource defined\n"); 883 + return -ENOENT; 884 + goto dma_resource_failed; 885 + } 886 + 887 + onenand->dma_res = request_mem_region(r->start, resource_size(r), 888 + pdev->name); 889 + if (!onenand->dma_res) { 890 + dev_err(&pdev->dev, "failed to request dma memory resource\n"); 891 + err = -EBUSY; 892 + goto dma_resource_failed; 893 + } 894 + 895 + onenand->dma_addr = ioremap(r->start, resource_size(r)); 896 + if (!onenand->dma_addr) { 897 + dev_err(&pdev->dev, "failed to map dma memory resource\n"); 898 + err = -EINVAL; 899 + goto dma_ioremap_failed; 900 + } 901 + 902 + onenand->phys_base = onenand->base_res->start; 903 + 904 + onenand_ctrl_cfg = readl(onenand->dma_addr + 0x100); 905 + if ((onenand_ctrl_cfg & ONENAND_SYS_CFG1_SYNC_WRITE) && 906 + onenand->dma_addr) 907 + 
writel(onenand_ctrl_cfg & ~ONENAND_SYS_CFG1_SYNC_WRITE, 908 + onenand->dma_addr + 0x100); 909 + else 910 + onenand_ctrl_cfg = 0; 911 + } 912 + 913 + if (onenand_scan(mtd, 1)) { 914 + err = -EFAULT; 915 + goto scan_failed; 916 + } 917 + 918 + if (onenand->type == TYPE_S5PC110) { 919 + if (onenand_ctrl_cfg && onenand->dma_addr) 920 + writel(onenand_ctrl_cfg, onenand->dma_addr + 0x100); 921 + } else { 922 + /* S3C doesn't handle subpage write */ 923 + mtd->subpage_sft = 0; 924 + this->subpagesize = mtd->writesize; 925 + } 926 + 927 + if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ) 928 + dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n"); 929 + 930 + #ifdef CONFIG_MTD_PARTITIONS 931 + err = parse_mtd_partitions(mtd, part_probes, &onenand->parts, 0); 932 + if (err > 0) 933 + add_mtd_partitions(mtd, onenand->parts, err); 934 + else if (err <= 0 && pdata && pdata->parts) 935 + add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts); 936 + else 937 + #endif 938 + err = add_mtd_device(mtd); 939 + 940 + platform_set_drvdata(pdev, mtd); 941 + 942 + return 0; 943 + 944 + scan_failed: 945 + if (onenand->dma_addr) 946 + iounmap(onenand->dma_addr); 947 + dma_ioremap_failed: 948 + if (onenand->dma_res) 949 + release_mem_region(onenand->dma_res->start, 950 + resource_size(onenand->dma_res)); 951 + kfree(onenand->oob_buf); 952 + oob_buf_fail: 953 + kfree(onenand->page_buf); 954 + page_buf_fail: 955 + if (onenand->ahb_addr) 956 + iounmap(onenand->ahb_addr); 957 + ahb_ioremap_failed: 958 + if (onenand->ahb_res) 959 + release_mem_region(onenand->ahb_res->start, 960 + resource_size(onenand->ahb_res)); 961 + dma_resource_failed: 962 + ahb_resource_failed: 963 + iounmap(onenand->base); 964 + ioremap_failed: 965 + if (onenand->base_res) 966 + release_mem_region(onenand->base_res->start, 967 + resource_size(onenand->base_res)); 968 + resource_failed: 969 + kfree(onenand); 970 + onenand_fail: 971 + kfree(mtd); 972 + return err; 973 + } 974 + 975 + static int 
__devexit s3c_onenand_remove(struct platform_device *pdev) 976 + { 977 + struct mtd_info *mtd = platform_get_drvdata(pdev); 978 + 979 + onenand_release(mtd); 980 + if (onenand->ahb_addr) 981 + iounmap(onenand->ahb_addr); 982 + if (onenand->ahb_res) 983 + release_mem_region(onenand->ahb_res->start, 984 + resource_size(onenand->ahb_res)); 985 + if (onenand->dma_addr) 986 + iounmap(onenand->dma_addr); 987 + if (onenand->dma_res) 988 + release_mem_region(onenand->dma_res->start, 989 + resource_size(onenand->dma_res)); 990 + 991 + iounmap(onenand->base); 992 + release_mem_region(onenand->base_res->start, 993 + resource_size(onenand->base_res)); 994 + 995 + platform_set_drvdata(pdev, NULL); 996 + kfree(onenand->oob_buf); 997 + kfree(onenand->page_buf); 998 + kfree(onenand); 999 + kfree(mtd); 1000 + return 0; 1001 + } 1002 + 1003 + static int s3c_pm_ops_suspend(struct device *dev) 1004 + { 1005 + struct platform_device *pdev = to_platform_device(dev); 1006 + struct mtd_info *mtd = platform_get_drvdata(pdev); 1007 + struct onenand_chip *this = mtd->priv; 1008 + 1009 + this->wait(mtd, FL_PM_SUSPENDED); 1010 + return mtd->suspend(mtd); 1011 + } 1012 + 1013 + static int s3c_pm_ops_resume(struct device *dev) 1014 + { 1015 + struct platform_device *pdev = to_platform_device(dev); 1016 + struct mtd_info *mtd = platform_get_drvdata(pdev); 1017 + struct onenand_chip *this = mtd->priv; 1018 + 1019 + mtd->resume(mtd); 1020 + this->unlock_all(mtd); 1021 + return 0; 1022 + } 1023 + 1024 + static const struct dev_pm_ops s3c_pm_ops = { 1025 + .suspend = s3c_pm_ops_suspend, 1026 + .resume = s3c_pm_ops_resume, 1027 + }; 1028 + 1029 + static struct platform_device_id s3c_onenand_driver_ids[] = { 1030 + { 1031 + .name = "s3c6400-onenand", 1032 + .driver_data = TYPE_S3C6400, 1033 + }, { 1034 + .name = "s3c6410-onenand", 1035 + .driver_data = TYPE_S3C6410, 1036 + }, { 1037 + .name = "s5pc100-onenand", 1038 + .driver_data = TYPE_S5PC100, 1039 + }, { 1040 + .name = "s5pc110-onenand", 1041 + 
.driver_data = TYPE_S5PC110, 1042 + }, { }, 1043 + }; 1044 + MODULE_DEVICE_TABLE(platform, s3c_onenand_driver_ids); 1045 + 1046 + static struct platform_driver s3c_onenand_driver = { 1047 + .driver = { 1048 + .name = "samsung-onenand", 1049 + .pm = &s3c_pm_ops, 1050 + }, 1051 + .id_table = s3c_onenand_driver_ids, 1052 + .probe = s3c_onenand_probe, 1053 + .remove = __devexit_p(s3c_onenand_remove), 1054 + }; 1055 + 1056 + static int __init s3c_onenand_init(void) 1057 + { 1058 + return platform_driver_register(&s3c_onenand_driver); 1059 + } 1060 + 1061 + static void __exit s3c_onenand_exit(void) 1062 + { 1063 + platform_driver_unregister(&s3c_onenand_driver); 1064 + } 1065 + 1066 + module_init(s3c_onenand_init); 1067 + module_exit(s3c_onenand_exit); 1068 + 1069 + MODULE_LICENSE("GPL"); 1070 + MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>"); 1071 + MODULE_DESCRIPTION("Samsung OneNAND controller support");
-1
drivers/mtd/rfd_ftl.c
··· 817 817 vfree(part->sector_map); 818 818 kfree(part->header_cache); 819 819 kfree(part->blocks); 820 - kfree(part); 821 820 } 822 821 823 822 static struct mtd_blktrans_ops rfd_ftl_tr = {
+1284
drivers/mtd/sm_ftl.c
··· 1 + /* 2 + * Copyright © 2009 - Maxim Levitsky 3 + * SmartMedia/xD translation layer 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License version 2 as 7 + * published by the Free Software Foundation. 8 + */ 9 + 10 + #include <linux/kernel.h> 11 + #include <linux/module.h> 12 + #include <linux/random.h> 13 + #include <linux/hdreg.h> 14 + #include <linux/kthread.h> 15 + #include <linux/freezer.h> 16 + #include <linux/sysfs.h> 17 + #include <linux/bitops.h> 18 + #include <linux/slab.h> 19 + #include <linux/mtd/nand_ecc.h> 20 + #include "nand/sm_common.h" 21 + #include "sm_ftl.h" 22 + 23 + 24 + 25 + struct workqueue_struct *cache_flush_workqueue; 26 + 27 + static int cache_timeout = 1000; 28 + module_param(cache_timeout, bool, S_IRUGO); 29 + MODULE_PARM_DESC(cache_timeout, 30 + "Timeout (in ms) for cache flush (1000 ms default"); 31 + 32 + static int debug; 33 + module_param(debug, int, S_IRUGO | S_IWUSR); 34 + MODULE_PARM_DESC(debug, "Debug level (0-2)"); 35 + 36 + 37 + /* ------------------- sysfs attributtes ---------------------------------- */ 38 + struct sm_sysfs_attribute { 39 + struct device_attribute dev_attr; 40 + char *data; 41 + int len; 42 + }; 43 + 44 + ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr, 45 + char *buf) 46 + { 47 + struct sm_sysfs_attribute *sm_attr = 48 + container_of(attr, struct sm_sysfs_attribute, dev_attr); 49 + 50 + strncpy(buf, sm_attr->data, sm_attr->len); 51 + return sm_attr->len; 52 + } 53 + 54 + 55 + #define NUM_ATTRIBUTES 1 56 + #define SM_CIS_VENDOR_OFFSET 0x59 57 + struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl) 58 + { 59 + struct attribute_group *attr_group; 60 + struct attribute **attributes; 61 + struct sm_sysfs_attribute *vendor_attribute; 62 + 63 + int vendor_len = strnlen(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, 64 + SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET); 65 + 66 + char *vendor = 
kmalloc(vendor_len, GFP_KERNEL); 67 + memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len); 68 + vendor[vendor_len] = 0; 69 + 70 + /* Initialize sysfs attributes */ 71 + vendor_attribute = 72 + kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL); 73 + 74 + sysfs_attr_init(&vendor_attribute->dev_attr.attr); 75 + 76 + vendor_attribute->data = vendor; 77 + vendor_attribute->len = vendor_len; 78 + vendor_attribute->dev_attr.attr.name = "vendor"; 79 + vendor_attribute->dev_attr.attr.mode = S_IRUGO; 80 + vendor_attribute->dev_attr.show = sm_attr_show; 81 + 82 + 83 + /* Create array of pointers to the attributes */ 84 + attributes = kzalloc(sizeof(struct attribute *) * (NUM_ATTRIBUTES + 1), 85 + GFP_KERNEL); 86 + attributes[0] = &vendor_attribute->dev_attr.attr; 87 + 88 + /* Finally create the attribute group */ 89 + attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL); 90 + attr_group->attrs = attributes; 91 + return attr_group; 92 + } 93 + 94 + void sm_delete_sysfs_attributes(struct sm_ftl *ftl) 95 + { 96 + struct attribute **attributes = ftl->disk_attributes->attrs; 97 + int i; 98 + 99 + for (i = 0; attributes[i] ; i++) { 100 + 101 + struct device_attribute *dev_attr = container_of(attributes[i], 102 + struct device_attribute, attr); 103 + 104 + struct sm_sysfs_attribute *sm_attr = 105 + container_of(dev_attr, 106 + struct sm_sysfs_attribute, dev_attr); 107 + 108 + kfree(sm_attr->data); 109 + kfree(sm_attr); 110 + } 111 + 112 + kfree(ftl->disk_attributes->attrs); 113 + kfree(ftl->disk_attributes); 114 + } 115 + 116 + 117 + /* ----------------------- oob helpers -------------------------------------- */ 118 + 119 + static int sm_get_lba(uint8_t *lba) 120 + { 121 + /* check fixed bits */ 122 + if ((lba[0] & 0xF8) != 0x10) 123 + return -2; 124 + 125 + /* check parity - endianess doesn't matter */ 126 + if (hweight16(*(uint16_t *)lba) & 1) 127 + return -2; 128 + 129 + return (lba[1] >> 1) | ((lba[0] & 0x07) << 7); 130 + } 131 + 132 + 133 + /* 
134 + * Read LBA asscociated with block 135 + * returns -1, if block is erased 136 + * returns -2 if error happens 137 + */ 138 + static int sm_read_lba(struct sm_oob *oob) 139 + { 140 + static const uint32_t erased_pattern[4] = { 141 + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }; 142 + 143 + uint16_t lba_test; 144 + int lba; 145 + 146 + /* First test for erased block */ 147 + if (!memcmp(oob, erased_pattern, SM_OOB_SIZE)) 148 + return -1; 149 + 150 + /* Now check is both copies of the LBA differ too much */ 151 + lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t*)oob->lba_copy2; 152 + if (lba_test && !is_power_of_2(lba_test)) 153 + return -2; 154 + 155 + /* And read it */ 156 + lba = sm_get_lba(oob->lba_copy1); 157 + 158 + if (lba == -2) 159 + lba = sm_get_lba(oob->lba_copy2); 160 + 161 + return lba; 162 + } 163 + 164 + static void sm_write_lba(struct sm_oob *oob, uint16_t lba) 165 + { 166 + uint8_t tmp[2]; 167 + 168 + WARN_ON(lba >= 1000); 169 + 170 + tmp[0] = 0x10 | ((lba >> 7) & 0x07); 171 + tmp[1] = (lba << 1) & 0xFF; 172 + 173 + if (hweight16(*(uint16_t *)tmp) & 0x01) 174 + tmp[1] |= 1; 175 + 176 + oob->lba_copy1[0] = oob->lba_copy2[0] = tmp[0]; 177 + oob->lba_copy1[1] = oob->lba_copy2[1] = tmp[1]; 178 + } 179 + 180 + 181 + /* Make offset from parts */ 182 + static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset) 183 + { 184 + WARN_ON(boffset & (SM_SECTOR_SIZE - 1)); 185 + WARN_ON(zone < 0 || zone >= ftl->zone_count); 186 + WARN_ON(block >= ftl->zone_size); 187 + WARN_ON(boffset >= ftl->block_size); 188 + 189 + if (block == -1) 190 + return -1; 191 + 192 + return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset; 193 + } 194 + 195 + /* Breaks offset into parts */ 196 + static void sm_break_offset(struct sm_ftl *ftl, loff_t offset, 197 + int *zone, int *block, int *boffset) 198 + { 199 + *boffset = do_div(offset, ftl->block_size); 200 + *block = do_div(offset, ftl->max_lba); 201 + *zone = offset >= ftl->zone_count ? 
-1 : offset; 202 + } 203 + 204 + /* ---------------------- low level IO ------------------------------------- */ 205 + 206 + static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob) 207 + { 208 + uint8_t ecc[3]; 209 + 210 + __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc); 211 + if (__nand_correct_data(buffer, ecc, oob->ecc1, SM_SMALL_PAGE) < 0) 212 + return -EIO; 213 + 214 + buffer += SM_SMALL_PAGE; 215 + 216 + __nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc); 217 + if (__nand_correct_data(buffer, ecc, oob->ecc2, SM_SMALL_PAGE) < 0) 218 + return -EIO; 219 + return 0; 220 + } 221 + 222 + /* Reads a sector + oob*/ 223 + static int sm_read_sector(struct sm_ftl *ftl, 224 + int zone, int block, int boffset, 225 + uint8_t *buffer, struct sm_oob *oob) 226 + { 227 + struct mtd_info *mtd = ftl->trans->mtd; 228 + struct mtd_oob_ops ops; 229 + struct sm_oob tmp_oob; 230 + int ret = -EIO; 231 + int try = 0; 232 + 233 + /* FTL can contain -1 entries that are by default filled with bits */ 234 + if (block == -1) { 235 + memset(buffer, 0xFF, SM_SECTOR_SIZE); 236 + return 0; 237 + } 238 + 239 + /* User might not need the oob, but we do for data vertification */ 240 + if (!oob) 241 + oob = &tmp_oob; 242 + 243 + ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE; 244 + ops.ooboffs = 0; 245 + ops.ooblen = SM_OOB_SIZE; 246 + ops.oobbuf = (void *)oob; 247 + ops.len = SM_SECTOR_SIZE; 248 + ops.datbuf = buffer; 249 + 250 + again: 251 + if (try++) { 252 + /* Avoid infinite recursion on CIS reads, sm_recheck_media 253 + won't help anyway */ 254 + if (zone == 0 && block == ftl->cis_block && boffset == 255 + ftl->cis_boffset) 256 + return ret; 257 + 258 + /* Test if media is stable */ 259 + if (try == 3 || sm_recheck_media(ftl)) 260 + return ret; 261 + } 262 + 263 + /* Unfortunelly, oob read will _always_ succeed, 264 + despite card removal..... 
*/ 265 + ret = mtd->read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops); 266 + 267 + /* Test for unknown errors */ 268 + if (ret != 0 && ret != -EUCLEAN && ret != -EBADMSG) { 269 + dbg("read of block %d at zone %d, failed due to error (%d)", 270 + block, zone, ret); 271 + goto again; 272 + } 273 + 274 + /* Do a basic test on the oob, to guard against returned garbage */ 275 + if (oob->reserved != 0xFFFFFFFF && !is_power_of_2(~oob->reserved)) 276 + goto again; 277 + 278 + /* This should never happen, unless there is a bug in the mtd driver */ 279 + WARN_ON(ops.oobretlen != SM_OOB_SIZE); 280 + WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE); 281 + 282 + if (!buffer) 283 + return 0; 284 + 285 + /* Test if sector marked as bad */ 286 + if (!sm_sector_valid(oob)) { 287 + dbg("read of block %d at zone %d, failed because it is marked" 288 + " as bad" , block, zone); 289 + goto again; 290 + } 291 + 292 + /* Test ECC*/ 293 + if (ret == -EBADMSG || 294 + (ftl->smallpagenand && sm_correct_sector(buffer, oob))) { 295 + 296 + dbg("read of block %d at zone %d, failed due to ECC error", 297 + block, zone); 298 + goto again; 299 + } 300 + 301 + return 0; 302 + } 303 + 304 + /* Writes a sector to media */ 305 + static int sm_write_sector(struct sm_ftl *ftl, 306 + int zone, int block, int boffset, 307 + uint8_t *buffer, struct sm_oob *oob) 308 + { 309 + struct mtd_oob_ops ops; 310 + struct mtd_info *mtd = ftl->trans->mtd; 311 + int ret; 312 + 313 + BUG_ON(ftl->readonly); 314 + 315 + if (zone == 0 && (block == ftl->cis_block || block == 0)) { 316 + dbg("attempted to write the CIS!"); 317 + return -EIO; 318 + } 319 + 320 + if (ftl->unstable) 321 + return -EIO; 322 + 323 + ops.mode = ftl->smallpagenand ? 
MTD_OOB_RAW : MTD_OOB_PLACE; 324 + ops.len = SM_SECTOR_SIZE; 325 + ops.datbuf = buffer; 326 + ops.ooboffs = 0; 327 + ops.ooblen = SM_OOB_SIZE; 328 + ops.oobbuf = (void *)oob; 329 + 330 + ret = mtd->write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops); 331 + 332 + /* Now we assume that hardware will catch write bitflip errors */ 333 + /* If you are paranoid, use CONFIG_MTD_NAND_VERIFY_WRITE */ 334 + 335 + if (ret) { 336 + dbg("write to block %d at zone %d, failed with error %d", 337 + block, zone, ret); 338 + 339 + sm_recheck_media(ftl); 340 + return ret; 341 + } 342 + 343 + /* This should never happen, unless there is a bug in the driver */ 344 + WARN_ON(ops.oobretlen != SM_OOB_SIZE); 345 + WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE); 346 + 347 + return 0; 348 + } 349 + 350 + /* ------------------------ block IO ------------------------------------- */ 351 + 352 + /* Write a block using data and lba, and invalid sector bitmap */ 353 + static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf, 354 + int zone, int block, int lba, 355 + unsigned long invalid_bitmap) 356 + { 357 + struct sm_oob oob; 358 + int boffset; 359 + int retry = 0; 360 + 361 + /* Initialize the oob with requested values */ 362 + memset(&oob, 0xFF, SM_OOB_SIZE); 363 + sm_write_lba(&oob, lba); 364 + restart: 365 + if (ftl->unstable) 366 + return -EIO; 367 + 368 + for (boffset = 0; boffset < ftl->block_size; 369 + boffset += SM_SECTOR_SIZE) { 370 + 371 + oob.data_status = 0xFF; 372 + 373 + if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) { 374 + 375 + sm_printk("sector %d of block at LBA %d of zone %d" 376 + " coudn't be read, marking it as invalid", 377 + boffset / SM_SECTOR_SIZE, lba, zone); 378 + 379 + oob.data_status = 0; 380 + } 381 + 382 + if (ftl->smallpagenand) { 383 + __nand_calculate_ecc(buf + boffset, 384 + SM_SMALL_PAGE, oob.ecc1); 385 + 386 + __nand_calculate_ecc(buf + boffset + SM_SMALL_PAGE, 387 + SM_SMALL_PAGE, oob.ecc2); 388 + } 389 + if (!sm_write_sector(ftl, 
zone, block, boffset, 390 + buf + boffset, &oob)) 391 + continue; 392 + 393 + if (!retry) { 394 + 395 + /* If write fails. try to erase the block */ 396 + /* This is safe, because we never write in blocks 397 + that contain valuable data. 398 + This is intended to repair block that are marked 399 + as erased, but that isn't fully erased*/ 400 + 401 + if (sm_erase_block(ftl, zone, block, 0)) 402 + return -EIO; 403 + 404 + retry = 1; 405 + goto restart; 406 + } else { 407 + sm_mark_block_bad(ftl, zone, block); 408 + return -EIO; 409 + } 410 + } 411 + return 0; 412 + } 413 + 414 + 415 + /* Mark whole block at offset 'offs' as bad. */ 416 + static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block) 417 + { 418 + struct sm_oob oob; 419 + int boffset; 420 + 421 + memset(&oob, 0xFF, SM_OOB_SIZE); 422 + oob.block_status = 0xF0; 423 + 424 + if (ftl->unstable) 425 + return; 426 + 427 + if (sm_recheck_media(ftl)) 428 + return; 429 + 430 + sm_printk("marking block %d of zone %d as bad", block, zone); 431 + 432 + /* We aren't checking the return value, because we don't care */ 433 + /* This also fails on fake xD cards, but I guess these won't expose 434 + any bad blocks till fail completly */ 435 + for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE) 436 + sm_write_sector(ftl, zone, block, boffset, NULL, &oob); 437 + } 438 + 439 + /* 440 + * Erase a block within a zone 441 + * If erase succedes, it updates free block fifo, otherwise marks block as bad 442 + */ 443 + static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block, 444 + int put_free) 445 + { 446 + struct ftl_zone *zone = &ftl->zones[zone_num]; 447 + struct mtd_info *mtd = ftl->trans->mtd; 448 + struct erase_info erase; 449 + 450 + erase.mtd = mtd; 451 + erase.callback = sm_erase_callback; 452 + erase.addr = sm_mkoffset(ftl, zone_num, block, 0); 453 + erase.len = ftl->block_size; 454 + erase.priv = (u_long)ftl; 455 + 456 + if (ftl->unstable) 457 + return -EIO; 458 + 459 + 
BUG_ON(ftl->readonly); 460 + 461 + if (zone_num == 0 && (block == ftl->cis_block || block == 0)) { 462 + sm_printk("attempted to erase the CIS!"); 463 + return -EIO; 464 + } 465 + 466 + if (mtd->erase(mtd, &erase)) { 467 + sm_printk("erase of block %d in zone %d failed", 468 + block, zone_num); 469 + goto error; 470 + } 471 + 472 + if (erase.state == MTD_ERASE_PENDING) 473 + wait_for_completion(&ftl->erase_completion); 474 + 475 + if (erase.state != MTD_ERASE_DONE) { 476 + sm_printk("erase of block %d in zone %d failed after wait", 477 + block, zone_num); 478 + goto error; 479 + } 480 + 481 + if (put_free) 482 + kfifo_in(&zone->free_sectors, 483 + (const unsigned char *)&block, sizeof(block)); 484 + 485 + return 0; 486 + error: 487 + sm_mark_block_bad(ftl, zone_num, block); 488 + return -EIO; 489 + } 490 + 491 + static void sm_erase_callback(struct erase_info *self) 492 + { 493 + struct sm_ftl *ftl = (struct sm_ftl *)self->priv; 494 + complete(&ftl->erase_completion); 495 + } 496 + 497 + /* Throughtly test that block is valid. 
*/ 498 + static int sm_check_block(struct sm_ftl *ftl, int zone, int block) 499 + { 500 + int boffset; 501 + struct sm_oob oob; 502 + int lbas[] = { -3, 0, 0, 0 }; 503 + int i = 0; 504 + int test_lba; 505 + 506 + 507 + /* First just check that block doesn't look fishy */ 508 + /* Only blocks that are valid or are sliced in two parts, are 509 + accepted */ 510 + for (boffset = 0; boffset < ftl->block_size; 511 + boffset += SM_SECTOR_SIZE) { 512 + 513 + /* This shoudn't happen anyway */ 514 + if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob)) 515 + return -2; 516 + 517 + test_lba = sm_read_lba(&oob); 518 + 519 + if (lbas[i] != test_lba) 520 + lbas[++i] = test_lba; 521 + 522 + /* If we found three different LBAs, something is fishy */ 523 + if (i == 3) 524 + return -EIO; 525 + } 526 + 527 + /* If the block is sliced (partialy erased usually) erase it */ 528 + if (i == 2) { 529 + sm_erase_block(ftl, zone, block, 1); 530 + return 1; 531 + } 532 + 533 + return 0; 534 + } 535 + 536 + /* ----------------- media scanning --------------------------------- */ 537 + static const struct chs_entry chs_table[] = { 538 + { 1, 125, 4, 4 }, 539 + { 2, 125, 4, 8 }, 540 + { 4, 250, 4, 8 }, 541 + { 8, 250, 4, 16 }, 542 + { 16, 500, 4, 16 }, 543 + { 32, 500, 8, 16 }, 544 + { 64, 500, 8, 32 }, 545 + { 128, 500, 16, 32 }, 546 + { 256, 1000, 16, 32 }, 547 + { 512, 1015, 32, 63 }, 548 + { 1024, 985, 33, 63 }, 549 + { 2048, 985, 33, 63 }, 550 + { 0 }, 551 + }; 552 + 553 + 554 + static const uint8_t cis_signature[] = { 555 + 0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20 556 + }; 557 + /* Find out media parameters. 
558 + * This ideally has to be based on nand id, but for now device size is enough */ 559 + int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd) 560 + { 561 + int i; 562 + int size_in_megs = mtd->size / (1024 * 1024); 563 + 564 + ftl->readonly = mtd->type == MTD_ROM; 565 + 566 + /* Manual settings for very old devices */ 567 + ftl->zone_count = 1; 568 + ftl->smallpagenand = 0; 569 + 570 + switch (size_in_megs) { 571 + case 1: 572 + /* 1 MiB flash/rom SmartMedia card (256 byte pages)*/ 573 + ftl->zone_size = 256; 574 + ftl->max_lba = 250; 575 + ftl->block_size = 8 * SM_SECTOR_SIZE; 576 + ftl->smallpagenand = 1; 577 + 578 + break; 579 + case 2: 580 + /* 2 MiB flash SmartMedia (256 byte pages)*/ 581 + if (mtd->writesize == SM_SMALL_PAGE) { 582 + ftl->zone_size = 512; 583 + ftl->max_lba = 500; 584 + ftl->block_size = 8 * SM_SECTOR_SIZE; 585 + ftl->smallpagenand = 1; 586 + /* 2 MiB rom SmartMedia */ 587 + } else { 588 + 589 + if (!ftl->readonly) 590 + return -ENODEV; 591 + 592 + ftl->zone_size = 256; 593 + ftl->max_lba = 250; 594 + ftl->block_size = 16 * SM_SECTOR_SIZE; 595 + } 596 + break; 597 + case 4: 598 + /* 4 MiB flash/rom SmartMedia device */ 599 + ftl->zone_size = 512; 600 + ftl->max_lba = 500; 601 + ftl->block_size = 16 * SM_SECTOR_SIZE; 602 + break; 603 + case 8: 604 + /* 8 MiB flash/rom SmartMedia device */ 605 + ftl->zone_size = 1024; 606 + ftl->max_lba = 1000; 607 + ftl->block_size = 16 * SM_SECTOR_SIZE; 608 + } 609 + 610 + /* Minimum xD size is 16MiB. Also, all xD cards have standard zone 611 + sizes. 
SmartMedia cards exist up to 128 MiB and have same layout*/ 612 + if (size_in_megs >= 16) { 613 + ftl->zone_count = size_in_megs / 16; 614 + ftl->zone_size = 1024; 615 + ftl->max_lba = 1000; 616 + ftl->block_size = 32 * SM_SECTOR_SIZE; 617 + } 618 + 619 + /* Test for proper write,erase and oob sizes */ 620 + if (mtd->erasesize > ftl->block_size) 621 + return -ENODEV; 622 + 623 + if (mtd->writesize > SM_SECTOR_SIZE) 624 + return -ENODEV; 625 + 626 + if (ftl->smallpagenand && mtd->oobsize < SM_SMALL_OOB_SIZE) 627 + return -ENODEV; 628 + 629 + if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE) 630 + return -ENODEV; 631 + 632 + /* We use these functions for IO */ 633 + if (!mtd->read_oob || !mtd->write_oob) 634 + return -ENODEV; 635 + 636 + /* Find geometry information */ 637 + for (i = 0 ; i < ARRAY_SIZE(chs_table) ; i++) { 638 + if (chs_table[i].size == size_in_megs) { 639 + ftl->cylinders = chs_table[i].cyl; 640 + ftl->heads = chs_table[i].head; 641 + ftl->sectors = chs_table[i].sec; 642 + return 0; 643 + } 644 + } 645 + 646 + sm_printk("media has unknown size : %dMiB", size_in_megs); 647 + ftl->cylinders = 985; 648 + ftl->heads = 33; 649 + ftl->sectors = 63; 650 + return 0; 651 + } 652 + 653 + /* Validate the CIS */ 654 + static int sm_read_cis(struct sm_ftl *ftl) 655 + { 656 + struct sm_oob oob; 657 + 658 + if (sm_read_sector(ftl, 659 + 0, ftl->cis_block, ftl->cis_boffset, ftl->cis_buffer, &oob)) 660 + return -EIO; 661 + 662 + if (!sm_sector_valid(&oob) || !sm_block_valid(&oob)) 663 + return -EIO; 664 + 665 + if (!memcmp(ftl->cis_buffer + ftl->cis_page_offset, 666 + cis_signature, sizeof(cis_signature))) { 667 + return 0; 668 + } 669 + 670 + return -EIO; 671 + } 672 + 673 + /* Scan the media for the CIS */ 674 + static int sm_find_cis(struct sm_ftl *ftl) 675 + { 676 + struct sm_oob oob; 677 + int block, boffset; 678 + int block_found = 0; 679 + int cis_found = 0; 680 + 681 + /* Search for first valid block */ 682 + for (block = 0 ; block < ftl->zone_size - 
ftl->max_lba ; block++) { 683 + 684 + if (sm_read_sector(ftl, 0, block, 0, NULL, &oob)) 685 + continue; 686 + 687 + if (!sm_block_valid(&oob)) 688 + continue; 689 + block_found = 1; 690 + break; 691 + } 692 + 693 + if (!block_found) 694 + return -EIO; 695 + 696 + /* Search for first valid sector in this block */ 697 + for (boffset = 0 ; boffset < ftl->block_size; 698 + boffset += SM_SECTOR_SIZE) { 699 + 700 + if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob)) 701 + continue; 702 + 703 + if (!sm_sector_valid(&oob)) 704 + continue; 705 + break; 706 + } 707 + 708 + if (boffset == ftl->block_size) 709 + return -EIO; 710 + 711 + ftl->cis_block = block; 712 + ftl->cis_boffset = boffset; 713 + ftl->cis_page_offset = 0; 714 + 715 + cis_found = !sm_read_cis(ftl); 716 + 717 + if (!cis_found) { 718 + ftl->cis_page_offset = SM_SMALL_PAGE; 719 + cis_found = !sm_read_cis(ftl); 720 + } 721 + 722 + if (cis_found) { 723 + dbg("CIS block found at offset %x", 724 + block * ftl->block_size + 725 + boffset + ftl->cis_page_offset); 726 + return 0; 727 + } 728 + return -EIO; 729 + } 730 + 731 + /* Basic test to determine if underlying mtd device if functional */ 732 + static int sm_recheck_media(struct sm_ftl *ftl) 733 + { 734 + if (sm_read_cis(ftl)) { 735 + 736 + if (!ftl->unstable) { 737 + sm_printk("media unstable, not allowing writes"); 738 + ftl->unstable = 1; 739 + } 740 + return -EIO; 741 + } 742 + return 0; 743 + } 744 + 745 + /* Initialize a FTL zone */ 746 + static int sm_init_zone(struct sm_ftl *ftl, int zone_num) 747 + { 748 + struct ftl_zone *zone = &ftl->zones[zone_num]; 749 + struct sm_oob oob; 750 + uint16_t block; 751 + int lba; 752 + int i = 0; 753 + int len; 754 + 755 + dbg("initializing zone %d", zone_num); 756 + 757 + /* Allocate memory for FTL table */ 758 + zone->lba_to_phys_table = kmalloc(ftl->max_lba * 2, GFP_KERNEL); 759 + 760 + if (!zone->lba_to_phys_table) 761 + return -ENOMEM; 762 + memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2); 763 + 764 + 765 
+ /* Allocate memory for free sectors FIFO */ 766 + if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) { 767 + kfree(zone->lba_to_phys_table); 768 + return -ENOMEM; 769 + } 770 + 771 + /* Now scan the zone */ 772 + for (block = 0 ; block < ftl->zone_size ; block++) { 773 + 774 + /* Skip blocks till the CIS (including) */ 775 + if (zone_num == 0 && block <= ftl->cis_block) 776 + continue; 777 + 778 + /* Read the oob of first sector */ 779 + if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob)) 780 + return -EIO; 781 + 782 + /* Test to see if block is erased. It is enough to test 783 + first sector, because erase happens in one shot */ 784 + if (sm_block_erased(&oob)) { 785 + kfifo_in(&zone->free_sectors, 786 + (unsigned char *)&block, 2); 787 + continue; 788 + } 789 + 790 + /* If block is marked as bad, skip it */ 791 + /* This assumes we can trust first sector*/ 792 + /* However the way the block valid status is defined, ensures 793 + very low probability of failure here */ 794 + if (!sm_block_valid(&oob)) { 795 + dbg("PH %04d <-> <marked bad>", block); 796 + continue; 797 + } 798 + 799 + 800 + lba = sm_read_lba(&oob); 801 + 802 + /* Invalid LBA means that block is damaged. 
*/ 803 + /* We can try to erase it, or mark it as bad, but 804 + lets leave that to recovery application */ 805 + if (lba == -2 || lba >= ftl->max_lba) { 806 + dbg("PH %04d <-> LBA %04d(bad)", block, lba); 807 + continue; 808 + } 809 + 810 + 811 + /* If there is no collision, 812 + just put the sector in the FTL table */ 813 + if (zone->lba_to_phys_table[lba] < 0) { 814 + dbg_verbose("PH %04d <-> LBA %04d", block, lba); 815 + zone->lba_to_phys_table[lba] = block; 816 + continue; 817 + } 818 + 819 + sm_printk("collision" 820 + " of LBA %d between blocks %d and %d in zone %d", 821 + lba, zone->lba_to_phys_table[lba], block, zone_num); 822 + 823 + /* Test that this block is valid*/ 824 + if (sm_check_block(ftl, zone_num, block)) 825 + continue; 826 + 827 + /* Test now the old block */ 828 + if (sm_check_block(ftl, zone_num, 829 + zone->lba_to_phys_table[lba])) { 830 + zone->lba_to_phys_table[lba] = block; 831 + continue; 832 + } 833 + 834 + /* If both blocks are valid and share same LBA, it means that 835 + they hold different versions of same data. 
It not 836 + known which is more recent, thus just erase one of them 837 + */ 838 + sm_printk("both blocks are valid, erasing the later"); 839 + sm_erase_block(ftl, zone_num, block, 1); 840 + } 841 + 842 + dbg("zone initialized"); 843 + zone->initialized = 1; 844 + 845 + /* No free sectors, means that the zone is heavily damaged, write won't 846 + work, but it can still can be (partially) read */ 847 + if (!kfifo_len(&zone->free_sectors)) { 848 + sm_printk("no free blocks in zone %d", zone_num); 849 + return 0; 850 + } 851 + 852 + /* Randomize first block we write to */ 853 + get_random_bytes(&i, 2); 854 + i %= (kfifo_len(&zone->free_sectors) / 2); 855 + 856 + while (i--) { 857 + len = kfifo_out(&zone->free_sectors, 858 + (unsigned char *)&block, 2); 859 + WARN_ON(len != 2); 860 + kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2); 861 + } 862 + return 0; 863 + } 864 + 865 + /* Get and automaticly initialize an FTL mapping for one zone */ 866 + struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num) 867 + { 868 + struct ftl_zone *zone; 869 + int error; 870 + 871 + BUG_ON(zone_num >= ftl->zone_count); 872 + zone = &ftl->zones[zone_num]; 873 + 874 + if (!zone->initialized) { 875 + error = sm_init_zone(ftl, zone_num); 876 + 877 + if (error) 878 + return ERR_PTR(error); 879 + } 880 + return zone; 881 + } 882 + 883 + 884 + /* ----------------- cache handling ------------------------------------------*/ 885 + 886 + /* Initialize the one block cache */ 887 + void sm_cache_init(struct sm_ftl *ftl) 888 + { 889 + ftl->cache_data_invalid_bitmap = 0xFFFFFFFF; 890 + ftl->cache_clean = 1; 891 + ftl->cache_zone = -1; 892 + ftl->cache_block = -1; 893 + /*memset(ftl->cache_data, 0xAA, ftl->block_size);*/ 894 + } 895 + 896 + /* Put sector in one block cache */ 897 + void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset) 898 + { 899 + memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE); 900 + clear_bit(boffset / SM_SECTOR_SIZE, 
&ftl->cache_data_invalid_bitmap); 901 + ftl->cache_clean = 0; 902 + } 903 + 904 + /* Read a sector from the cache */ 905 + int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset) 906 + { 907 + if (test_bit(boffset / SM_SECTOR_SIZE, 908 + &ftl->cache_data_invalid_bitmap)) 909 + return -1; 910 + 911 + memcpy(buffer, ftl->cache_data + boffset, SM_SECTOR_SIZE); 912 + return 0; 913 + } 914 + 915 + /* Write the cache to hardware */ 916 + int sm_cache_flush(struct sm_ftl *ftl) 917 + { 918 + struct ftl_zone *zone; 919 + 920 + int sector_num; 921 + uint16_t write_sector; 922 + int zone_num = ftl->cache_zone; 923 + int block_num; 924 + 925 + if (ftl->cache_clean) 926 + return 0; 927 + 928 + if (ftl->unstable) 929 + return -EIO; 930 + 931 + BUG_ON(zone_num < 0); 932 + zone = &ftl->zones[zone_num]; 933 + block_num = zone->lba_to_phys_table[ftl->cache_block]; 934 + 935 + 936 + /* Try to read all unread areas of the cache block*/ 937 + for_each_set_bit(sector_num, &ftl->cache_data_invalid_bitmap, 938 + ftl->block_size / SM_SECTOR_SIZE) { 939 + 940 + if (!sm_read_sector(ftl, 941 + zone_num, block_num, sector_num * SM_SECTOR_SIZE, 942 + ftl->cache_data + sector_num * SM_SECTOR_SIZE, NULL)) 943 + clear_bit(sector_num, 944 + &ftl->cache_data_invalid_bitmap); 945 + } 946 + restart: 947 + 948 + if (ftl->unstable) 949 + return -EIO; 950 + 951 + /* If there are no spare blocks, */ 952 + /* we could still continue by erasing/writing the current block, 953 + but for such worn out media it doesn't worth the trouble, 954 + and the dangers */ 955 + if (kfifo_out(&zone->free_sectors, 956 + (unsigned char *)&write_sector, 2) != 2) { 957 + dbg("no free sectors for write!"); 958 + return -EIO; 959 + } 960 + 961 + 962 + if (sm_write_block(ftl, ftl->cache_data, zone_num, write_sector, 963 + ftl->cache_block, ftl->cache_data_invalid_bitmap)) 964 + goto restart; 965 + 966 + /* Update the FTL table */ 967 + zone->lba_to_phys_table[ftl->cache_block] = write_sector; 968 + 969 + /* Write 
succesfull, so erase and free the old block */ 970 + if (block_num > 0) 971 + sm_erase_block(ftl, zone_num, block_num, 1); 972 + 973 + sm_cache_init(ftl); 974 + return 0; 975 + } 976 + 977 + 978 + /* flush timer, runs a second after last write */ 979 + static void sm_cache_flush_timer(unsigned long data) 980 + { 981 + struct sm_ftl *ftl = (struct sm_ftl *)data; 982 + queue_work(cache_flush_workqueue, &ftl->flush_work); 983 + } 984 + 985 + /* cache flush work, kicked by timer */ 986 + static void sm_cache_flush_work(struct work_struct *work) 987 + { 988 + struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work); 989 + mutex_lock(&ftl->mutex); 990 + sm_cache_flush(ftl); 991 + mutex_unlock(&ftl->mutex); 992 + return; 993 + } 994 + 995 + /* ---------------- outside interface -------------------------------------- */ 996 + 997 + /* outside interface: read a sector */ 998 + static int sm_read(struct mtd_blktrans_dev *dev, 999 + unsigned long sect_no, char *buf) 1000 + { 1001 + struct sm_ftl *ftl = dev->priv; 1002 + struct ftl_zone *zone; 1003 + int error = 0, in_cache = 0; 1004 + int zone_num, block, boffset; 1005 + 1006 + sm_break_offset(ftl, sect_no << 9, &zone_num, &block, &boffset); 1007 + mutex_lock(&ftl->mutex); 1008 + 1009 + 1010 + zone = sm_get_zone(ftl, zone_num); 1011 + if (IS_ERR(zone)) { 1012 + error = PTR_ERR(zone); 1013 + goto unlock; 1014 + } 1015 + 1016 + /* Have to look at cache first */ 1017 + if (ftl->cache_zone == zone_num && ftl->cache_block == block) { 1018 + in_cache = 1; 1019 + if (!sm_cache_get(ftl, buf, boffset)) 1020 + goto unlock; 1021 + } 1022 + 1023 + /* Translate the block and return if doesn't exist in the table */ 1024 + block = zone->lba_to_phys_table[block]; 1025 + 1026 + if (block == -1) { 1027 + memset(buf, 0xFF, SM_SECTOR_SIZE); 1028 + goto unlock; 1029 + } 1030 + 1031 + if (sm_read_sector(ftl, zone_num, block, boffset, buf, NULL)) { 1032 + error = -EIO; 1033 + goto unlock; 1034 + } 1035 + 1036 + if (in_cache) 1037 + 
sm_cache_put(ftl, buf, boffset); 1038 + unlock: 1039 + mutex_unlock(&ftl->mutex); 1040 + return error; 1041 + } 1042 + 1043 + /* outside interface: write a sector */ 1044 + static int sm_write(struct mtd_blktrans_dev *dev, 1045 + unsigned long sec_no, char *buf) 1046 + { 1047 + struct sm_ftl *ftl = dev->priv; 1048 + struct ftl_zone *zone; 1049 + int error, zone_num, block, boffset; 1050 + 1051 + BUG_ON(ftl->readonly); 1052 + sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset); 1053 + 1054 + /* No need in flush thread running now */ 1055 + del_timer(&ftl->timer); 1056 + mutex_lock(&ftl->mutex); 1057 + 1058 + zone = sm_get_zone(ftl, zone_num); 1059 + if (IS_ERR(zone)) { 1060 + error = PTR_ERR(zone); 1061 + goto unlock; 1062 + } 1063 + 1064 + /* If entry is not in cache, flush it */ 1065 + if (ftl->cache_block != block || ftl->cache_zone != zone_num) { 1066 + 1067 + error = sm_cache_flush(ftl); 1068 + if (error) 1069 + goto unlock; 1070 + 1071 + ftl->cache_block = block; 1072 + ftl->cache_zone = zone_num; 1073 + } 1074 + 1075 + sm_cache_put(ftl, buf, boffset); 1076 + unlock: 1077 + mod_timer(&ftl->timer, jiffies + msecs_to_jiffies(cache_timeout)); 1078 + mutex_unlock(&ftl->mutex); 1079 + return error; 1080 + } 1081 + 1082 + /* outside interface: flush everything */ 1083 + static int sm_flush(struct mtd_blktrans_dev *dev) 1084 + { 1085 + struct sm_ftl *ftl = dev->priv; 1086 + int retval; 1087 + 1088 + mutex_lock(&ftl->mutex); 1089 + retval = sm_cache_flush(ftl); 1090 + mutex_unlock(&ftl->mutex); 1091 + return retval; 1092 + } 1093 + 1094 + /* outside interface: device is released */ 1095 + static int sm_release(struct mtd_blktrans_dev *dev) 1096 + { 1097 + struct sm_ftl *ftl = dev->priv; 1098 + 1099 + mutex_lock(&ftl->mutex); 1100 + del_timer_sync(&ftl->timer); 1101 + cancel_work_sync(&ftl->flush_work); 1102 + sm_cache_flush(ftl); 1103 + mutex_unlock(&ftl->mutex); 1104 + return 0; 1105 + } 1106 + 1107 + /* outside interface: get geometry */ 1108 + static int 
sm_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo) 1109 + { 1110 + struct sm_ftl *ftl = dev->priv; 1111 + geo->heads = ftl->heads; 1112 + geo->sectors = ftl->sectors; 1113 + geo->cylinders = ftl->cylinders; 1114 + return 0; 1115 + } 1116 + 1117 + /* external interface: main initialization function */ 1118 + static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) 1119 + { 1120 + struct mtd_blktrans_dev *trans; 1121 + struct sm_ftl *ftl; 1122 + 1123 + /* Allocate & initialize our private structure */ 1124 + ftl = kzalloc(sizeof(struct sm_ftl), GFP_KERNEL); 1125 + if (!ftl) 1126 + goto error1; 1127 + 1128 + 1129 + mutex_init(&ftl->mutex); 1130 + setup_timer(&ftl->timer, sm_cache_flush_timer, (unsigned long)ftl); 1131 + INIT_WORK(&ftl->flush_work, sm_cache_flush_work); 1132 + init_completion(&ftl->erase_completion); 1133 + 1134 + /* Read media information */ 1135 + if (sm_get_media_info(ftl, mtd)) { 1136 + dbg("found unsupported mtd device, aborting"); 1137 + goto error2; 1138 + } 1139 + 1140 + 1141 + /* Allocate temporary CIS buffer for read retry support */ 1142 + ftl->cis_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL); 1143 + if (!ftl->cis_buffer) 1144 + goto error2; 1145 + 1146 + /* Allocate zone array, it will be initialized on demand */ 1147 + ftl->zones = kzalloc(sizeof(struct ftl_zone) * ftl->zone_count, 1148 + GFP_KERNEL); 1149 + if (!ftl->zones) 1150 + goto error3; 1151 + 1152 + /* Allocate the cache*/ 1153 + ftl->cache_data = kzalloc(ftl->block_size, GFP_KERNEL); 1154 + 1155 + if (!ftl->cache_data) 1156 + goto error4; 1157 + 1158 + sm_cache_init(ftl); 1159 + 1160 + 1161 + /* Allocate upper layer structure and initialize it */ 1162 + trans = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL); 1163 + if (!trans) 1164 + goto error5; 1165 + 1166 + ftl->trans = trans; 1167 + trans->priv = ftl; 1168 + 1169 + trans->tr = tr; 1170 + trans->mtd = mtd; 1171 + trans->devnum = -1; 1172 + trans->size = (ftl->block_size * ftl->max_lba * 
ftl->zone_count) >> 9; 1173 + trans->readonly = ftl->readonly; 1174 + 1175 + if (sm_find_cis(ftl)) { 1176 + dbg("CIS not found on mtd device, aborting"); 1177 + goto error6; 1178 + } 1179 + 1180 + ftl->disk_attributes = sm_create_sysfs_attributes(ftl); 1181 + trans->disk_attributes = ftl->disk_attributes; 1182 + 1183 + sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d", 1184 + (int)(mtd->size / (1024 * 1024)), mtd->index); 1185 + 1186 + dbg("FTL layout:"); 1187 + dbg("%d zone(s), each consists of %d blocks (+%d spares)", 1188 + ftl->zone_count, ftl->max_lba, 1189 + ftl->zone_size - ftl->max_lba); 1190 + dbg("each block consists of %d bytes", 1191 + ftl->block_size); 1192 + 1193 + 1194 + /* Register device*/ 1195 + if (add_mtd_blktrans_dev(trans)) { 1196 + dbg("error in mtdblktrans layer"); 1197 + goto error6; 1198 + } 1199 + return; 1200 + error6: 1201 + kfree(trans); 1202 + error5: 1203 + kfree(ftl->cache_data); 1204 + error4: 1205 + kfree(ftl->zones); 1206 + error3: 1207 + kfree(ftl->cis_buffer); 1208 + error2: 1209 + kfree(ftl); 1210 + error1: 1211 + return; 1212 + } 1213 + 1214 + /* main interface: device {surprise,} removal */ 1215 + static void sm_remove_dev(struct mtd_blktrans_dev *dev) 1216 + { 1217 + struct sm_ftl *ftl = dev->priv; 1218 + int i; 1219 + 1220 + del_mtd_blktrans_dev(dev); 1221 + ftl->trans = NULL; 1222 + 1223 + for (i = 0 ; i < ftl->zone_count; i++) { 1224 + 1225 + if (!ftl->zones[i].initialized) 1226 + continue; 1227 + 1228 + kfree(ftl->zones[i].lba_to_phys_table); 1229 + kfifo_free(&ftl->zones[i].free_sectors); 1230 + } 1231 + 1232 + sm_delete_sysfs_attributes(ftl); 1233 + kfree(ftl->cis_buffer); 1234 + kfree(ftl->zones); 1235 + kfree(ftl->cache_data); 1236 + kfree(ftl); 1237 + } 1238 + 1239 + static struct mtd_blktrans_ops sm_ftl_ops = { 1240 + .name = "smblk", 1241 + .major = -1, 1242 + .part_bits = SM_FTL_PARTN_BITS, 1243 + .blksize = SM_SECTOR_SIZE, 1244 + .getgeo = sm_getgeo, 1245 + 1246 + .add_mtd = sm_add_mtd, 1247 + .remove_dev = 
sm_remove_dev, 1248 + 1249 + .readsect = sm_read, 1250 + .writesect = sm_write, 1251 + 1252 + .flush = sm_flush, 1253 + .release = sm_release, 1254 + 1255 + .owner = THIS_MODULE, 1256 + }; 1257 + 1258 + static __init int sm_module_init(void) 1259 + { 1260 + int error = 0; 1261 + cache_flush_workqueue = create_freezeable_workqueue("smflush"); 1262 + 1263 + if (IS_ERR(cache_flush_workqueue)) 1264 + return PTR_ERR(cache_flush_workqueue); 1265 + 1266 + error = register_mtd_blktrans(&sm_ftl_ops); 1267 + if (error) 1268 + destroy_workqueue(cache_flush_workqueue); 1269 + return error; 1270 + 1271 + } 1272 + 1273 + static void __exit sm_module_exit(void) 1274 + { 1275 + destroy_workqueue(cache_flush_workqueue); 1276 + deregister_mtd_blktrans(&sm_ftl_ops); 1277 + } 1278 + 1279 + module_init(sm_module_init); 1280 + module_exit(sm_module_exit); 1281 + 1282 + MODULE_LICENSE("GPL"); 1283 + MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>"); 1284 + MODULE_DESCRIPTION("Smartmedia/xD mtd translation layer");
+94
drivers/mtd/sm_ftl.h
··· 1 + /* 2 + * Copyright © 2009 - Maxim Levitsky 3 + * SmartMedia/xD translation layer 4 + * 5 + * Based loosly on ssfdc.c which is 6 + * © 2005 Eptar srl 7 + * Author: Claudio Lanconelli <lanconelli.claudio@eptar.com> 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License version 2 as 11 + * published by the Free Software Foundation. 12 + */ 13 + 14 + #include <linux/mtd/blktrans.h> 15 + #include <linux/kfifo.h> 16 + #include <linux/sched.h> 17 + #include <linux/completion.h> 18 + #include <linux/mtd/mtd.h> 19 + 20 + 21 + 22 + struct ftl_zone { 23 + int initialized; 24 + int16_t *lba_to_phys_table; /* LBA to physical table */ 25 + struct kfifo free_sectors; /* queue of free sectors */ 26 + }; 27 + 28 + struct sm_ftl { 29 + struct mtd_blktrans_dev *trans; 30 + 31 + struct mutex mutex; /* protects the structure */ 32 + struct ftl_zone *zones; /* FTL tables for each zone */ 33 + 34 + /* Media information */ 35 + int block_size; /* block size in bytes */ 36 + int zone_size; /* zone size in blocks */ 37 + int zone_count; /* number of zones */ 38 + int max_lba; /* maximum lba in a zone */ 39 + int smallpagenand; /* 256 bytes/page nand */ 40 + int readonly; /* is FS readonly */ 41 + int unstable; 42 + int cis_block; /* CIS block location */ 43 + int cis_boffset; /* CIS offset in the block */ 44 + int cis_page_offset; /* CIS offset in the page */ 45 + void *cis_buffer; /* tmp buffer for cis reads */ 46 + 47 + /* Cache */ 48 + int cache_block; /* block number of cached block */ 49 + int cache_zone; /* zone of cached block */ 50 + unsigned char *cache_data; /* cached block data */ 51 + long unsigned int cache_data_invalid_bitmap; 52 + int cache_clean; 53 + struct work_struct flush_work; 54 + struct timer_list timer; 55 + 56 + /* Async erase stuff */ 57 + struct completion erase_completion; 58 + 59 + /* Geometry stuff */ 60 + int heads; 61 + int sectors; 62 + int cylinders; 63 + 64 + 
struct attribute_group *disk_attributes; 65 + }; 66 + 67 + struct chs_entry { 68 + unsigned long size; 69 + unsigned short cyl; 70 + unsigned char head; 71 + unsigned char sec; 72 + }; 73 + 74 + 75 + #define SM_FTL_PARTN_BITS 3 76 + 77 + #define sm_printk(format, ...) \ 78 + printk(KERN_WARNING "sm_ftl" ": " format "\n", ## __VA_ARGS__) 79 + 80 + #define dbg(format, ...) \ 81 + if (debug) \ 82 + printk(KERN_DEBUG "sm_ftl" ": " format "\n", ## __VA_ARGS__) 83 + 84 + #define dbg_verbose(format, ...) \ 85 + if (debug > 1) \ 86 + printk(KERN_DEBUG "sm_ftl" ": " format "\n", ## __VA_ARGS__) 87 + 88 + 89 + static void sm_erase_callback(struct erase_info *self); 90 + static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block, 91 + int put_free); 92 + static void sm_mark_block_bad(struct sm_ftl *ftl, int zone_num, int block); 93 + 94 + static int sm_recheck_media(struct sm_ftl *ftl);
-1
drivers/mtd/ssfdc.c
··· 375 375 376 376 del_mtd_blktrans_dev(dev); 377 377 kfree(ssfdc->logic_block_map); 378 - kfree(ssfdc); 379 378 } 380 379 381 380 static int ssfdcr_readsect(struct mtd_blktrans_dev *dev,
+1 -2
drivers/mtd/tests/mtd_pagetest.c
··· 480 480 { 481 481 int i, bad = 0; 482 482 483 - bbt = kmalloc(ebcnt, GFP_KERNEL); 483 + bbt = kzalloc(ebcnt, GFP_KERNEL); 484 484 if (!bbt) { 485 485 printk(PRINT_PREF "error: cannot allocate memory\n"); 486 486 return -ENOMEM; 487 487 } 488 - memset(bbt, 0 , ebcnt); 489 488 490 489 printk(PRINT_PREF "scanning for bad eraseblocks\n"); 491 490 for (i = 0; i < ebcnt; ++i) {
+1 -2
drivers/mtd/tests/mtd_readtest.c
··· 141 141 { 142 142 int i, bad = 0; 143 143 144 - bbt = kmalloc(ebcnt, GFP_KERNEL); 144 + bbt = kzalloc(ebcnt, GFP_KERNEL); 145 145 if (!bbt) { 146 146 printk(PRINT_PREF "error: cannot allocate memory\n"); 147 147 return -ENOMEM; 148 148 } 149 - memset(bbt, 0 , ebcnt); 150 149 151 150 /* NOR flash does not implement block_isbad */ 152 151 if (mtd->block_isbad == NULL)
+1 -2
drivers/mtd/tests/mtd_speedtest.c
··· 295 295 { 296 296 int i, bad = 0; 297 297 298 - bbt = kmalloc(ebcnt, GFP_KERNEL); 298 + bbt = kzalloc(ebcnt, GFP_KERNEL); 299 299 if (!bbt) { 300 300 printk(PRINT_PREF "error: cannot allocate memory\n"); 301 301 return -ENOMEM; 302 302 } 303 - memset(bbt, 0 , ebcnt); 304 303 305 304 /* NOR flash does not implement block_isbad */ 306 305 if (mtd->block_isbad == NULL)
+1 -2
drivers/mtd/tests/mtd_stresstest.c
··· 221 221 { 222 222 int i, bad = 0; 223 223 224 - bbt = kmalloc(ebcnt, GFP_KERNEL); 224 + bbt = kzalloc(ebcnt, GFP_KERNEL); 225 225 if (!bbt) { 226 226 printk(PRINT_PREF "error: cannot allocate memory\n"); 227 227 return -ENOMEM; 228 228 } 229 - memset(bbt, 0 , ebcnt); 230 229 231 230 /* NOR flash does not implement block_isbad */ 232 231 if (mtd->block_isbad == NULL)
+1 -2
drivers/mtd/tests/mtd_subpagetest.c
··· 354 354 { 355 355 int i, bad = 0; 356 356 357 - bbt = kmalloc(ebcnt, GFP_KERNEL); 357 + bbt = kzalloc(ebcnt, GFP_KERNEL); 358 358 if (!bbt) { 359 359 printk(PRINT_PREF "error: cannot allocate memory\n"); 360 360 return -ENOMEM; 361 361 } 362 - memset(bbt, 0 , ebcnt); 363 362 364 363 printk(PRINT_PREF "scanning for bad eraseblocks\n"); 365 364 for (i = 0; i < ebcnt; ++i) {
+1 -2
fs/jffs2/background.c
··· 23 23 24 24 void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c) 25 25 { 26 - spin_lock(&c->erase_completion_lock); 26 + assert_spin_locked(&c->erase_completion_lock); 27 27 if (c->gc_task && jffs2_thread_should_wake(c)) 28 28 send_sig(SIGHUP, c->gc_task, 1); 29 - spin_unlock(&c->erase_completion_lock); 30 29 } 31 30 32 31 /* This must only ever be called when no GC thread is currently running */
+8 -4
fs/jffs2/erase.c
··· 103 103 jffs2_erase_failed(c, jeb, bad_offset); 104 104 } 105 105 106 - void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count) 106 + int jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count) 107 107 { 108 108 struct jffs2_eraseblock *jeb; 109 + int work_done = 0; 109 110 110 111 mutex_lock(&c->erase_free_sem); 111 112 ··· 122 121 mutex_unlock(&c->erase_free_sem); 123 122 jffs2_mark_erased_block(c, jeb); 124 123 124 + work_done++; 125 125 if (!--count) { 126 126 D1(printk(KERN_DEBUG "Count reached. jffs2_erase_pending_blocks leaving\n")); 127 127 goto done; ··· 159 157 mutex_unlock(&c->erase_free_sem); 160 158 done: 161 159 D1(printk(KERN_DEBUG "jffs2_erase_pending_blocks completed\n")); 160 + return work_done; 162 161 } 163 162 164 163 static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) ··· 168 165 mutex_lock(&c->erase_free_sem); 169 166 spin_lock(&c->erase_completion_lock); 170 167 list_move_tail(&jeb->list, &c->erase_complete_list); 168 + /* Wake the GC thread to mark them clean */ 169 + jffs2_garbage_collect_trigger(c); 171 170 spin_unlock(&c->erase_completion_lock); 172 171 mutex_unlock(&c->erase_free_sem); 173 - /* Ensure that kupdated calls us again to mark them clean */ 174 - jffs2_erase_pending_trigger(c); 172 + wake_up(&c->erase_wait); 175 173 } 176 174 177 175 static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset) ··· 491 487 492 488 refile: 493 489 /* Stick it back on the list from whence it came and come back later */ 494 - jffs2_erase_pending_trigger(c); 495 490 mutex_lock(&c->erase_free_sem); 496 491 spin_lock(&c->erase_completion_lock); 492 + jffs2_garbage_collect_trigger(c); 497 493 list_move(&jeb->list, &c->erase_complete_list); 498 494 spin_unlock(&c->erase_completion_lock); 499 495 mutex_unlock(&c->erase_free_sem);
+5 -5
fs/jffs2/fs.c
··· 313 313 case S_IFBLK: 314 314 case S_IFCHR: 315 315 /* Read the device numbers from the media */ 316 - if (f->metadata->size != sizeof(jdev.old) && 317 - f->metadata->size != sizeof(jdev.new)) { 316 + if (f->metadata->size != sizeof(jdev.old_id) && 317 + f->metadata->size != sizeof(jdev.new_id)) { 318 318 printk(KERN_NOTICE "Device node has strange size %d\n", f->metadata->size); 319 319 goto error_io; 320 320 } ··· 325 325 printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino); 326 326 goto error; 327 327 } 328 - if (f->metadata->size == sizeof(jdev.old)) 329 - rdev = old_decode_dev(je16_to_cpu(jdev.old)); 328 + if (f->metadata->size == sizeof(jdev.old_id)) 329 + rdev = old_decode_dev(je16_to_cpu(jdev.old_id)); 330 330 else 331 - rdev = new_decode_dev(je32_to_cpu(jdev.new)); 331 + rdev = new_decode_dev(je32_to_cpu(jdev.new_id)); 332 332 333 333 case S_IFSOCK: 334 334 case S_IFIFO:
+15 -2
fs/jffs2/gc.c
··· 214 214 return ret; 215 215 } 216 216 217 + /* If there are any blocks which need erasing, erase them now */ 218 + if (!list_empty(&c->erase_complete_list) || 219 + !list_empty(&c->erase_pending_list)) { 220 + spin_unlock(&c->erase_completion_lock); 221 + D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() erasing pending blocks\n")); 222 + if (jffs2_erase_pending_blocks(c, 1)) { 223 + mutex_unlock(&c->alloc_sem); 224 + return 0; 225 + } 226 + D1(printk(KERN_DEBUG "No progress from erasing blocks; doing GC anyway\n")); 227 + spin_lock(&c->erase_completion_lock); 228 + } 229 + 217 230 /* First, work out which block we're garbage-collecting */ 218 231 jeb = c->gcblock; 219 232 ··· 235 222 236 223 if (!jeb) { 237 224 /* Couldn't find a free block. But maybe we can just erase one and make 'progress'? */ 238 - if (!list_empty(&c->erase_pending_list)) { 225 + if (c->nr_erasing_blocks) { 239 226 spin_unlock(&c->erase_completion_lock); 240 227 mutex_unlock(&c->alloc_sem); 241 228 return -EAGAIN; ··· 448 435 list_add_tail(&c->gcblock->list, &c->erase_pending_list); 449 436 c->gcblock = NULL; 450 437 c->nr_erasing_blocks++; 451 - jffs2_erase_pending_trigger(c); 438 + jffs2_garbage_collect_trigger(c); 452 439 } 453 440 spin_unlock(&c->erase_completion_lock); 454 441
+5 -5
fs/jffs2/nodelist.h
··· 312 312 static inline int jffs2_encode_dev(union jffs2_device_node *jdev, dev_t rdev) 313 313 { 314 314 if (old_valid_dev(rdev)) { 315 - jdev->old = cpu_to_je16(old_encode_dev(rdev)); 316 - return sizeof(jdev->old); 315 + jdev->old_id = cpu_to_je16(old_encode_dev(rdev)); 316 + return sizeof(jdev->old_id); 317 317 } else { 318 - jdev->new = cpu_to_je32(new_encode_dev(rdev)); 319 - return sizeof(jdev->new); 318 + jdev->new_id = cpu_to_je32(new_encode_dev(rdev)); 319 + return sizeof(jdev->new_id); 320 320 } 321 321 } 322 322 ··· 464 464 int jffs2_do_mount_fs(struct jffs2_sb_info *c); 465 465 466 466 /* erase.c */ 467 - void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count); 467 + int jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count); 468 468 void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); 469 469 470 470 #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
+23 -5
fs/jffs2/nodemgmt.c
··· 116 116 117 117 ret = jffs2_garbage_collect_pass(c); 118 118 119 - if (ret == -EAGAIN) 120 - jffs2_erase_pending_blocks(c, 1); 121 - else if (ret) 119 + if (ret == -EAGAIN) { 120 + spin_lock(&c->erase_completion_lock); 121 + if (c->nr_erasing_blocks && 122 + list_empty(&c->erase_pending_list) && 123 + list_empty(&c->erase_complete_list)) { 124 + DECLARE_WAITQUEUE(wait, current); 125 + set_current_state(TASK_UNINTERRUPTIBLE); 126 + add_wait_queue(&c->erase_wait, &wait); 127 + D1(printk(KERN_DEBUG "%s waiting for erase to complete\n", __func__)); 128 + spin_unlock(&c->erase_completion_lock); 129 + 130 + schedule(); 131 + } else 132 + spin_unlock(&c->erase_completion_lock); 133 + } else if (ret) 122 134 return ret; 123 135 124 136 cond_resched(); ··· 229 217 ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list); 230 218 list_move_tail(&ejeb->list, &c->erase_pending_list); 231 219 c->nr_erasing_blocks++; 232 - jffs2_erase_pending_trigger(c); 220 + jffs2_garbage_collect_trigger(c); 233 221 D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n", 234 222 ejeb->offset)); 235 223 } ··· 481 469 void jffs2_complete_reservation(struct jffs2_sb_info *c) 482 470 { 483 471 D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n")); 472 + spin_lock(&c->erase_completion_lock); 484 473 jffs2_garbage_collect_trigger(c); 474 + spin_unlock(&c->erase_completion_lock); 485 475 mutex_unlock(&c->alloc_sem); 486 476 } 487 477 ··· 625 611 D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n")); 626 612 list_add_tail(&jeb->list, &c->erase_pending_list); 627 613 c->nr_erasing_blocks++; 628 - jffs2_erase_pending_trigger(c); 614 + jffs2_garbage_collect_trigger(c); 629 615 } else { 630 616 /* Sometimes, however, we leave it elsewhere so it doesn't get 631 617 immediately reused, and we spread the load a bit. 
*/ ··· 745 731 uint32_t dirty; 746 732 int nr_very_dirty = 0; 747 733 struct jffs2_eraseblock *jeb; 734 + 735 + if (!list_empty(&c->erase_complete_list) || 736 + !list_empty(&c->erase_pending_list)) 737 + return 1; 748 738 749 739 if (c->unchecked_size) { 750 740 D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
+1 -2
fs/jffs2/os-linux.h
··· 140 140 141 141 #endif /* WRITEBUFFER */ 142 142 143 - /* erase.c */ 144 - static inline void jffs2_erase_pending_trigger(struct jffs2_sb_info *c) 143 + static inline void jffs2_dirty_trigger(struct jffs2_sb_info *c) 145 144 { 146 145 OFNI_BS_2SFFJ(c)->s_dirt = 1; 147 146 }
+3 -1
fs/jffs2/scan.c
··· 260 260 ret = -EIO; 261 261 goto out; 262 262 } 263 - jffs2_erase_pending_trigger(c); 263 + spin_lock(&c->erase_completion_lock); 264 + jffs2_garbage_collect_trigger(c); 265 + spin_unlock(&c->erase_completion_lock); 264 266 } 265 267 ret = 0; 266 268 out:
-2
fs/jffs2/super.c
··· 63 63 64 64 if (!(sb->s_flags & MS_RDONLY)) { 65 65 D1(printk(KERN_DEBUG "jffs2_write_super()\n")); 66 - jffs2_garbage_collect_trigger(c); 67 - jffs2_erase_pending_blocks(c, 0); 68 66 jffs2_flush_wbuf_gc(c, 0); 69 67 } 70 68
+4 -4
fs/jffs2/wbuf.c
··· 84 84 struct jffs2_inodirty *new; 85 85 86 86 /* Mark the superblock dirty so that kupdated will flush... */ 87 - jffs2_erase_pending_trigger(c); 87 + jffs2_dirty_trigger(c); 88 88 89 89 if (jffs2_wbuf_pending_for_ino(c, ino)) 90 90 return; ··· 121 121 D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n")); 122 122 list_add_tail(&jeb->list, &c->erase_pending_list); 123 123 c->nr_erasing_blocks++; 124 - jffs2_erase_pending_trigger(c); 124 + jffs2_garbage_collect_trigger(c); 125 125 } else { 126 126 /* Sometimes, however, we leave it elsewhere so it doesn't get 127 127 immediately reused, and we spread the load a bit. */ ··· 152 152 D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset)); 153 153 list_add(&jeb->list, &c->erase_pending_list); 154 154 c->nr_erasing_blocks++; 155 - jffs2_erase_pending_trigger(c); 155 + jffs2_garbage_collect_trigger(c); 156 156 } 157 157 158 158 if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) { ··· 543 543 D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset)); 544 544 list_move(&jeb->list, &c->erase_pending_list); 545 545 c->nr_erasing_blocks++; 546 - jffs2_erase_pending_trigger(c); 546 + jffs2_garbage_collect_trigger(c); 547 547 } 548 548 549 549 jffs2_dbg_acct_sanity_check_nolock(c, jeb);
+2 -2
include/linux/jffs2.h
··· 215 215 216 216 /* Data payload for device nodes. */ 217 217 union jffs2_device_node { 218 - jint16_t old; 219 - jint32_t new; 218 + jint16_t old_id; 219 + jint32_t new_id; 220 220 }; 221 221 222 222 #endif /* __LINUX_JFFS2_H__ */
+10 -5
include/linux/mtd/blktrans.h
··· 9 9 #define __MTD_TRANS_H__ 10 10 11 11 #include <linux/mutex.h> 12 + #include <linux/kref.h> 13 + #include <linux/sysfs.h> 12 14 13 15 struct hd_geometry; 14 16 struct mtd_info; ··· 26 24 int devnum; 27 25 unsigned long size; 28 26 int readonly; 29 - void *blkcore_priv; /* gendisk in 2.5, devfs_handle in 2.4 */ 27 + int open; 28 + struct kref ref; 29 + struct gendisk *disk; 30 + struct attribute_group *disk_attributes; 31 + struct task_struct *thread; 32 + struct request_queue *rq; 33 + spinlock_t queue_lock; 34 + void *priv; 30 35 }; 31 - 32 - struct blkcore_priv; /* Differs for 2.4 and 2.5 kernels; private */ 33 36 34 37 struct mtd_blktrans_ops { 35 38 char *name; ··· 67 60 struct list_head devs; 68 61 struct list_head list; 69 62 struct module *owner; 70 - 71 - struct mtd_blkcore_priv *blkcore_priv; 72 63 }; 73 64 74 65 extern int register_mtd_blktrans(struct mtd_blktrans_ops *tr);
+20 -8
include/linux/mtd/cfi.h
··· 253 253 #define P_ID_MITSUBISHI_STD 0x0100 254 254 #define P_ID_MITSUBISHI_EXT 0x0101 255 255 #define P_ID_SST_PAGE 0x0102 256 + #define P_ID_SST_OLD 0x0701 256 257 #define P_ID_INTEL_PERFORMANCE 0x0200 257 258 #define P_ID_INTEL_DATA 0x0210 258 259 #define P_ID_RESERVED 0xffff ··· 298 297 * and 32bit devices on 16 bit busses 299 298 * set the low bit of the alternating bit sequence of the address. 300 299 */ 301 - if (((type * interleave) > bankwidth) && ((uint8_t)cmd_ofs == 0xaa)) 300 + if (((type * interleave) > bankwidth) && ((cmd_ofs & 0xff) == 0xaa)) 302 301 addr |= (type >> 1)*interleave; 303 302 304 303 return addr; ··· 516 515 void* param; 517 516 }; 518 517 519 - #define CFI_MFR_ANY 0xffff 520 - #define CFI_ID_ANY 0xffff 518 + #define CFI_MFR_ANY 0xFFFF 519 + #define CFI_ID_ANY 0xFFFF 520 + #define CFI_MFR_CONTINUATION 0x007F 521 521 522 - #define CFI_MFR_AMD 0x0001 523 - #define CFI_MFR_INTEL 0x0089 524 - #define CFI_MFR_ATMEL 0x001F 525 - #define CFI_MFR_SAMSUNG 0x00EC 526 - #define CFI_MFR_ST 0x0020 /* STMicroelectronics */ 522 + #define CFI_MFR_AMD 0x0001 523 + #define CFI_MFR_ATMEL 0x001F 524 + #define CFI_MFR_EON 0x001C 525 + #define CFI_MFR_FUJITSU 0x0004 526 + #define CFI_MFR_HYUNDAI 0x00AD 527 + #define CFI_MFR_INTEL 0x0089 528 + #define CFI_MFR_MACRONIX 0x00C2 529 + #define CFI_MFR_NEC 0x0010 530 + #define CFI_MFR_PMC 0x009D 531 + #define CFI_MFR_SAMSUNG 0x00EC 532 + #define CFI_MFR_SHARP 0x00B0 533 + #define CFI_MFR_SST 0x00BF 534 + #define CFI_MFR_ST 0x0020 /* STMicroelectronics */ 535 + #define CFI_MFR_TOSHIBA 0x0098 536 + #define CFI_MFR_WINBOND 0x00DA 527 537 528 538 void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups); 529 539
+2 -2
include/linux/mtd/flashchip.h
··· 15 15 * has asm/spinlock.h, or 2.4, which has linux/spinlock.h 16 16 */ 17 17 #include <linux/sched.h> 18 + #include <linux/mutex.h> 18 19 19 20 typedef enum { 20 21 FL_READY, ··· 75 74 unsigned int erase_suspended:1; 76 75 unsigned long in_progress_block_addr; 77 76 78 - spinlock_t *mutex; 79 - spinlock_t _spinlock; /* We do it like this because sometimes they'll be shared. */ 77 + struct mutex mutex; 80 78 wait_queue_head_t wq; /* Wait on here when we're waiting for the chip 81 79 to be ready */ 82 80 int word_write_time;
+3
include/linux/mtd/map.h
··· 7 7 #include <linux/types.h> 8 8 #include <linux/list.h> 9 9 #include <linux/string.h> 10 + #include <linux/bug.h> 10 11 11 12 #include <linux/mtd/compatmac.h> 12 13 ··· 387 386 #endif 388 387 else if (map_bankwidth_is_large(map)) 389 388 memcpy_fromio(r.x, map->virt+ofs, map->bankwidth); 389 + else 390 + BUG(); 390 391 391 392 return r; 392 393 }
+3 -5
include/linux/mtd/mtd.h
··· 20 20 21 21 #define MTD_CHAR_MAJOR 90 22 22 #define MTD_BLOCK_MAJOR 31 23 - #define MAX_MTD_DEVICES 32 24 23 25 24 #define MTD_ERASE_PENDING 0x01 26 25 #define MTD_ERASING 0x02 ··· 60 61 * MTD_OOB_PLACE: oob data are placed at the given offset 61 62 * MTD_OOB_AUTO: oob data are automatically placed at the free areas 62 63 * which are defined by the ecclayout 63 - * MTD_OOB_RAW: mode to read raw data+oob in one chunk. The oob data 64 - * is inserted into the data. Thats a raw image of the 65 - * flash contents. 64 + * MTD_OOB_RAW: mode to read oob and data without doing ECC checking 66 65 */ 67 66 typedef enum { 68 67 MTD_OOB_PLACE, ··· 287 290 extern int del_mtd_device (struct mtd_info *mtd); 288 291 289 292 extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num); 293 + extern int __get_mtd_device(struct mtd_info *mtd); 294 + extern void __put_mtd_device(struct mtd_info *mtd); 290 295 extern struct mtd_info *get_mtd_device_nm(const char *name); 291 - 292 296 extern void put_mtd_device(struct mtd_info *mtd); 293 297 294 298
+1 -1
include/linux/mtd/mtdram.h
··· 3 3 4 4 #include <linux/mtd/mtd.h> 5 5 int mtdram_init_device(struct mtd_info *mtd, void *mapped_address, 6 - unsigned long size, char *name); 6 + unsigned long size, char *name); 7 7 8 8 #endif /* __MTD_MTDRAM_H__ */
+23 -2
include/linux/mtd/nand.h
··· 25 25 #include <linux/mtd/bbm.h> 26 26 27 27 struct mtd_info; 28 + struct nand_flash_dev; 28 29 /* Scan and identify a NAND device */ 29 30 extern int nand_scan (struct mtd_info *mtd, int max_chips); 30 31 /* Separate phases of nand_scan(), allowing board driver to intervene 31 32 * and override command or ECC setup according to flash type */ 32 - extern int nand_scan_ident(struct mtd_info *mtd, int max_chips); 33 + extern int nand_scan_ident(struct mtd_info *mtd, int max_chips, 34 + struct nand_flash_dev *table); 33 35 extern int nand_scan_tail(struct mtd_info *mtd); 34 36 35 37 /* Free resources held by the NAND device */ ··· 40 38 /* Internal helper for board drivers which need to override command function */ 41 39 extern void nand_wait_ready(struct mtd_info *mtd); 42 40 41 + /* locks all blockes present in the device */ 42 + extern int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 43 + 44 + /* unlocks specified locked blockes */ 45 + extern int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 46 + 43 47 /* The maximum number of NAND chips in an array */ 44 48 #define NAND_MAX_CHIPS 8 45 49 ··· 53 45 * is supported now. If you add a chip with bigger oobsize/page 54 46 * adjust this accordingly. 
55 47 */ 56 - #define NAND_MAX_OOBSIZE 128 48 + #define NAND_MAX_OOBSIZE 256 57 49 #define NAND_MAX_PAGESIZE 4096 58 50 59 51 /* ··· 89 81 #define NAND_CMD_READID 0x90 90 82 #define NAND_CMD_ERASE2 0xd0 91 83 #define NAND_CMD_RESET 0xff 84 + 85 + #define NAND_CMD_LOCK 0x2a 86 + #define NAND_CMD_UNLOCK1 0x23 87 + #define NAND_CMD_UNLOCK2 0x24 92 88 93 89 /* Extended commands for large page devices */ 94 90 #define NAND_CMD_READSTART 0x30 ··· 181 169 #define NAND_NO_READRDY 0x00000100 182 170 /* Chip does not allow subpage writes */ 183 171 #define NAND_NO_SUBPAGE_WRITE 0x00000200 172 + /* Chip stores bad block marker on the last page of the eraseblock */ 173 + #define NAND_BB_LAST_PAGE 0x00000400 174 + 175 + /* Device is one of 'new' xD cards that expose fake nand command set */ 176 + #define NAND_BROKEN_XD 0x00000400 177 + 178 + /* Device behaves just like nand, but is readonly */ 179 + #define NAND_ROM 0x00000800 184 180 185 181 /* Options valid for Samsung large page devices */ 186 182 #define NAND_SAMSUNG_LP_OPTIONS \ ··· 411 391 int subpagesize; 412 392 uint8_t cellinfo; 413 393 int badblockpos; 394 + int badblockbits; 414 395 415 396 flstate_t state; 416 397
+9
include/linux/mtd/onenand.h
··· 125 125 flstate_t state; 126 126 unsigned char *page_buf; 127 127 unsigned char *oob_buf; 128 + #ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE 129 + unsigned char *verify_buf; 130 + #endif 128 131 129 132 int subpagesize; 130 133 struct nand_ecclayout *ecclayout; ··· 178 175 #define ONENAND_HAS_CONT_LOCK (0x0001) 179 176 #define ONENAND_HAS_UNLOCK_ALL (0x0002) 180 177 #define ONENAND_HAS_2PLANE (0x0004) 178 + #define ONENAND_HAS_4KB_PAGE (0x0008) 181 179 #define ONENAND_SKIP_UNLOCK_CHECK (0x0100) 182 180 #define ONENAND_PAGEBUF_ALLOC (0x1000) 183 181 #define ONENAND_OOBBUF_ALLOC (0x2000) 182 + 183 + #define ONENAND_IS_4KB_PAGE(this) \ 184 + (this->options & ONENAND_HAS_4KB_PAGE) 184 185 185 186 /* 186 187 * OneNAND Flash Manufacturer ID Codes ··· 212 205 213 206 struct onenand_platform_data { 214 207 void (*mmcontrol)(struct mtd_info *mtd, int sync_read); 208 + int (*read_bufferram)(struct mtd_info *mtd, int area, 209 + unsigned char *buffer, int offset, size_t count); 215 210 struct mtd_partition *parts; 216 211 unsigned int nr_parts; 217 212 };
+5 -2
include/linux/mtd/sh_flctl.h
··· 93 93 #define INIT_FL4ECCRESULT_VAL 0x03FF03FF 94 94 #define LOOP_TIMEOUT_MAX 0x00010000 95 95 96 - #define mtd_to_flctl(mtd) container_of(mtd, struct sh_flctl, mtd) 97 - 98 96 struct sh_flctl { 99 97 struct mtd_info mtd; 100 98 struct nand_chip chip; ··· 122 124 123 125 unsigned has_hwecc:1; 124 126 }; 127 + 128 + static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo) 129 + { 130 + return container_of(mtdinfo, struct sh_flctl, mtd); 131 + } 125 132 126 133 #endif /* __SH_FLCTL_H__ */
+1 -1
lib/idr.c
··· 623 623 } 624 624 return NULL; 625 625 } 626 - 626 + EXPORT_SYMBOL(idr_get_next); 627 627 628 628 629 629 /**