Merge git://git.infradead.org/mtd-2.6

* git://git.infradead.org/mtd-2.6: (120 commits)
[MTD] Fix mtdoops.c compilation
[MTD] [NOR] fix startup lock when using multiple nor flash chips
[MTD] [DOC200x] eccbuf is statically defined and always evaluates to true
[MTD] Fix maps/physmap.c compilation with CONFIG_PM
[MTD] onenand: Add panic_write function to the onenand driver
[MTD] mtdoops: Use the panic_write function when present
[MTD] Add mtd panic_write function pointer
[MTD] [NAND] Freescale enhanced Local Bus Controller FCM NAND support.
[MTD] physmap.c: Add support for multiple resources
[MTD] [NAND] Fix misparenthesization introduced by commit 78b65179...
[MTD] [NAND] Fix Blackfin NFC ECC calculating bug with page size 512 bytes
[MTD] [NAND] Remove wrong operation in PM function of the BF54x NFC driver
[MTD] [NAND] Remove unused variable in plat_nand_remove
[MTD] Unlock all Intel flash that is locked on power up
[MTD] [NAND] at91_nand: Let the mtdparts option override board info
[MTD] mtdoops: Various minor cleanups
[MTD] mtdoops: Ensure sequential write to the buffer
[MTD] mtdoops: Perform write operations in a workqueue
[MTD] mtdoops: Add further error return code checking
[MTD] [NOR] Test devtype, not definition in flash_probe(), drivers/mtd/devices/lart.c
...
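
Several commits above wire up a panic_write path: mtdoops must log while the kernel is dying, where the normal ->write() path (which may sleep or take locks) is unusable. A minimal sketch of the fallback pattern, assuming the panic_write pointer added by "[MTD] Add mtd panic_write function pointer" (the helper name here is hypothetical):

    /* Hypothetical helper: prefer the non-sleeping panic path when present. */
    static int oops_write(struct mtd_info *mtd, loff_t ofs, size_t len,
                          const u_char *buf)
    {
            size_t retlen;

            if (mtd->panic_write)
                    /* Polls the chip without locks; usable in panic context. */
                    return mtd->panic_write(mtd, ofs, len, &retlen, buf);
            /* Ordinary path; only valid outside a panic. */
            return mtd->write(mtd, ofs, len, &retlen, buf);
    }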

+4871 -2108
+11
drivers/mtd/Kconfig
··· 150 for your particular device. It won't happen automatically. The 151 'armflash' map driver (CONFIG_MTD_ARMFLASH) does this, for example. 152 153 comment "User Modules And Translation Layers" 154 155 config MTD_CHAR ··· 293 This enables panic and oops messages to be logged to a circular 294 buffer in a flash partition where it can be read back at some 295 later point. 296 297 source "drivers/mtd/chips/Kconfig" 298
··· 150 for your particular device. It won't happen automatically. The 151 'armflash' map driver (CONFIG_MTD_ARMFLASH) does this, for example. 152 153 + config MTD_OF_PARTS 154 + tristate "Flash partition map based on OF description" 155 + depends on PPC_OF && MTD_PARTITIONS 156 + help 157 + This provides a partition parsing function which derives 158 + the partition map from the children of the flash node, 159 + as described in Documentation/powerpc/booting-without-of.txt. 160 + 161 comment "User Modules And Translation Layers" 162 163 config MTD_CHAR ··· 285 This enables panic and oops messages to be logged to a circular 286 buffer in a flash partition where it can be read back at some 287 later point. 288 + 289 + To use, add console=ttyMTDx to the kernel command line, 290 + where x is the MTD device number to use. 291 292 source "drivers/mtd/chips/Kconfig" 293
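
The new MTDOOPS help text doubles as the usage contract: booting with, say, console=ttyMTD2 sends oops and panic output to MTD device 2, where it survives for post-mortem reading. MTD_OF_PARTS, likewise, derives the whole partition map from the flash node's children per Documentation/powerpc/booting-without-of.txt, so partitions can be described in the device tree rather than in platform code.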
+1
drivers/mtd/Makefile
··· 11 obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o 12 obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o 13 obj-$(CONFIG_MTD_AFS_PARTS) += afs.o 14 15 # 'Users' - code which presents functionality to userspace. 16 obj-$(CONFIG_MTD_CHAR) += mtdchar.o
··· 11 obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o 12 obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o 13 obj-$(CONFIG_MTD_AFS_PARTS) += afs.o 14 + obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o 15 16 # 'Users' - code which presents functionality to userspace. 17 obj-$(CONFIG_MTD_CHAR) += mtdchar.o
+71 -7
drivers/mtd/chips/cfi_cmdset_0001.c
··· 50 #define I82802AC 0x00ac 51 #define MANUFACTURER_ST 0x0020 52 #define M50LPW080 0x002F 53 54 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 55 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); ··· 158 } 159 #endif 160 161 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE 162 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */ 163 static void fixup_intel_strataflash(struct mtd_info *mtd, void* param) ··· 269 /* 270 * Some chips power-up with all sectors locked by default. 271 */ 272 - static void fixup_use_powerup_lock(struct mtd_info *mtd, void *param) 273 { 274 - printk(KERN_INFO "Using auto-unlock on power-up/resume\n" ); 275 - mtd->flags |= MTD_STUPID_LOCK; 276 } 277 278 static struct cfi_fixup cfi_fixup_table[] = { 279 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE 280 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL }, 281 #endif ··· 294 #endif 295 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL }, 296 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL }, 297 - { MANUFACTURER_INTEL, 0x891c, fixup_use_powerup_lock, NULL, }, 298 { 0, 0, NULL, NULL } 299 }; 300 ··· 326 return NULL; 327 328 if (extp->MajorVersion != '1' || 329 - (extp->MinorVersion < '0' || extp->MinorVersion > '4')) { 330 printk(KERN_ERR " Unknown Intel/Sharp Extended Query " 331 "version %c.%c.\n", extp->MajorVersion, 332 extp->MinorVersion); ··· 801 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode) 802 { 803 int ret; 804 805 retry: 806 if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING ··· 856 } 857 spin_lock(&shared->lock); 858 spin_unlock(contender->mutex); 859 } 860 861 /* We now own it */ ··· 2358 struct flchip *chip; 2359 int ret = 0; 2360 2361 - if ((mtd->flags & MTD_STUPID_LOCK) 2362 && extp && (extp->FeatureSupport & (1 << 5))) 2363 cfi_intelext_save_locks(mtd); 2364 ··· 2469 spin_unlock(chip->mutex); 2470 } 2471 2472 - if ((mtd->flags & MTD_STUPID_LOCK) 2473 && extp && (extp->FeatureSupport & (1 << 5))) 2474 cfi_intelext_restore_locks(mtd); 2475 }
··· 50 #define I82802AC 0x00ac 51 #define MANUFACTURER_ST 0x0020 52 #define M50LPW080 0x002F 53 + #define AT49BV640D 0x02de 54 55 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 56 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); ··· 157 } 158 #endif 159 160 + /* Atmel chips don't use the same PRI format as Intel chips */ 161 + static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param) 162 + { 163 + struct map_info *map = mtd->priv; 164 + struct cfi_private *cfi = map->fldrv_priv; 165 + struct cfi_pri_intelext *extp = cfi->cmdset_priv; 166 + struct cfi_pri_atmel atmel_pri; 167 + uint32_t features = 0; 168 + 169 + /* Reverse byteswapping */ 170 + extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport); 171 + extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask); 172 + extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr); 173 + 174 + memcpy(&atmel_pri, extp, sizeof(atmel_pri)); 175 + memset((char *)extp + 5, 0, sizeof(*extp) - 5); 176 + 177 + printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features); 178 + 179 + if (atmel_pri.Features & 0x01) /* chip erase supported */ 180 + features |= (1<<0); 181 + if (atmel_pri.Features & 0x02) /* erase suspend supported */ 182 + features |= (1<<1); 183 + if (atmel_pri.Features & 0x04) /* program suspend supported */ 184 + features |= (1<<2); 185 + if (atmel_pri.Features & 0x08) /* simultaneous operations supported */ 186 + features |= (1<<9); 187 + if (atmel_pri.Features & 0x20) /* page mode read supported */ 188 + features |= (1<<7); 189 + if (atmel_pri.Features & 0x40) /* queued erase supported */ 190 + features |= (1<<4); 191 + if (atmel_pri.Features & 0x80) /* Protection bits supported */ 192 + features |= (1<<6); 193 + 194 + extp->FeatureSupport = features; 195 + 196 + /* burst write mode not supported */ 197 + cfi->cfiq->BufWriteTimeoutTyp = 0; 198 + cfi->cfiq->BufWriteTimeoutMax = 0; 199 + } 200 + 201 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE 202 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */ 203 static void fixup_intel_strataflash(struct mtd_info *mtd, void* param) ··· 227 /* 228 * Some chips power-up with all sectors locked by default. 
229 */ 230 + static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param) 231 { 232 + struct map_info *map = mtd->priv; 233 + struct cfi_private *cfi = map->fldrv_priv; 234 + struct cfi_pri_intelext *cfip = cfi->cmdset_priv; 235 + 236 + if (cfip->FeatureSupport&32) { 237 + printk(KERN_INFO "Using auto-unlock on power-up/resume\n" ); 238 + mtd->flags |= MTD_POWERUP_LOCK; 239 + } 240 } 241 242 static struct cfi_fixup cfi_fixup_table[] = { 243 + { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL }, 244 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE 245 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL }, 246 #endif ··· 245 #endif 246 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL }, 247 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL }, 248 + { MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, }, 249 { 0, 0, NULL, NULL } 250 }; 251 ··· 277 return NULL; 278 279 if (extp->MajorVersion != '1' || 280 + (extp->MinorVersion < '0' || extp->MinorVersion > '5')) { 281 printk(KERN_ERR " Unknown Intel/Sharp Extended Query " 282 "version %c.%c.\n", extp->MajorVersion, 283 extp->MinorVersion); ··· 752 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode) 753 { 754 int ret; 755 + DECLARE_WAITQUEUE(wait, current); 756 757 retry: 758 if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING ··· 806 } 807 spin_lock(&shared->lock); 808 spin_unlock(contender->mutex); 809 + } 810 + 811 + /* Check if we already have suspended erase 812 + * on this chip. Sleep. */ 813 + if (mode == FL_ERASING && shared->erasing 814 + && shared->erasing->oldstate == FL_ERASING) { 815 + spin_unlock(&shared->lock); 816 + set_current_state(TASK_UNINTERRUPTIBLE); 817 + add_wait_queue(&chip->wq, &wait); 818 + spin_unlock(chip->mutex); 819 + schedule(); 820 + remove_wait_queue(&chip->wq, &wait); 821 + spin_lock(chip->mutex); 822 + goto retry; 823 } 824 825 /* We now own it */ ··· 2294 struct flchip *chip; 2295 int ret = 0; 2296 2297 + if ((mtd->flags & MTD_POWERUP_LOCK) 2298 && extp && (extp->FeatureSupport & (1 << 5))) 2299 cfi_intelext_save_locks(mtd); 2300 ··· 2405 spin_unlock(chip->mutex); 2406 } 2407 2408 + if ((mtd->flags & MTD_POWERUP_LOCK) 2409 && extp && (extp->FeatureSupport & (1 << 5))) 2410 cfi_intelext_restore_locks(mtd); 2411 }
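
Both new fixups enter through cfi_fixup_table, whose matching rules the diff leans on: an entry fires when manufacturer and device id both match, CFI_MFR_ANY/CFI_ID_ANY act as wildcards, and entries run in table order, which is why the Atmel PRI conversion is placed first, before anything else reads the extended query fields it rewrites. A sketch of the table walk, modeled on cfi_fixup() in drivers/mtd/chips/cfi_util.c:

    void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
    {
            struct map_info *map = mtd->priv;
            struct cfi_private *cfi = map->fldrv_priv;
            struct cfi_fixup *f;

            for (f = fixups; f->fixup; f++) {
                    /* CFI_MFR_ANY / CFI_ID_ANY match any chip. */
                    if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
                        (f->id == CFI_ID_ANY || f->id == cfi->id))
                            f->fixup(mtd, f->param);
            }
    }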
+10 -4
drivers/mtd/chips/cfi_cmdset_0002.c
··· 185 extp->TopBottom = 2; 186 else 187 extp->TopBottom = 3; 188 } 189 190 static void fixup_use_secsi(struct mtd_info *mtd, void *param) ··· 217 { 218 mtd->lock = cfi_atmel_lock; 219 mtd->unlock = cfi_atmel_unlock; 220 - mtd->flags |= MTD_STUPID_LOCK; 221 } 222 223 static struct cfi_fixup cfi_fixup_table[] = { 224 #ifdef AMD_BOOTLOC_BUG 225 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL }, 226 #endif ··· 234 #if !FORCE_WORD_WRITE 235 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, }, 236 #endif 237 - { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL }, 238 { 0, 0, NULL, NULL } 239 }; 240 static struct cfi_fixup jedec_fixup_table[] = { ··· 342 /* Modify the unlock address if we are in compatibility mode */ 343 if ( /* x16 in x8 mode */ 344 ((cfi->device_type == CFI_DEVICETYPE_X8) && 345 - (cfi->cfiq->InterfaceDesc == 2)) || 346 /* x32 in x16 mode */ 347 ((cfi->device_type == CFI_DEVICETYPE_X16) && 348 - (cfi->cfiq->InterfaceDesc == 4))) 349 { 350 cfi->addr_unlock1 = 0xaaa; 351 cfi->addr_unlock2 = 0x555;
··· 185 extp->TopBottom = 2; 186 else 187 extp->TopBottom = 3; 188 + 189 + /* burst write mode not supported */ 190 + cfi->cfiq->BufWriteTimeoutTyp = 0; 191 + cfi->cfiq->BufWriteTimeoutMax = 0; 192 } 193 194 static void fixup_use_secsi(struct mtd_info *mtd, void *param) ··· 213 { 214 mtd->lock = cfi_atmel_lock; 215 mtd->unlock = cfi_atmel_unlock; 216 + mtd->flags |= MTD_POWERUP_LOCK; 217 } 218 219 static struct cfi_fixup cfi_fixup_table[] = { 220 + { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL }, 221 #ifdef AMD_BOOTLOC_BUG 222 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL }, 223 #endif ··· 229 #if !FORCE_WORD_WRITE 230 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, }, 231 #endif 232 { 0, 0, NULL, NULL } 233 }; 234 static struct cfi_fixup jedec_fixup_table[] = { ··· 338 /* Modify the unlock address if we are in compatibility mode */ 339 if ( /* x16 in x8 mode */ 340 ((cfi->device_type == CFI_DEVICETYPE_X8) && 341 + (cfi->cfiq->InterfaceDesc == 342 + CFI_INTERFACE_X8_BY_X16_ASYNC)) || 343 /* x32 in x16 mode */ 344 ((cfi->device_type == CFI_DEVICETYPE_X16) && 345 + (cfi->cfiq->InterfaceDesc == 346 + CFI_INTERFACE_X16_BY_X32_ASYNC))) 347 { 348 cfi->addr_unlock1 = 0xaaa; 349 cfi->addr_unlock2 = 0x555;
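
The MTD_STUPID_LOCK to MTD_POWERUP_LOCK rename in both command sets keeps the same contract: the flag marks chips whose sectors power up locked, so the core can unlock them before the device is used. A sketch of the consumer side, modeled on add_mtd_device() in drivers/mtd/mtdcore.c (the exact placement there is an assumption):

    /* On registration, transparently unlock chips that power up locked. */
    if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)
        && mtd->unlock) {
            if (mtd->unlock(mtd, 0, mtd->size))
                    printk(KERN_WARNING
                           "%s: unlock failed, writes may not work\n",
                           mtd->name);
    }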
+6 -6
drivers/mtd/chips/cfi_probe.c
··· 370 printk("Device size: 0x%X bytes (%d MiB)\n", 1 << cfip->DevSize, 1<< (cfip->DevSize - 20)); 371 printk("Flash Device Interface description: 0x%4.4X\n", cfip->InterfaceDesc); 372 switch(cfip->InterfaceDesc) { 373 - case 0: 374 printk(" - x8-only asynchronous interface\n"); 375 break; 376 377 - case 1: 378 printk(" - x16-only asynchronous interface\n"); 379 break; 380 381 - case 2: 382 printk(" - supports x8 and x16 via BYTE# with asynchronous interface\n"); 383 break; 384 385 - case 3: 386 printk(" - x32-only asynchronous interface\n"); 387 break; 388 389 - case 4: 390 printk(" - supports x16 and x32 via Word# with asynchronous interface\n"); 391 break; 392 393 - case 65535: 394 printk(" - Not Allowed / Reserved\n"); 395 break; 396
··· 370 printk("Device size: 0x%X bytes (%d MiB)\n", 1 << cfip->DevSize, 1<< (cfip->DevSize - 20)); 371 printk("Flash Device Interface description: 0x%4.4X\n", cfip->InterfaceDesc); 372 switch(cfip->InterfaceDesc) { 373 + case CFI_INTERFACE_X8_ASYNC: 374 printk(" - x8-only asynchronous interface\n"); 375 break; 376 377 + case CFI_INTERFACE_X16_ASYNC: 378 printk(" - x16-only asynchronous interface\n"); 379 break; 380 381 + case CFI_INTERFACE_X8_BY_X16_ASYNC: 382 printk(" - supports x8 and x16 via BYTE# with asynchronous interface\n"); 383 break; 384 385 + case CFI_INTERFACE_X32_ASYNC: 386 printk(" - x32-only asynchronous interface\n"); 387 break; 388 389 + case CFI_INTERFACE_X16_BY_X32_ASYNC: 390 printk(" - supports x16 and x32 via Word# with asynchronous interface\n"); 391 break; 392 393 + case CFI_INTERFACE_NOT_ALLOWED: 394 printk(" - Not Allowed / Reserved\n"); 395 break; 396
+1 -1
drivers/mtd/chips/gen_probe.c
··· 112 max_chips = 1; 113 } 114 115 - mapsize = (max_chips + BITS_PER_LONG-1) / BITS_PER_LONG; 116 chip_map = kzalloc(mapsize, GFP_KERNEL); 117 if (!chip_map) { 118 printk(KERN_WARNING "%s: kmalloc failed for CFI chip map\n", map->name);
··· 112 max_chips = 1; 113 } 114 115 + mapsize = sizeof(long) * ( (max_chips + BITS_PER_LONG-1) / BITS_PER_LONG ); 116 chip_map = kzalloc(mapsize, GFP_KERNEL); 117 if (!chip_map) { 118 printk(KERN_WARNING "%s: kmalloc failed for CFI chip map\n", map->name);
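
The gen_probe.c one-liner fixes an undersized allocation: the old expression computed how many longs the chip bitmap needs but passed that count to kzalloc() as a byte count, so set_bit() on chip_map could scribble past the buffer. The fix multiplies by sizeof(long); the same thing in today's idiom (assuming linux/bitops.h) would be:

    /* Size the bitmap in bytes, not in longs. */
    mapsize = BITS_TO_LONGS(max_chips) * sizeof(long);
    chip_map = kzalloc(mapsize, GFP_KERNEL);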
+595 -779
drivers/mtd/chips/jedec_probe.c
··· 194 195 196 struct unlock_addr { 197 - u32 addr1; 198 - u32 addr2; 199 }; 200 201 ··· 246 } 247 }; 248 249 - 250 struct amd_flash_info { 251 - const __u16 mfr_id; 252 - const __u16 dev_id; 253 const char *name; 254 - const int DevSize; 255 - const int NumEraseRegions; 256 - const int CmdSet; 257 - const __u8 uaddr[4]; /* unlock addrs for 8, 16, 32, 64 */ 258 - const ulong regions[6]; 259 }; 260 261 #define ERASEINFO(size,blocks) (size<<8)|(blocks-1) ··· 280 .mfr_id = MANUFACTURER_AMD, 281 .dev_id = AM29F032B, 282 .name = "AMD AM29F032B", 283 - .uaddr = { 284 - [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 285 - }, 286 - .DevSize = SIZE_4MiB, 287 - .CmdSet = P_ID_AMD_STD, 288 - .NumEraseRegions= 1, 289 .regions = { 290 ERASEINFO(0x10000,64) 291 } ··· 292 .mfr_id = MANUFACTURER_AMD, 293 .dev_id = AM29LV160DT, 294 .name = "AMD AM29LV160DT", 295 - .uaddr = { 296 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 297 - [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ 298 - }, 299 - .DevSize = SIZE_2MiB, 300 - .CmdSet = P_ID_AMD_STD, 301 - .NumEraseRegions= 4, 302 .regions = { 303 ERASEINFO(0x10000,31), 304 ERASEINFO(0x08000,1), ··· 307 .mfr_id = MANUFACTURER_AMD, 308 .dev_id = AM29LV160DB, 309 .name = "AMD AM29LV160DB", 310 - .uaddr = { 311 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 312 - [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ 313 - }, 314 - .DevSize = SIZE_2MiB, 315 - .CmdSet = P_ID_AMD_STD, 316 - .NumEraseRegions= 4, 317 .regions = { 318 ERASEINFO(0x04000,1), 319 ERASEINFO(0x02000,2), ··· 322 .mfr_id = MANUFACTURER_AMD, 323 .dev_id = AM29LV400BB, 324 .name = "AMD AM29LV400BB", 325 - .uaddr = { 326 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 327 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 328 - }, 329 - .DevSize = SIZE_512KiB, 330 - .CmdSet = P_ID_AMD_STD, 331 - .NumEraseRegions= 4, 332 .regions = { 333 ERASEINFO(0x04000,1), 334 ERASEINFO(0x02000,2), ··· 337 .mfr_id = MANUFACTURER_AMD, 338 .dev_id = AM29LV400BT, 339 .name = "AMD AM29LV400BT", 340 - .uaddr = { 341 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 342 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 343 - }, 344 - .DevSize = SIZE_512KiB, 345 - .CmdSet = P_ID_AMD_STD, 346 - .NumEraseRegions= 4, 347 .regions = { 348 ERASEINFO(0x10000,7), 349 ERASEINFO(0x08000,1), ··· 352 .mfr_id = MANUFACTURER_AMD, 353 .dev_id = AM29LV800BB, 354 .name = "AMD AM29LV800BB", 355 - .uaddr = { 356 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 357 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 358 - }, 359 - .DevSize = SIZE_1MiB, 360 - .CmdSet = P_ID_AMD_STD, 361 - .NumEraseRegions= 4, 362 .regions = { 363 ERASEINFO(0x04000,1), 364 ERASEINFO(0x02000,2), ··· 368 .mfr_id = MANUFACTURER_AMD, 369 .dev_id = AM29DL800BB, 370 .name = "AMD AM29DL800BB", 371 - .uaddr = { 372 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 373 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 374 - }, 375 - .DevSize = SIZE_1MiB, 376 - .CmdSet = P_ID_AMD_STD, 377 - .NumEraseRegions= 6, 378 .regions = { 379 ERASEINFO(0x04000,1), 380 ERASEINFO(0x08000,1), ··· 385 .mfr_id = MANUFACTURER_AMD, 386 .dev_id = AM29DL800BT, 387 .name = "AMD AM29DL800BT", 388 - .uaddr = { 389 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 390 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 391 - }, 392 - .DevSize = SIZE_1MiB, 393 - .CmdSet = P_ID_AMD_STD, 394 - .NumEraseRegions= 6, 395 .regions = { 396 ERASEINFO(0x10000,14), 397 ERASEINFO(0x04000,1), ··· 402 .mfr_id = MANUFACTURER_AMD, 403 .dev_id = AM29F800BB, 404 .name = "AMD AM29F800BB", 405 - .uaddr = { 406 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 407 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 408 - }, 409 - .DevSize = 
SIZE_1MiB, 410 - .CmdSet = P_ID_AMD_STD, 411 - .NumEraseRegions= 4, 412 .regions = { 413 ERASEINFO(0x04000,1), 414 ERASEINFO(0x02000,2), ··· 417 .mfr_id = MANUFACTURER_AMD, 418 .dev_id = AM29LV800BT, 419 .name = "AMD AM29LV800BT", 420 - .uaddr = { 421 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 422 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 423 - }, 424 - .DevSize = SIZE_1MiB, 425 - .CmdSet = P_ID_AMD_STD, 426 - .NumEraseRegions= 4, 427 .regions = { 428 ERASEINFO(0x10000,15), 429 ERASEINFO(0x08000,1), ··· 432 .mfr_id = MANUFACTURER_AMD, 433 .dev_id = AM29F800BT, 434 .name = "AMD AM29F800BT", 435 - .uaddr = { 436 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 437 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 438 - }, 439 - .DevSize = SIZE_1MiB, 440 - .CmdSet = P_ID_AMD_STD, 441 - .NumEraseRegions= 4, 442 .regions = { 443 ERASEINFO(0x10000,15), 444 ERASEINFO(0x08000,1), ··· 447 .mfr_id = MANUFACTURER_AMD, 448 .dev_id = AM29F017D, 449 .name = "AMD AM29F017D", 450 - .uaddr = { 451 - [0] = MTD_UADDR_DONT_CARE /* x8 */ 452 - }, 453 - .DevSize = SIZE_2MiB, 454 - .CmdSet = P_ID_AMD_STD, 455 - .NumEraseRegions= 1, 456 .regions = { 457 ERASEINFO(0x10000,32), 458 } ··· 459 .mfr_id = MANUFACTURER_AMD, 460 .dev_id = AM29F016D, 461 .name = "AMD AM29F016D", 462 - .uaddr = { 463 - [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 464 - }, 465 - .DevSize = SIZE_2MiB, 466 - .CmdSet = P_ID_AMD_STD, 467 - .NumEraseRegions= 1, 468 .regions = { 469 ERASEINFO(0x10000,32), 470 } ··· 471 .mfr_id = MANUFACTURER_AMD, 472 .dev_id = AM29F080, 473 .name = "AMD AM29F080", 474 - .uaddr = { 475 - [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 476 - }, 477 - .DevSize = SIZE_1MiB, 478 - .CmdSet = P_ID_AMD_STD, 479 - .NumEraseRegions= 1, 480 .regions = { 481 ERASEINFO(0x10000,16), 482 } ··· 483 .mfr_id = MANUFACTURER_AMD, 484 .dev_id = AM29F040, 485 .name = "AMD AM29F040", 486 - .uaddr = { 487 - [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 488 - }, 489 - .DevSize = SIZE_512KiB, 490 - .CmdSet = P_ID_AMD_STD, 491 - .NumEraseRegions= 1, 492 .regions = { 493 ERASEINFO(0x10000,8), 494 } ··· 495 .mfr_id = MANUFACTURER_AMD, 496 .dev_id = AM29LV040B, 497 .name = "AMD AM29LV040B", 498 - .uaddr = { 499 - [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 500 - }, 501 - .DevSize = SIZE_512KiB, 502 - .CmdSet = P_ID_AMD_STD, 503 - .NumEraseRegions= 1, 504 .regions = { 505 ERASEINFO(0x10000,8), 506 } ··· 507 .mfr_id = MANUFACTURER_AMD, 508 .dev_id = AM29F002T, 509 .name = "AMD AM29F002T", 510 - .uaddr = { 511 - [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 512 - }, 513 - .DevSize = SIZE_256KiB, 514 - .CmdSet = P_ID_AMD_STD, 515 - .NumEraseRegions= 4, 516 .regions = { 517 ERASEINFO(0x10000,3), 518 ERASEINFO(0x08000,1), ··· 522 .mfr_id = MANUFACTURER_ATMEL, 523 .dev_id = AT49BV512, 524 .name = "Atmel AT49BV512", 525 - .uaddr = { 526 - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 527 - }, 528 - .DevSize = SIZE_64KiB, 529 - .CmdSet = P_ID_AMD_STD, 530 - .NumEraseRegions= 1, 531 .regions = { 532 ERASEINFO(0x10000,1) 533 } ··· 534 .mfr_id = MANUFACTURER_ATMEL, 535 .dev_id = AT29LV512, 536 .name = "Atmel AT29LV512", 537 - .uaddr = { 538 - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 539 - }, 540 - .DevSize = SIZE_64KiB, 541 - .CmdSet = P_ID_AMD_STD, 542 - .NumEraseRegions= 1, 543 .regions = { 544 ERASEINFO(0x80,256), 545 ERASEINFO(0x80,256) ··· 547 .mfr_id = MANUFACTURER_ATMEL, 548 .dev_id = AT49BV16X, 549 .name = "Atmel AT49BV16X", 550 - .uaddr = { 551 - [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */ 552 - [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */ 553 - }, 554 - .DevSize = SIZE_2MiB, 555 - .CmdSet = P_ID_AMD_STD, 556 
- .NumEraseRegions= 2, 557 .regions = { 558 ERASEINFO(0x02000,8), 559 ERASEINFO(0x10000,31) ··· 560 .mfr_id = MANUFACTURER_ATMEL, 561 .dev_id = AT49BV16XT, 562 .name = "Atmel AT49BV16XT", 563 - .uaddr = { 564 - [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */ 565 - [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */ 566 - }, 567 - .DevSize = SIZE_2MiB, 568 - .CmdSet = P_ID_AMD_STD, 569 - .NumEraseRegions= 2, 570 .regions = { 571 ERASEINFO(0x10000,31), 572 ERASEINFO(0x02000,8) ··· 573 .mfr_id = MANUFACTURER_ATMEL, 574 .dev_id = AT49BV32X, 575 .name = "Atmel AT49BV32X", 576 - .uaddr = { 577 - [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */ 578 - [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */ 579 - }, 580 - .DevSize = SIZE_4MiB, 581 - .CmdSet = P_ID_AMD_STD, 582 - .NumEraseRegions= 2, 583 .regions = { 584 ERASEINFO(0x02000,8), 585 ERASEINFO(0x10000,63) ··· 586 .mfr_id = MANUFACTURER_ATMEL, 587 .dev_id = AT49BV32XT, 588 .name = "Atmel AT49BV32XT", 589 - .uaddr = { 590 - [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */ 591 - [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */ 592 - }, 593 - .DevSize = SIZE_4MiB, 594 - .CmdSet = P_ID_AMD_STD, 595 - .NumEraseRegions= 2, 596 .regions = { 597 ERASEINFO(0x10000,63), 598 ERASEINFO(0x02000,8) ··· 599 .mfr_id = MANUFACTURER_FUJITSU, 600 .dev_id = MBM29F040C, 601 .name = "Fujitsu MBM29F040C", 602 - .uaddr = { 603 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 604 - }, 605 - .DevSize = SIZE_512KiB, 606 - .CmdSet = P_ID_AMD_STD, 607 - .NumEraseRegions= 1, 608 .regions = { 609 ERASEINFO(0x10000,8) 610 } ··· 611 .mfr_id = MANUFACTURER_FUJITSU, 612 .dev_id = MBM29F800BA, 613 .name = "Fujitsu MBM29F800BA", 614 - .uaddr = { 615 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 616 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 617 - }, 618 - .DevSize = SIZE_1MiB, 619 - .CmdSet = P_ID_AMD_STD, 620 - .NumEraseRegions= 4, 621 .regions = { 622 ERASEINFO(0x04000,1), 623 ERASEINFO(0x02000,2), ··· 626 .mfr_id = MANUFACTURER_FUJITSU, 627 .dev_id = MBM29LV650UE, 628 .name = "Fujitsu MBM29LV650UE", 629 - .uaddr = { 630 - [0] = MTD_UADDR_DONT_CARE /* x16 */ 631 - }, 632 - .DevSize = SIZE_8MiB, 633 - .CmdSet = P_ID_AMD_STD, 634 - .NumEraseRegions= 1, 635 .regions = { 636 ERASEINFO(0x10000,128) 637 } ··· 638 .mfr_id = MANUFACTURER_FUJITSU, 639 .dev_id = MBM29LV320TE, 640 .name = "Fujitsu MBM29LV320TE", 641 - .uaddr = { 642 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 643 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 644 - }, 645 - .DevSize = SIZE_4MiB, 646 - .CmdSet = P_ID_AMD_STD, 647 - .NumEraseRegions= 2, 648 .regions = { 649 ERASEINFO(0x10000,63), 650 ERASEINFO(0x02000,8) ··· 651 .mfr_id = MANUFACTURER_FUJITSU, 652 .dev_id = MBM29LV320BE, 653 .name = "Fujitsu MBM29LV320BE", 654 - .uaddr = { 655 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 656 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 657 - }, 658 - .DevSize = SIZE_4MiB, 659 - .CmdSet = P_ID_AMD_STD, 660 - .NumEraseRegions= 2, 661 .regions = { 662 ERASEINFO(0x02000,8), 663 ERASEINFO(0x10000,63) ··· 664 .mfr_id = MANUFACTURER_FUJITSU, 665 .dev_id = MBM29LV160TE, 666 .name = "Fujitsu MBM29LV160TE", 667 - .uaddr = { 668 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 669 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 670 - }, 671 - .DevSize = SIZE_2MiB, 672 - .CmdSet = P_ID_AMD_STD, 673 - .NumEraseRegions= 4, 674 .regions = { 675 ERASEINFO(0x10000,31), 676 ERASEINFO(0x08000,1), ··· 679 .mfr_id = MANUFACTURER_FUJITSU, 680 .dev_id = MBM29LV160BE, 681 .name = "Fujitsu MBM29LV160BE", 682 - .uaddr = { 683 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 684 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 685 - }, 686 - .DevSize = 
SIZE_2MiB, 687 - .CmdSet = P_ID_AMD_STD, 688 - .NumEraseRegions= 4, 689 .regions = { 690 ERASEINFO(0x04000,1), 691 ERASEINFO(0x02000,2), ··· 694 .mfr_id = MANUFACTURER_FUJITSU, 695 .dev_id = MBM29LV800BA, 696 .name = "Fujitsu MBM29LV800BA", 697 - .uaddr = { 698 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 699 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 700 - }, 701 - .DevSize = SIZE_1MiB, 702 - .CmdSet = P_ID_AMD_STD, 703 - .NumEraseRegions= 4, 704 .regions = { 705 ERASEINFO(0x04000,1), 706 ERASEINFO(0x02000,2), ··· 709 .mfr_id = MANUFACTURER_FUJITSU, 710 .dev_id = MBM29LV800TA, 711 .name = "Fujitsu MBM29LV800TA", 712 - .uaddr = { 713 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 714 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 715 - }, 716 - .DevSize = SIZE_1MiB, 717 - .CmdSet = P_ID_AMD_STD, 718 - .NumEraseRegions= 4, 719 .regions = { 720 ERASEINFO(0x10000,15), 721 ERASEINFO(0x08000,1), ··· 724 .mfr_id = MANUFACTURER_FUJITSU, 725 .dev_id = MBM29LV400BC, 726 .name = "Fujitsu MBM29LV400BC", 727 - .uaddr = { 728 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 729 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 730 - }, 731 - .DevSize = SIZE_512KiB, 732 - .CmdSet = P_ID_AMD_STD, 733 - .NumEraseRegions= 4, 734 .regions = { 735 ERASEINFO(0x04000,1), 736 ERASEINFO(0x02000,2), ··· 739 .mfr_id = MANUFACTURER_FUJITSU, 740 .dev_id = MBM29LV400TC, 741 .name = "Fujitsu MBM29LV400TC", 742 - .uaddr = { 743 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 744 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 745 - }, 746 - .DevSize = SIZE_512KiB, 747 - .CmdSet = P_ID_AMD_STD, 748 - .NumEraseRegions= 4, 749 .regions = { 750 ERASEINFO(0x10000,7), 751 ERASEINFO(0x08000,1), ··· 754 .mfr_id = MANUFACTURER_HYUNDAI, 755 .dev_id = HY29F002T, 756 .name = "Hyundai HY29F002T", 757 - .uaddr = { 758 - [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 759 - }, 760 - .DevSize = SIZE_256KiB, 761 - .CmdSet = P_ID_AMD_STD, 762 - .NumEraseRegions= 4, 763 .regions = { 764 ERASEINFO(0x10000,3), 765 ERASEINFO(0x08000,1), ··· 769 .mfr_id = MANUFACTURER_INTEL, 770 .dev_id = I28F004B3B, 771 .name = "Intel 28F004B3B", 772 - .uaddr = { 773 - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 774 - }, 775 - .DevSize = SIZE_512KiB, 776 - .CmdSet = P_ID_INTEL_STD, 777 - .NumEraseRegions= 2, 778 .regions = { 779 ERASEINFO(0x02000, 8), 780 ERASEINFO(0x10000, 7), ··· 782 .mfr_id = MANUFACTURER_INTEL, 783 .dev_id = I28F004B3T, 784 .name = "Intel 28F004B3T", 785 - .uaddr = { 786 - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 787 - }, 788 - .DevSize = SIZE_512KiB, 789 - .CmdSet = P_ID_INTEL_STD, 790 - .NumEraseRegions= 2, 791 .regions = { 792 ERASEINFO(0x10000, 7), 793 ERASEINFO(0x02000, 8), ··· 795 .mfr_id = MANUFACTURER_INTEL, 796 .dev_id = I28F400B3B, 797 .name = "Intel 28F400B3B", 798 - .uaddr = { 799 - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 800 - [1] = MTD_UADDR_UNNECESSARY, /* x16 */ 801 - }, 802 - .DevSize = SIZE_512KiB, 803 - .CmdSet = P_ID_INTEL_STD, 804 - .NumEraseRegions= 2, 805 .regions = { 806 ERASEINFO(0x02000, 8), 807 ERASEINFO(0x10000, 7), ··· 808 .mfr_id = MANUFACTURER_INTEL, 809 .dev_id = I28F400B3T, 810 .name = "Intel 28F400B3T", 811 - .uaddr = { 812 - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 813 - [1] = MTD_UADDR_UNNECESSARY, /* x16 */ 814 - }, 815 - .DevSize = SIZE_512KiB, 816 - .CmdSet = P_ID_INTEL_STD, 817 - .NumEraseRegions= 2, 818 .regions = { 819 ERASEINFO(0x10000, 7), 820 ERASEINFO(0x02000, 8), ··· 821 .mfr_id = MANUFACTURER_INTEL, 822 .dev_id = I28F008B3B, 823 .name = "Intel 28F008B3B", 824 - .uaddr = { 825 - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 826 - }, 827 - .DevSize = 
SIZE_1MiB, 828 - .CmdSet = P_ID_INTEL_STD, 829 - .NumEraseRegions= 2, 830 .regions = { 831 ERASEINFO(0x02000, 8), 832 ERASEINFO(0x10000, 15), ··· 834 .mfr_id = MANUFACTURER_INTEL, 835 .dev_id = I28F008B3T, 836 .name = "Intel 28F008B3T", 837 - .uaddr = { 838 - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 839 - }, 840 - .DevSize = SIZE_1MiB, 841 - .CmdSet = P_ID_INTEL_STD, 842 - .NumEraseRegions= 2, 843 .regions = { 844 ERASEINFO(0x10000, 15), 845 ERASEINFO(0x02000, 8), ··· 847 .mfr_id = MANUFACTURER_INTEL, 848 .dev_id = I28F008S5, 849 .name = "Intel 28F008S5", 850 - .uaddr = { 851 - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 852 - }, 853 - .DevSize = SIZE_1MiB, 854 - .CmdSet = P_ID_INTEL_EXT, 855 - .NumEraseRegions= 1, 856 .regions = { 857 ERASEINFO(0x10000,16), 858 } ··· 859 .mfr_id = MANUFACTURER_INTEL, 860 .dev_id = I28F016S5, 861 .name = "Intel 28F016S5", 862 - .uaddr = { 863 - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 864 - }, 865 - .DevSize = SIZE_2MiB, 866 - .CmdSet = P_ID_INTEL_EXT, 867 - .NumEraseRegions= 1, 868 .regions = { 869 ERASEINFO(0x10000,32), 870 } ··· 871 .mfr_id = MANUFACTURER_INTEL, 872 .dev_id = I28F008SA, 873 .name = "Intel 28F008SA", 874 - .uaddr = { 875 - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 876 - }, 877 - .DevSize = SIZE_1MiB, 878 - .CmdSet = P_ID_INTEL_STD, 879 - .NumEraseRegions= 1, 880 .regions = { 881 ERASEINFO(0x10000, 16), 882 } ··· 883 .mfr_id = MANUFACTURER_INTEL, 884 .dev_id = I28F800B3B, 885 .name = "Intel 28F800B3B", 886 - .uaddr = { 887 - [1] = MTD_UADDR_UNNECESSARY, /* x16 */ 888 - }, 889 - .DevSize = SIZE_1MiB, 890 - .CmdSet = P_ID_INTEL_STD, 891 - .NumEraseRegions= 2, 892 .regions = { 893 ERASEINFO(0x02000, 8), 894 ERASEINFO(0x10000, 15), ··· 896 .mfr_id = MANUFACTURER_INTEL, 897 .dev_id = I28F800B3T, 898 .name = "Intel 28F800B3T", 899 - .uaddr = { 900 - [1] = MTD_UADDR_UNNECESSARY, /* x16 */ 901 - }, 902 - .DevSize = SIZE_1MiB, 903 - .CmdSet = P_ID_INTEL_STD, 904 - .NumEraseRegions= 2, 905 .regions = { 906 ERASEINFO(0x10000, 15), 907 ERASEINFO(0x02000, 8), ··· 909 .mfr_id = MANUFACTURER_INTEL, 910 .dev_id = I28F016B3B, 911 .name = "Intel 28F016B3B", 912 - .uaddr = { 913 - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 914 - }, 915 - .DevSize = SIZE_2MiB, 916 - .CmdSet = P_ID_INTEL_STD, 917 - .NumEraseRegions= 2, 918 .regions = { 919 ERASEINFO(0x02000, 8), 920 ERASEINFO(0x10000, 31), ··· 922 .mfr_id = MANUFACTURER_INTEL, 923 .dev_id = I28F016S3, 924 .name = "Intel I28F016S3", 925 - .uaddr = { 926 - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 927 - }, 928 - .DevSize = SIZE_2MiB, 929 - .CmdSet = P_ID_INTEL_STD, 930 - .NumEraseRegions= 1, 931 .regions = { 932 ERASEINFO(0x10000, 32), 933 } ··· 934 .mfr_id = MANUFACTURER_INTEL, 935 .dev_id = I28F016B3T, 936 .name = "Intel 28F016B3T", 937 - .uaddr = { 938 - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 939 - }, 940 - .DevSize = SIZE_2MiB, 941 - .CmdSet = P_ID_INTEL_STD, 942 - .NumEraseRegions= 2, 943 .regions = { 944 ERASEINFO(0x10000, 31), 945 ERASEINFO(0x02000, 8), ··· 947 .mfr_id = MANUFACTURER_INTEL, 948 .dev_id = I28F160B3B, 949 .name = "Intel 28F160B3B", 950 - .uaddr = { 951 - [1] = MTD_UADDR_UNNECESSARY, /* x16 */ 952 - }, 953 - .DevSize = SIZE_2MiB, 954 - .CmdSet = P_ID_INTEL_STD, 955 - .NumEraseRegions= 2, 956 .regions = { 957 ERASEINFO(0x02000, 8), 958 ERASEINFO(0x10000, 31), ··· 960 .mfr_id = MANUFACTURER_INTEL, 961 .dev_id = I28F160B3T, 962 .name = "Intel 28F160B3T", 963 - .uaddr = { 964 - [1] = MTD_UADDR_UNNECESSARY, /* x16 */ 965 - }, 966 - .DevSize = SIZE_2MiB, 967 - .CmdSet = P_ID_INTEL_STD, 968 - .NumEraseRegions= 2, 969 
.regions = { 970 ERASEINFO(0x10000, 31), 971 ERASEINFO(0x02000, 8), ··· 973 .mfr_id = MANUFACTURER_INTEL, 974 .dev_id = I28F320B3B, 975 .name = "Intel 28F320B3B", 976 - .uaddr = { 977 - [1] = MTD_UADDR_UNNECESSARY, /* x16 */ 978 - }, 979 - .DevSize = SIZE_4MiB, 980 - .CmdSet = P_ID_INTEL_STD, 981 - .NumEraseRegions= 2, 982 .regions = { 983 ERASEINFO(0x02000, 8), 984 ERASEINFO(0x10000, 63), ··· 986 .mfr_id = MANUFACTURER_INTEL, 987 .dev_id = I28F320B3T, 988 .name = "Intel 28F320B3T", 989 - .uaddr = { 990 - [1] = MTD_UADDR_UNNECESSARY, /* x16 */ 991 - }, 992 - .DevSize = SIZE_4MiB, 993 - .CmdSet = P_ID_INTEL_STD, 994 - .NumEraseRegions= 2, 995 .regions = { 996 ERASEINFO(0x10000, 63), 997 ERASEINFO(0x02000, 8), ··· 999 .mfr_id = MANUFACTURER_INTEL, 1000 .dev_id = I28F640B3B, 1001 .name = "Intel 28F640B3B", 1002 - .uaddr = { 1003 - [1] = MTD_UADDR_UNNECESSARY, /* x16 */ 1004 - }, 1005 - .DevSize = SIZE_8MiB, 1006 - .CmdSet = P_ID_INTEL_STD, 1007 - .NumEraseRegions= 2, 1008 .regions = { 1009 ERASEINFO(0x02000, 8), 1010 ERASEINFO(0x10000, 127), ··· 1012 .mfr_id = MANUFACTURER_INTEL, 1013 .dev_id = I28F640B3T, 1014 .name = "Intel 28F640B3T", 1015 - .uaddr = { 1016 - [1] = MTD_UADDR_UNNECESSARY, /* x16 */ 1017 - }, 1018 - .DevSize = SIZE_8MiB, 1019 - .CmdSet = P_ID_INTEL_STD, 1020 - .NumEraseRegions= 2, 1021 .regions = { 1022 ERASEINFO(0x10000, 127), 1023 ERASEINFO(0x02000, 8), ··· 1025 .mfr_id = MANUFACTURER_INTEL, 1026 .dev_id = I82802AB, 1027 .name = "Intel 82802AB", 1028 - .uaddr = { 1029 - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 1030 - }, 1031 - .DevSize = SIZE_512KiB, 1032 - .CmdSet = P_ID_INTEL_EXT, 1033 - .NumEraseRegions= 1, 1034 .regions = { 1035 ERASEINFO(0x10000,8), 1036 } ··· 1037 .mfr_id = MANUFACTURER_INTEL, 1038 .dev_id = I82802AC, 1039 .name = "Intel 82802AC", 1040 - .uaddr = { 1041 - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 1042 - }, 1043 - .DevSize = SIZE_1MiB, 1044 - .CmdSet = P_ID_INTEL_EXT, 1045 - .NumEraseRegions= 1, 1046 .regions = { 1047 ERASEINFO(0x10000,16), 1048 } ··· 1049 .mfr_id = MANUFACTURER_MACRONIX, 1050 .dev_id = MX29LV040C, 1051 .name = "Macronix MX29LV040C", 1052 - .uaddr = { 1053 - [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */ 1054 - }, 1055 - .DevSize = SIZE_512KiB, 1056 - .CmdSet = P_ID_AMD_STD, 1057 - .NumEraseRegions= 1, 1058 .regions = { 1059 ERASEINFO(0x10000,8), 1060 } ··· 1061 .mfr_id = MANUFACTURER_MACRONIX, 1062 .dev_id = MX29LV160T, 1063 .name = "MXIC MX29LV160T", 1064 - .uaddr = { 1065 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1066 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1067 - }, 1068 - .DevSize = SIZE_2MiB, 1069 - .CmdSet = P_ID_AMD_STD, 1070 - .NumEraseRegions= 4, 1071 .regions = { 1072 ERASEINFO(0x10000,31), 1073 ERASEINFO(0x08000,1), ··· 1076 .mfr_id = MANUFACTURER_NEC, 1077 .dev_id = UPD29F064115, 1078 .name = "NEC uPD29F064115", 1079 - .uaddr = { 1080 - [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */ 1081 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1082 - }, 1083 - .DevSize = SIZE_8MiB, 1084 - .CmdSet = P_ID_AMD_STD, 1085 - .NumEraseRegions= 3, 1086 .regions = { 1087 ERASEINFO(0x2000,8), 1088 ERASEINFO(0x10000,126), ··· 1090 .mfr_id = MANUFACTURER_MACRONIX, 1091 .dev_id = MX29LV160B, 1092 .name = "MXIC MX29LV160B", 1093 - .uaddr = { 1094 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1095 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1096 - }, 1097 - .DevSize = SIZE_2MiB, 1098 - .CmdSet = P_ID_AMD_STD, 1099 - .NumEraseRegions= 4, 1100 .regions = { 1101 ERASEINFO(0x04000,1), 1102 ERASEINFO(0x02000,2), ··· 1105 .mfr_id = MANUFACTURER_MACRONIX, 1106 .dev_id = MX29F040, 
1107 .name = "Macronix MX29F040", 1108 - .uaddr = { 1109 - [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 1110 - }, 1111 - .DevSize = SIZE_512KiB, 1112 - .CmdSet = P_ID_AMD_STD, 1113 - .NumEraseRegions= 1, 1114 .regions = { 1115 ERASEINFO(0x10000,8), 1116 } ··· 1117 .mfr_id = MANUFACTURER_MACRONIX, 1118 .dev_id = MX29F016, 1119 .name = "Macronix MX29F016", 1120 - .uaddr = { 1121 - [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 1122 - }, 1123 - .DevSize = SIZE_2MiB, 1124 - .CmdSet = P_ID_AMD_STD, 1125 - .NumEraseRegions= 1, 1126 .regions = { 1127 ERASEINFO(0x10000,32), 1128 } ··· 1129 .mfr_id = MANUFACTURER_MACRONIX, 1130 .dev_id = MX29F004T, 1131 .name = "Macronix MX29F004T", 1132 - .uaddr = { 1133 - [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 1134 - }, 1135 - .DevSize = SIZE_512KiB, 1136 - .CmdSet = P_ID_AMD_STD, 1137 - .NumEraseRegions= 4, 1138 .regions = { 1139 ERASEINFO(0x10000,7), 1140 ERASEINFO(0x08000,1), ··· 1144 .mfr_id = MANUFACTURER_MACRONIX, 1145 .dev_id = MX29F004B, 1146 .name = "Macronix MX29F004B", 1147 - .uaddr = { 1148 - [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 1149 - }, 1150 - .DevSize = SIZE_512KiB, 1151 - .CmdSet = P_ID_AMD_STD, 1152 - .NumEraseRegions= 4, 1153 .regions = { 1154 ERASEINFO(0x04000,1), 1155 ERASEINFO(0x02000,2), ··· 1159 .mfr_id = MANUFACTURER_MACRONIX, 1160 .dev_id = MX29F002T, 1161 .name = "Macronix MX29F002T", 1162 - .uaddr = { 1163 - [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 1164 - }, 1165 - .DevSize = SIZE_256KiB, 1166 - .CmdSet = P_ID_AMD_STD, 1167 - .NumEraseRegions= 4, 1168 .regions = { 1169 ERASEINFO(0x10000,3), 1170 ERASEINFO(0x08000,1), ··· 1174 .mfr_id = MANUFACTURER_PMC, 1175 .dev_id = PM49FL002, 1176 .name = "PMC Pm49FL002", 1177 - .uaddr = { 1178 - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1179 - }, 1180 - .DevSize = SIZE_256KiB, 1181 - .CmdSet = P_ID_AMD_STD, 1182 - .NumEraseRegions= 1, 1183 .regions = { 1184 ERASEINFO( 0x01000, 64 ) 1185 } ··· 1186 .mfr_id = MANUFACTURER_PMC, 1187 .dev_id = PM49FL004, 1188 .name = "PMC Pm49FL004", 1189 - .uaddr = { 1190 - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1191 - }, 1192 - .DevSize = SIZE_512KiB, 1193 - .CmdSet = P_ID_AMD_STD, 1194 - .NumEraseRegions= 1, 1195 .regions = { 1196 ERASEINFO( 0x01000, 128 ) 1197 } ··· 1198 .mfr_id = MANUFACTURER_PMC, 1199 .dev_id = PM49FL008, 1200 .name = "PMC Pm49FL008", 1201 - .uaddr = { 1202 - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1203 - }, 1204 - .DevSize = SIZE_1MiB, 1205 - .CmdSet = P_ID_AMD_STD, 1206 - .NumEraseRegions= 1, 1207 .regions = { 1208 ERASEINFO( 0x01000, 256 ) 1209 } ··· 1210 .mfr_id = MANUFACTURER_SHARP, 1211 .dev_id = LH28F640BF, 1212 .name = "LH28F640BF", 1213 - .uaddr = { 1214 - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 1215 - }, 1216 - .DevSize = SIZE_4MiB, 1217 - .CmdSet = P_ID_INTEL_STD, 1218 - .NumEraseRegions= 1, 1219 - .regions = { 1220 ERASEINFO(0x40000,16), 1221 } 1222 }, { 1223 .mfr_id = MANUFACTURER_SST, 1224 .dev_id = SST39LF512, 1225 .name = "SST 39LF512", 1226 - .uaddr = { 1227 - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1228 - }, 1229 - .DevSize = SIZE_64KiB, 1230 - .CmdSet = P_ID_AMD_STD, 1231 - .NumEraseRegions= 1, 1232 .regions = { 1233 ERASEINFO(0x01000,16), 1234 } ··· 1234 .mfr_id = MANUFACTURER_SST, 1235 .dev_id = SST39LF010, 1236 .name = "SST 39LF010", 1237 - .uaddr = { 1238 - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1239 - }, 1240 - .DevSize = SIZE_128KiB, 1241 - .CmdSet = P_ID_AMD_STD, 1242 - .NumEraseRegions= 1, 1243 .regions = { 1244 ERASEINFO(0x01000,32), 1245 } ··· 1246 .mfr_id = MANUFACTURER_SST, 1247 .dev_id = SST29EE020, 1248 .name = "SST 29EE020", 1249 - 
.uaddr = { 1250 - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1251 - }, 1252 - .DevSize = SIZE_256KiB, 1253 - .CmdSet = P_ID_SST_PAGE, 1254 - .NumEraseRegions= 1, 1255 - .regions = {ERASEINFO(0x01000,64), 1256 - } 1257 - }, { 1258 .mfr_id = MANUFACTURER_SST, 1259 .dev_id = SST29LE020, 1260 .name = "SST 29LE020", 1261 - .uaddr = { 1262 - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1263 - }, 1264 - .DevSize = SIZE_256KiB, 1265 - .CmdSet = P_ID_SST_PAGE, 1266 - .NumEraseRegions= 1, 1267 - .regions = {ERASEINFO(0x01000,64), 1268 - } 1269 }, { 1270 .mfr_id = MANUFACTURER_SST, 1271 .dev_id = SST39LF020, 1272 .name = "SST 39LF020", 1273 - .uaddr = { 1274 - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1275 - }, 1276 - .DevSize = SIZE_256KiB, 1277 - .CmdSet = P_ID_AMD_STD, 1278 - .NumEraseRegions= 1, 1279 .regions = { 1280 ERASEINFO(0x01000,64), 1281 } ··· 1280 .mfr_id = MANUFACTURER_SST, 1281 .dev_id = SST39LF040, 1282 .name = "SST 39LF040", 1283 - .uaddr = { 1284 - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1285 - }, 1286 - .DevSize = SIZE_512KiB, 1287 - .CmdSet = P_ID_AMD_STD, 1288 - .NumEraseRegions= 1, 1289 .regions = { 1290 ERASEINFO(0x01000,128), 1291 } ··· 1292 .mfr_id = MANUFACTURER_SST, 1293 .dev_id = SST39SF010A, 1294 .name = "SST 39SF010A", 1295 - .uaddr = { 1296 - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1297 - }, 1298 - .DevSize = SIZE_128KiB, 1299 - .CmdSet = P_ID_AMD_STD, 1300 - .NumEraseRegions= 1, 1301 .regions = { 1302 ERASEINFO(0x01000,32), 1303 } ··· 1304 .mfr_id = MANUFACTURER_SST, 1305 .dev_id = SST39SF020A, 1306 .name = "SST 39SF020A", 1307 - .uaddr = { 1308 - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1309 - }, 1310 - .DevSize = SIZE_256KiB, 1311 - .CmdSet = P_ID_AMD_STD, 1312 - .NumEraseRegions= 1, 1313 .regions = { 1314 ERASEINFO(0x01000,64), 1315 } 1316 }, { 1317 .mfr_id = MANUFACTURER_SST, 1318 - .dev_id = SST49LF040B, 1319 - .name = "SST 49LF040B", 1320 - .uaddr = { 1321 - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1322 - }, 1323 - .DevSize = SIZE_512KiB, 1324 - .CmdSet = P_ID_AMD_STD, 1325 - .NumEraseRegions= 1, 1326 - .regions = { 1327 ERASEINFO(0x01000,128), 1328 } 1329 }, { ··· 1329 .mfr_id = MANUFACTURER_SST, 1330 .dev_id = SST49LF004B, 1331 .name = "SST 49LF004B", 1332 - .uaddr = { 1333 - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1334 - }, 1335 - .DevSize = SIZE_512KiB, 1336 - .CmdSet = P_ID_AMD_STD, 1337 - .NumEraseRegions= 1, 1338 .regions = { 1339 ERASEINFO(0x01000,128), 1340 } ··· 1341 .mfr_id = MANUFACTURER_SST, 1342 .dev_id = SST49LF008A, 1343 .name = "SST 49LF008A", 1344 - .uaddr = { 1345 - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1346 - }, 1347 - .DevSize = SIZE_1MiB, 1348 - .CmdSet = P_ID_AMD_STD, 1349 - .NumEraseRegions= 1, 1350 .regions = { 1351 ERASEINFO(0x01000,256), 1352 } ··· 1353 .mfr_id = MANUFACTURER_SST, 1354 .dev_id = SST49LF030A, 1355 .name = "SST 49LF030A", 1356 - .uaddr = { 1357 - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1358 - }, 1359 - .DevSize = SIZE_512KiB, 1360 - .CmdSet = P_ID_AMD_STD, 1361 - .NumEraseRegions= 1, 1362 .regions = { 1363 ERASEINFO(0x01000,96), 1364 } ··· 1365 .mfr_id = MANUFACTURER_SST, 1366 .dev_id = SST49LF040A, 1367 .name = "SST 49LF040A", 1368 - .uaddr = { 1369 - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1370 - }, 1371 - .DevSize = SIZE_512KiB, 1372 - .CmdSet = P_ID_AMD_STD, 1373 - .NumEraseRegions= 1, 1374 .regions = { 1375 ERASEINFO(0x01000,128), 1376 } ··· 1377 .mfr_id = MANUFACTURER_SST, 1378 .dev_id = SST49LF080A, 1379 .name = "SST 49LF080A", 1380 - .uaddr = { 1381 - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1382 - }, 1383 - .DevSize = SIZE_1MiB, 1384 - 
.CmdSet = P_ID_AMD_STD, 1385 - .NumEraseRegions= 1, 1386 .regions = { 1387 ERASEINFO(0x01000,256), 1388 } 1389 }, { 1390 - .mfr_id = MANUFACTURER_SST, /* should be CFI */ 1391 - .dev_id = SST39LF160, 1392 - .name = "SST 39LF160", 1393 - .uaddr = { 1394 - [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */ 1395 - [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */ 1396 - }, 1397 - .DevSize = SIZE_2MiB, 1398 - .CmdSet = P_ID_AMD_STD, 1399 - .NumEraseRegions= 2, 1400 - .regions = { 1401 - ERASEINFO(0x1000,256), 1402 - ERASEINFO(0x1000,256) 1403 - } 1404 }, { 1405 - .mfr_id = MANUFACTURER_SST, /* should be CFI */ 1406 - .dev_id = SST39VF1601, 1407 - .name = "SST 39VF1601", 1408 - .uaddr = { 1409 - [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */ 1410 - [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */ 1411 - }, 1412 - .DevSize = SIZE_2MiB, 1413 - .CmdSet = P_ID_AMD_STD, 1414 - .NumEraseRegions= 2, 1415 - .regions = { 1416 - ERASEINFO(0x1000,256), 1417 - ERASEINFO(0x1000,256) 1418 - } 1419 - 1420 }, { 1421 .mfr_id = MANUFACTURER_ST, 1422 .dev_id = M29F800AB, 1423 .name = "ST M29F800AB", 1424 - .uaddr = { 1425 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1426 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1427 - }, 1428 - .DevSize = SIZE_1MiB, 1429 - .CmdSet = P_ID_AMD_STD, 1430 - .NumEraseRegions= 4, 1431 .regions = { 1432 ERASEINFO(0x04000,1), 1433 ERASEINFO(0x02000,2), ··· 1430 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ 1431 .dev_id = M29W800DT, 1432 .name = "ST M29W800DT", 1433 - .uaddr = { 1434 - [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */ 1435 - [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */ 1436 - }, 1437 - .DevSize = SIZE_1MiB, 1438 - .CmdSet = P_ID_AMD_STD, 1439 - .NumEraseRegions= 4, 1440 .regions = { 1441 ERASEINFO(0x10000,15), 1442 ERASEINFO(0x08000,1), ··· 1445 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ 1446 .dev_id = M29W800DB, 1447 .name = "ST M29W800DB", 1448 - .uaddr = { 1449 - [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */ 1450 - [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */ 1451 - }, 1452 - .DevSize = SIZE_1MiB, 1453 - .CmdSet = P_ID_AMD_STD, 1454 - .NumEraseRegions= 4, 1455 .regions = { 1456 ERASEINFO(0x04000,1), 1457 ERASEINFO(0x02000,2), ··· 1460 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ 1461 .dev_id = M29W160DT, 1462 .name = "ST M29W160DT", 1463 - .uaddr = { 1464 - [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */ 1465 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1466 - }, 1467 - .DevSize = SIZE_2MiB, 1468 - .CmdSet = P_ID_AMD_STD, 1469 - .NumEraseRegions= 4, 1470 .regions = { 1471 ERASEINFO(0x10000,31), 1472 ERASEINFO(0x08000,1), ··· 1475 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? 
*/ 1476 .dev_id = M29W160DB, 1477 .name = "ST M29W160DB", 1478 - .uaddr = { 1479 - [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */ 1480 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1481 - }, 1482 - .DevSize = SIZE_2MiB, 1483 - .CmdSet = P_ID_AMD_STD, 1484 - .NumEraseRegions= 4, 1485 .regions = { 1486 ERASEINFO(0x04000,1), 1487 ERASEINFO(0x02000,2), ··· 1490 .mfr_id = MANUFACTURER_ST, 1491 .dev_id = M29W040B, 1492 .name = "ST M29W040B", 1493 - .uaddr = { 1494 - [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ 1495 - }, 1496 - .DevSize = SIZE_512KiB, 1497 - .CmdSet = P_ID_AMD_STD, 1498 - .NumEraseRegions= 1, 1499 .regions = { 1500 ERASEINFO(0x10000,8), 1501 } ··· 1502 .mfr_id = MANUFACTURER_ST, 1503 .dev_id = M50FW040, 1504 .name = "ST M50FW040", 1505 - .uaddr = { 1506 - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 1507 - }, 1508 - .DevSize = SIZE_512KiB, 1509 - .CmdSet = P_ID_INTEL_EXT, 1510 - .NumEraseRegions= 1, 1511 .regions = { 1512 ERASEINFO(0x10000,8), 1513 } ··· 1514 .mfr_id = MANUFACTURER_ST, 1515 .dev_id = M50FW080, 1516 .name = "ST M50FW080", 1517 - .uaddr = { 1518 - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 1519 - }, 1520 - .DevSize = SIZE_1MiB, 1521 - .CmdSet = P_ID_INTEL_EXT, 1522 - .NumEraseRegions= 1, 1523 .regions = { 1524 ERASEINFO(0x10000,16), 1525 } ··· 1526 .mfr_id = MANUFACTURER_ST, 1527 .dev_id = M50FW016, 1528 .name = "ST M50FW016", 1529 - .uaddr = { 1530 - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 1531 - }, 1532 - .DevSize = SIZE_2MiB, 1533 - .CmdSet = P_ID_INTEL_EXT, 1534 - .NumEraseRegions= 1, 1535 .regions = { 1536 ERASEINFO(0x10000,32), 1537 } ··· 1538 .mfr_id = MANUFACTURER_ST, 1539 .dev_id = M50LPW080, 1540 .name = "ST M50LPW080", 1541 - .uaddr = { 1542 - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ 1543 - }, 1544 - .DevSize = SIZE_1MiB, 1545 - .CmdSet = P_ID_INTEL_EXT, 1546 - .NumEraseRegions= 1, 1547 .regions = { 1548 ERASEINFO(0x10000,16), 1549 } ··· 1550 .mfr_id = MANUFACTURER_TOSHIBA, 1551 .dev_id = TC58FVT160, 1552 .name = "Toshiba TC58FVT160", 1553 - .uaddr = { 1554 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1555 - [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ 1556 - }, 1557 - .DevSize = SIZE_2MiB, 1558 - .CmdSet = P_ID_AMD_STD, 1559 - .NumEraseRegions= 4, 1560 .regions = { 1561 ERASEINFO(0x10000,31), 1562 ERASEINFO(0x08000,1), ··· 1565 .mfr_id = MANUFACTURER_TOSHIBA, 1566 .dev_id = TC58FVB160, 1567 .name = "Toshiba TC58FVB160", 1568 - .uaddr = { 1569 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1570 - [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ 1571 - }, 1572 - .DevSize = SIZE_2MiB, 1573 - .CmdSet = P_ID_AMD_STD, 1574 - .NumEraseRegions= 4, 1575 .regions = { 1576 ERASEINFO(0x04000,1), 1577 ERASEINFO(0x02000,2), ··· 1580 .mfr_id = MANUFACTURER_TOSHIBA, 1581 .dev_id = TC58FVB321, 1582 .name = "Toshiba TC58FVB321", 1583 - .uaddr = { 1584 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1585 - [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ 1586 - }, 1587 - .DevSize = SIZE_4MiB, 1588 - .CmdSet = P_ID_AMD_STD, 1589 - .NumEraseRegions= 2, 1590 .regions = { 1591 ERASEINFO(0x02000,8), 1592 ERASEINFO(0x10000,63) ··· 1593 .mfr_id = MANUFACTURER_TOSHIBA, 1594 .dev_id = TC58FVT321, 1595 .name = "Toshiba TC58FVT321", 1596 - .uaddr = { 1597 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1598 - [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ 1599 - }, 1600 - .DevSize = SIZE_4MiB, 1601 - .CmdSet = P_ID_AMD_STD, 1602 - .NumEraseRegions= 2, 1603 .regions = { 1604 ERASEINFO(0x10000,63), 1605 ERASEINFO(0x02000,8) ··· 1606 .mfr_id = MANUFACTURER_TOSHIBA, 1607 .dev_id = TC58FVB641, 1608 .name = "Toshiba TC58FVB641", 1609 - .uaddr = { 1610 - [0] = 
MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1611 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1612 - }, 1613 - .DevSize = SIZE_8MiB, 1614 - .CmdSet = P_ID_AMD_STD, 1615 - .NumEraseRegions= 2, 1616 .regions = { 1617 ERASEINFO(0x02000,8), 1618 ERASEINFO(0x10000,127) ··· 1619 .mfr_id = MANUFACTURER_TOSHIBA, 1620 .dev_id = TC58FVT641, 1621 .name = "Toshiba TC58FVT641", 1622 - .uaddr = { 1623 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1624 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1625 - }, 1626 - .DevSize = SIZE_8MiB, 1627 - .CmdSet = P_ID_AMD_STD, 1628 - .NumEraseRegions= 2, 1629 .regions = { 1630 ERASEINFO(0x10000,127), 1631 ERASEINFO(0x02000,8) ··· 1632 .mfr_id = MANUFACTURER_WINBOND, 1633 .dev_id = W49V002A, 1634 .name = "Winbond W49V002A", 1635 - .uaddr = { 1636 - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1637 - }, 1638 - .DevSize = SIZE_256KiB, 1639 - .CmdSet = P_ID_AMD_STD, 1640 - .NumEraseRegions= 4, 1641 .regions = { 1642 ERASEINFO(0x10000, 3), 1643 ERASEINFO(0x08000, 1), ··· 1646 } 1647 }; 1648 1649 - 1650 - static int cfi_jedec_setup(struct cfi_private *p_cfi, int index); 1651 - 1652 - static int jedec_probe_chip(struct map_info *map, __u32 base, 1653 - unsigned long *chip_map, struct cfi_private *cfi); 1654 - 1655 - static struct mtd_info *jedec_probe(struct map_info *map); 1656 - 1657 - static inline u32 jedec_read_mfr(struct map_info *map, __u32 base, 1658 struct cfi_private *cfi) 1659 { 1660 map_word result; ··· 1657 return result.x[0] & mask; 1658 } 1659 1660 - static inline u32 jedec_read_id(struct map_info *map, __u32 base, 1661 struct cfi_private *cfi) 1662 { 1663 map_word result; ··· 1668 return result.x[0] & mask; 1669 } 1670 1671 - static inline void jedec_reset(u32 base, struct map_info *map, 1672 - struct cfi_private *cfi) 1673 { 1674 /* Reset */ 1675 ··· 1678 * 0x2aaa, 0xF0 at 0x5555 this will not affect the AMD chips 1679 * as they will ignore the writes and dont care what address 1680 * the F0 is written to */ 1681 - if(cfi->addr_unlock1) { 1682 DEBUG( MTD_DEBUG_LEVEL3, 1683 "reset unlock called %x %x \n", 1684 cfi->addr_unlock1,cfi->addr_unlock2); ··· 1687 } 1688 1689 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); 1690 - /* Some misdesigned intel chips do not respond for 0xF0 for a reset, 1691 * so ensure we're in read mode. Send both the Intel and the AMD command 1692 * for this. Intel uses 0xff for this, AMD uses 0xff for NOP, so 1693 * this should be safe. ··· 1697 } 1698 1699 1700 - static inline __u8 finfo_uaddr(const struct amd_flash_info *finfo, int device_type) 1701 - { 1702 - int uaddr_idx; 1703 - __u8 uaddr = MTD_UADDR_NOT_SUPPORTED; 1704 - 1705 - switch ( device_type ) { 1706 - case CFI_DEVICETYPE_X8: uaddr_idx = 0; break; 1707 - case CFI_DEVICETYPE_X16: uaddr_idx = 1; break; 1708 - case CFI_DEVICETYPE_X32: uaddr_idx = 2; break; 1709 - default: 1710 - printk(KERN_NOTICE "MTD: %s(): unknown device_type %d\n", 1711 - __func__, device_type); 1712 - goto uaddr_done; 1713 - } 1714 - 1715 - uaddr = finfo->uaddr[uaddr_idx]; 1716 - 1717 - if (uaddr != MTD_UADDR_NOT_SUPPORTED ) { 1718 - /* ASSERT("The unlock addresses for non-8-bit mode 1719 - are bollocks. 
We don't really need an array."); */ 1720 - uaddr = finfo->uaddr[0]; 1721 - } 1722 - 1723 - uaddr_done: 1724 - return uaddr; 1725 - } 1726 - 1727 - 1728 static int cfi_jedec_setup(struct cfi_private *p_cfi, int index) 1729 { 1730 int i,num_erase_regions; 1731 - __u8 uaddr; 1732 1733 - printk("Found: %s\n",jedec_table[index].name); 1734 1735 - num_erase_regions = jedec_table[index].NumEraseRegions; 1736 1737 p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL); 1738 if (!p_cfi->cfiq) { ··· 1720 1721 memset(p_cfi->cfiq,0,sizeof(struct cfi_ident)); 1722 1723 - p_cfi->cfiq->P_ID = jedec_table[index].CmdSet; 1724 - p_cfi->cfiq->NumEraseRegions = jedec_table[index].NumEraseRegions; 1725 - p_cfi->cfiq->DevSize = jedec_table[index].DevSize; 1726 p_cfi->cfi_mode = CFI_MODE_JEDEC; 1727 1728 for (i=0; i<num_erase_regions; i++){ ··· 1734 p_cfi->mfr = jedec_table[index].mfr_id; 1735 p_cfi->id = jedec_table[index].dev_id; 1736 1737 - uaddr = finfo_uaddr(&jedec_table[index], p_cfi->device_type); 1738 - if ( uaddr == MTD_UADDR_NOT_SUPPORTED ) { 1739 - kfree( p_cfi->cfiq ); 1740 - return 0; 1741 - } 1742 1743 - p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1; 1744 - p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2; 1745 1746 return 1; /* ok */ 1747 } ··· 1754 * be perfect - consequently there should be some module parameters that 1755 * could be manually specified to force the chip info. 1756 */ 1757 - static inline int jedec_match( __u32 base, 1758 struct map_info *map, 1759 struct cfi_private *cfi, 1760 const struct amd_flash_info *finfo ) 1761 { 1762 int rc = 0; /* failure until all tests pass */ 1763 u32 mfr, id; 1764 - __u8 uaddr; 1765 1766 /* 1767 * The IDs must match. For X16 and X32 devices operating in ··· 1774 */ 1775 switch (cfi->device_type) { 1776 case CFI_DEVICETYPE_X8: 1777 - mfr = (__u8)finfo->mfr_id; 1778 - id = (__u8)finfo->dev_id; 1779 1780 /* bjd: it seems that if we do this, we can end up 1781 * detecting 16bit flashes as an 8bit device, even though ··· 1788 } 1789 break; 1790 case CFI_DEVICETYPE_X16: 1791 - mfr = (__u16)finfo->mfr_id; 1792 - id = (__u16)finfo->dev_id; 1793 break; 1794 case CFI_DEVICETYPE_X32: 1795 - mfr = (__u16)finfo->mfr_id; 1796 - id = (__u32)finfo->dev_id; 1797 break; 1798 default: 1799 printk(KERN_WARNING ··· 1808 /* the part size must fit in the memory window */ 1809 DEBUG( MTD_DEBUG_LEVEL3, 1810 "MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n", 1811 - __func__, base, 1 << finfo->DevSize, base + (1 << finfo->DevSize) ); 1812 - if ( base + cfi_interleave(cfi) * ( 1 << finfo->DevSize ) > map->size ) { 1813 DEBUG( MTD_DEBUG_LEVEL3, 1814 "MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n", 1815 __func__, finfo->mfr_id, finfo->dev_id, 1816 - 1 << finfo->DevSize ); 1817 goto match_done; 1818 } 1819 1820 - uaddr = finfo_uaddr(finfo, cfi->device_type); 1821 - if ( uaddr == MTD_UADDR_NOT_SUPPORTED ) { 1822 goto match_done; 1823 - } 1824 1825 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n", 1826 __func__, cfi->addr_unlock1, cfi->addr_unlock2 ); 1827 if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr 1828 - && ( unlock_addrs[uaddr].addr1 != cfi->addr_unlock1 || 1829 - unlock_addrs[uaddr].addr2 != cfi->addr_unlock2 ) ) { 1830 DEBUG( MTD_DEBUG_LEVEL3, 1831 "MTD %s(): 0x%.4x 0x%.4x did not match\n", 1832 __func__, ··· 1866 * were truly frobbing a real device. 
1867 */ 1868 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): return to ID mode\n", __func__ ); 1869 - if(cfi->addr_unlock1) { 1870 cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); 1871 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL); 1872 } ··· 1892 if (MTD_UADDR_UNNECESSARY == uaddr_idx) 1893 return 0; 1894 1895 - cfi->addr_unlock1 = unlock_addrs[uaddr_idx].addr1; 1896 - cfi->addr_unlock2 = unlock_addrs[uaddr_idx].addr2; 1897 } 1898 1899 /* Make certain we aren't probing past the end of map */ ··· 1905 1906 } 1907 /* Ensure the unlock addresses we try stay inside the map */ 1908 - probe_offset1 = cfi_build_cmd_addr( 1909 - cfi->addr_unlock1, 1910 - cfi_interleave(cfi), 1911 - cfi->device_type); 1912 - probe_offset2 = cfi_build_cmd_addr( 1913 - cfi->addr_unlock1, 1914 - cfi_interleave(cfi), 1915 - cfi->device_type); 1916 if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) || 1917 ((base + probe_offset2 + map_bankwidth(map)) >= map->size)) 1918 - { 1919 goto retry; 1920 - } 1921 1922 /* Reset */ 1923 jedec_reset(base, map, cfi); ··· 1944 } 1945 goto retry; 1946 } else { 1947 - __u16 mfr; 1948 - __u16 id; 1949 1950 /* Make sure it is a chip of the same manufacturer and id */ 1951 mfr = jedec_read_mfr(map, base, cfi);
··· 194 195 196 struct unlock_addr { 197 + uint32_t addr1; 198 + uint32_t addr2; 199 }; 200 201 ··· 246 } 247 }; 248 249 struct amd_flash_info { 250 const char *name; 251 + const uint16_t mfr_id; 252 + const uint16_t dev_id; 253 + const uint8_t dev_size; 254 + const uint8_t nr_regions; 255 + const uint16_t cmd_set; 256 + const uint32_t regions[6]; 257 + const uint8_t devtypes; /* Bitmask for x8, x16 etc. */ 258 + const uint8_t uaddr; /* unlock addrs for 8, 16, 32, 64 */ 259 }; 260 261 #define ERASEINFO(size,blocks) (size<<8)|(blocks-1) ··· 280 .mfr_id = MANUFACTURER_AMD, 281 .dev_id = AM29F032B, 282 .name = "AMD AM29F032B", 283 + .uaddr = MTD_UADDR_0x0555_0x02AA, 284 + .devtypes = CFI_DEVICETYPE_X8, 285 + .dev_size = SIZE_4MiB, 286 + .cmd_set = P_ID_AMD_STD, 287 + .nr_regions = 1, 288 .regions = { 289 ERASEINFO(0x10000,64) 290 } ··· 293 .mfr_id = MANUFACTURER_AMD, 294 .dev_id = AM29LV160DT, 295 .name = "AMD AM29LV160DT", 296 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 297 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 298 + .dev_size = SIZE_2MiB, 299 + .cmd_set = P_ID_AMD_STD, 300 + .nr_regions = 4, 301 .regions = { 302 ERASEINFO(0x10000,31), 303 ERASEINFO(0x08000,1), ··· 310 .mfr_id = MANUFACTURER_AMD, 311 .dev_id = AM29LV160DB, 312 .name = "AMD AM29LV160DB", 313 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 314 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 315 + .dev_size = SIZE_2MiB, 316 + .cmd_set = P_ID_AMD_STD, 317 + .nr_regions = 4, 318 .regions = { 319 ERASEINFO(0x04000,1), 320 ERASEINFO(0x02000,2), ··· 327 .mfr_id = MANUFACTURER_AMD, 328 .dev_id = AM29LV400BB, 329 .name = "AMD AM29LV400BB", 330 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 331 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 332 + .dev_size = SIZE_512KiB, 333 + .cmd_set = P_ID_AMD_STD, 334 + .nr_regions = 4, 335 .regions = { 336 ERASEINFO(0x04000,1), 337 ERASEINFO(0x02000,2), ··· 344 .mfr_id = MANUFACTURER_AMD, 345 .dev_id = AM29LV400BT, 346 .name = "AMD AM29LV400BT", 347 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 348 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 349 + .dev_size = SIZE_512KiB, 350 + .cmd_set = P_ID_AMD_STD, 351 + .nr_regions = 4, 352 .regions = { 353 ERASEINFO(0x10000,7), 354 ERASEINFO(0x08000,1), ··· 361 .mfr_id = MANUFACTURER_AMD, 362 .dev_id = AM29LV800BB, 363 .name = "AMD AM29LV800BB", 364 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 365 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 366 + .dev_size = SIZE_1MiB, 367 + .cmd_set = P_ID_AMD_STD, 368 + .nr_regions = 4, 369 .regions = { 370 ERASEINFO(0x04000,1), 371 ERASEINFO(0x02000,2), ··· 379 .mfr_id = MANUFACTURER_AMD, 380 .dev_id = AM29DL800BB, 381 .name = "AMD AM29DL800BB", 382 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 383 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 384 + .dev_size = SIZE_1MiB, 385 + .cmd_set = P_ID_AMD_STD, 386 + .nr_regions = 6, 387 .regions = { 388 ERASEINFO(0x04000,1), 389 ERASEINFO(0x08000,1), ··· 398 .mfr_id = MANUFACTURER_AMD, 399 .dev_id = AM29DL800BT, 400 .name = "AMD AM29DL800BT", 401 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 402 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 403 + .dev_size = SIZE_1MiB, 404 + .cmd_set = P_ID_AMD_STD, 405 + .nr_regions = 6, 406 .regions = { 407 ERASEINFO(0x10000,14), 408 ERASEINFO(0x04000,1), ··· 417 .mfr_id = MANUFACTURER_AMD, 418 .dev_id = AM29F800BB, 419 .name = "AMD AM29F800BB", 420 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 421 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 422 + .dev_size = SIZE_1MiB, 423 + .cmd_set = P_ID_AMD_STD, 424 + .nr_regions = 4, 425 .regions = { 426 ERASEINFO(0x04000,1), 
427 ERASEINFO(0x02000,2), ··· 434 .mfr_id = MANUFACTURER_AMD, 435 .dev_id = AM29LV800BT, 436 .name = "AMD AM29LV800BT", 437 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 438 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 439 + .dev_size = SIZE_1MiB, 440 + .cmd_set = P_ID_AMD_STD, 441 + .nr_regions = 4, 442 .regions = { 443 ERASEINFO(0x10000,15), 444 ERASEINFO(0x08000,1), ··· 451 .mfr_id = MANUFACTURER_AMD, 452 .dev_id = AM29F800BT, 453 .name = "AMD AM29F800BT", 454 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 455 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 456 + .dev_size = SIZE_1MiB, 457 + .cmd_set = P_ID_AMD_STD, 458 + .nr_regions = 4, 459 .regions = { 460 ERASEINFO(0x10000,15), 461 ERASEINFO(0x08000,1), ··· 468 .mfr_id = MANUFACTURER_AMD, 469 .dev_id = AM29F017D, 470 .name = "AMD AM29F017D", 471 + .devtypes = CFI_DEVICETYPE_X8, 472 + .uaddr = MTD_UADDR_DONT_CARE, 473 + .dev_size = SIZE_2MiB, 474 + .cmd_set = P_ID_AMD_STD, 475 + .nr_regions = 1, 476 .regions = { 477 ERASEINFO(0x10000,32), 478 } ··· 481 .mfr_id = MANUFACTURER_AMD, 482 .dev_id = AM29F016D, 483 .name = "AMD AM29F016D", 484 + .devtypes = CFI_DEVICETYPE_X8, 485 + .uaddr = MTD_UADDR_0x0555_0x02AA, 486 + .dev_size = SIZE_2MiB, 487 + .cmd_set = P_ID_AMD_STD, 488 + .nr_regions = 1, 489 .regions = { 490 ERASEINFO(0x10000,32), 491 } ··· 494 .mfr_id = MANUFACTURER_AMD, 495 .dev_id = AM29F080, 496 .name = "AMD AM29F080", 497 + .devtypes = CFI_DEVICETYPE_X8, 498 + .uaddr = MTD_UADDR_0x0555_0x02AA, 499 + .dev_size = SIZE_1MiB, 500 + .cmd_set = P_ID_AMD_STD, 501 + .nr_regions = 1, 502 .regions = { 503 ERASEINFO(0x10000,16), 504 } ··· 507 .mfr_id = MANUFACTURER_AMD, 508 .dev_id = AM29F040, 509 .name = "AMD AM29F040", 510 + .devtypes = CFI_DEVICETYPE_X8, 511 + .uaddr = MTD_UADDR_0x0555_0x02AA, 512 + .dev_size = SIZE_512KiB, 513 + .cmd_set = P_ID_AMD_STD, 514 + .nr_regions = 1, 515 .regions = { 516 ERASEINFO(0x10000,8), 517 } ··· 520 .mfr_id = MANUFACTURER_AMD, 521 .dev_id = AM29LV040B, 522 .name = "AMD AM29LV040B", 523 + .devtypes = CFI_DEVICETYPE_X8, 524 + .uaddr = MTD_UADDR_0x0555_0x02AA, 525 + .dev_size = SIZE_512KiB, 526 + .cmd_set = P_ID_AMD_STD, 527 + .nr_regions = 1, 528 .regions = { 529 ERASEINFO(0x10000,8), 530 } ··· 533 .mfr_id = MANUFACTURER_AMD, 534 .dev_id = AM29F002T, 535 .name = "AMD AM29F002T", 536 + .devtypes = CFI_DEVICETYPE_X8, 537 + .uaddr = MTD_UADDR_0x0555_0x02AA, 538 + .dev_size = SIZE_256KiB, 539 + .cmd_set = P_ID_AMD_STD, 540 + .nr_regions = 4, 541 .regions = { 542 ERASEINFO(0x10000,3), 543 ERASEINFO(0x08000,1), ··· 549 .mfr_id = MANUFACTURER_ATMEL, 550 .dev_id = AT49BV512, 551 .name = "Atmel AT49BV512", 552 + .devtypes = CFI_DEVICETYPE_X8, 553 + .uaddr = MTD_UADDR_0x5555_0x2AAA, 554 + .dev_size = SIZE_64KiB, 555 + .cmd_set = P_ID_AMD_STD, 556 + .nr_regions = 1, 557 .regions = { 558 ERASEINFO(0x10000,1) 559 } ··· 562 .mfr_id = MANUFACTURER_ATMEL, 563 .dev_id = AT29LV512, 564 .name = "Atmel AT29LV512", 565 + .devtypes = CFI_DEVICETYPE_X8, 566 + .uaddr = MTD_UADDR_0x5555_0x2AAA, 567 + .dev_size = SIZE_64KiB, 568 + .cmd_set = P_ID_AMD_STD, 569 + .nr_regions = 1, 570 .regions = { 571 ERASEINFO(0x80,256), 572 ERASEINFO(0x80,256) ··· 576 .mfr_id = MANUFACTURER_ATMEL, 577 .dev_id = AT49BV16X, 578 .name = "Atmel AT49BV16X", 579 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 580 + .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? 
*/ 581 + .dev_size = SIZE_2MiB, 582 + .cmd_set = P_ID_AMD_STD, 583 + .nr_regions = 2, 584 .regions = { 585 ERASEINFO(0x02000,8), 586 ERASEINFO(0x10000,31) ··· 591 .mfr_id = MANUFACTURER_ATMEL, 592 .dev_id = AT49BV16XT, 593 .name = "Atmel AT49BV16XT", 594 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 595 + .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */ 596 + .dev_size = SIZE_2MiB, 597 + .cmd_set = P_ID_AMD_STD, 598 + .nr_regions = 2, 599 .regions = { 600 ERASEINFO(0x10000,31), 601 ERASEINFO(0x02000,8) ··· 606 .mfr_id = MANUFACTURER_ATMEL, 607 .dev_id = AT49BV32X, 608 .name = "Atmel AT49BV32X", 609 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 610 + .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */ 611 + .dev_size = SIZE_4MiB, 612 + .cmd_set = P_ID_AMD_STD, 613 + .nr_regions = 2, 614 .regions = { 615 ERASEINFO(0x02000,8), 616 ERASEINFO(0x10000,63) ··· 621 .mfr_id = MANUFACTURER_ATMEL, 622 .dev_id = AT49BV32XT, 623 .name = "Atmel AT49BV32XT", 624 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 625 + .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */ 626 + .dev_size = SIZE_4MiB, 627 + .cmd_set = P_ID_AMD_STD, 628 + .nr_regions = 2, 629 .regions = { 630 ERASEINFO(0x10000,63), 631 ERASEINFO(0x02000,8) ··· 636 .mfr_id = MANUFACTURER_FUJITSU, 637 .dev_id = MBM29F040C, 638 .name = "Fujitsu MBM29F040C", 639 + .devtypes = CFI_DEVICETYPE_X8, 640 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 641 + .dev_size = SIZE_512KiB, 642 + .cmd_set = P_ID_AMD_STD, 643 + .nr_regions = 1, 644 .regions = { 645 ERASEINFO(0x10000,8) 646 } ··· 649 .mfr_id = MANUFACTURER_FUJITSU, 650 .dev_id = MBM29F800BA, 651 .name = "Fujitsu MBM29F800BA", 652 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 653 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 654 + .dev_size = SIZE_1MiB, 655 + .cmd_set = P_ID_AMD_STD, 656 + .nr_regions = 4, 657 .regions = { 658 ERASEINFO(0x04000,1), 659 ERASEINFO(0x02000,2), ··· 666 .mfr_id = MANUFACTURER_FUJITSU, 667 .dev_id = MBM29LV650UE, 668 .name = "Fujitsu MBM29LV650UE", 669 + .devtypes = CFI_DEVICETYPE_X8, 670 + .uaddr = MTD_UADDR_DONT_CARE, 671 + .dev_size = SIZE_8MiB, 672 + .cmd_set = P_ID_AMD_STD, 673 + .nr_regions = 1, 674 .regions = { 675 ERASEINFO(0x10000,128) 676 } ··· 679 .mfr_id = MANUFACTURER_FUJITSU, 680 .dev_id = MBM29LV320TE, 681 .name = "Fujitsu MBM29LV320TE", 682 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 683 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 684 + .dev_size = SIZE_4MiB, 685 + .cmd_set = P_ID_AMD_STD, 686 + .nr_regions = 2, 687 .regions = { 688 ERASEINFO(0x10000,63), 689 ERASEINFO(0x02000,8) ··· 694 .mfr_id = MANUFACTURER_FUJITSU, 695 .dev_id = MBM29LV320BE, 696 .name = "Fujitsu MBM29LV320BE", 697 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 698 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 699 + .dev_size = SIZE_4MiB, 700 + .cmd_set = P_ID_AMD_STD, 701 + .nr_regions = 2, 702 .regions = { 703 ERASEINFO(0x02000,8), 704 ERASEINFO(0x10000,63) ··· 709 .mfr_id = MANUFACTURER_FUJITSU, 710 .dev_id = MBM29LV160TE, 711 .name = "Fujitsu MBM29LV160TE", 712 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 713 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 714 + .dev_size = SIZE_2MiB, 715 + .cmd_set = P_ID_AMD_STD, 716 + .nr_regions = 4, 717 .regions = { 718 ERASEINFO(0x10000,31), 719 ERASEINFO(0x08000,1), ··· 726 .mfr_id = MANUFACTURER_FUJITSU, 727 .dev_id = MBM29LV160BE, 728 .name = "Fujitsu MBM29LV160BE", 729 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 730 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 731 + .dev_size = SIZE_2MiB, 732 + .cmd_set = P_ID_AMD_STD, 733 + .nr_regions = 4, 734 .regions = { 735 
ERASEINFO(0x04000,1), 736 ERASEINFO(0x02000,2), ··· 743 .mfr_id = MANUFACTURER_FUJITSU, 744 .dev_id = MBM29LV800BA, 745 .name = "Fujitsu MBM29LV800BA", 746 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 747 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 748 + .dev_size = SIZE_1MiB, 749 + .cmd_set = P_ID_AMD_STD, 750 + .nr_regions = 4, 751 .regions = { 752 ERASEINFO(0x04000,1), 753 ERASEINFO(0x02000,2), ··· 760 .mfr_id = MANUFACTURER_FUJITSU, 761 .dev_id = MBM29LV800TA, 762 .name = "Fujitsu MBM29LV800TA", 763 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 764 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 765 + .dev_size = SIZE_1MiB, 766 + .cmd_set = P_ID_AMD_STD, 767 + .nr_regions = 4, 768 .regions = { 769 ERASEINFO(0x10000,15), 770 ERASEINFO(0x08000,1), ··· 777 .mfr_id = MANUFACTURER_FUJITSU, 778 .dev_id = MBM29LV400BC, 779 .name = "Fujitsu MBM29LV400BC", 780 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 781 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 782 + .dev_size = SIZE_512KiB, 783 + .cmd_set = P_ID_AMD_STD, 784 + .nr_regions = 4, 785 .regions = { 786 ERASEINFO(0x04000,1), 787 ERASEINFO(0x02000,2), ··· 794 .mfr_id = MANUFACTURER_FUJITSU, 795 .dev_id = MBM29LV400TC, 796 .name = "Fujitsu MBM29LV400TC", 797 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 798 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 799 + .dev_size = SIZE_512KiB, 800 + .cmd_set = P_ID_AMD_STD, 801 + .nr_regions = 4, 802 .regions = { 803 ERASEINFO(0x10000,7), 804 ERASEINFO(0x08000,1), ··· 811 .mfr_id = MANUFACTURER_HYUNDAI, 812 .dev_id = HY29F002T, 813 .name = "Hyundai HY29F002T", 814 + .devtypes = CFI_DEVICETYPE_X8, 815 + .uaddr = MTD_UADDR_0x0555_0x02AA, 816 + .dev_size = SIZE_256KiB, 817 + .cmd_set = P_ID_AMD_STD, 818 + .nr_regions = 4, 819 .regions = { 820 ERASEINFO(0x10000,3), 821 ERASEINFO(0x08000,1), ··· 827 .mfr_id = MANUFACTURER_INTEL, 828 .dev_id = I28F004B3B, 829 .name = "Intel 28F004B3B", 830 + .devtypes = CFI_DEVICETYPE_X8, 831 + .uaddr = MTD_UADDR_UNNECESSARY, 832 + .dev_size = SIZE_512KiB, 833 + .cmd_set = P_ID_INTEL_STD, 834 + .nr_regions = 2, 835 .regions = { 836 ERASEINFO(0x02000, 8), 837 ERASEINFO(0x10000, 7), ··· 841 .mfr_id = MANUFACTURER_INTEL, 842 .dev_id = I28F004B3T, 843 .name = "Intel 28F004B3T", 844 + .devtypes = CFI_DEVICETYPE_X8, 845 + .uaddr = MTD_UADDR_UNNECESSARY, 846 + .dev_size = SIZE_512KiB, 847 + .cmd_set = P_ID_INTEL_STD, 848 + .nr_regions = 2, 849 .regions = { 850 ERASEINFO(0x10000, 7), 851 ERASEINFO(0x02000, 8), ··· 855 .mfr_id = MANUFACTURER_INTEL, 856 .dev_id = I28F400B3B, 857 .name = "Intel 28F400B3B", 858 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 859 + .uaddr = MTD_UADDR_UNNECESSARY, 860 + .dev_size = SIZE_512KiB, 861 + .cmd_set = P_ID_INTEL_STD, 862 + .nr_regions = 2, 863 .regions = { 864 ERASEINFO(0x02000, 8), 865 ERASEINFO(0x10000, 7), ··· 870 .mfr_id = MANUFACTURER_INTEL, 871 .dev_id = I28F400B3T, 872 .name = "Intel 28F400B3T", 873 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 874 + .uaddr = MTD_UADDR_UNNECESSARY, 875 + .dev_size = SIZE_512KiB, 876 + .cmd_set = P_ID_INTEL_STD, 877 + .nr_regions = 2, 878 .regions = { 879 ERASEINFO(0x10000, 7), 880 ERASEINFO(0x02000, 8), ··· 885 .mfr_id = MANUFACTURER_INTEL, 886 .dev_id = I28F008B3B, 887 .name = "Intel 28F008B3B", 888 + .devtypes = CFI_DEVICETYPE_X8, 889 + .uaddr = MTD_UADDR_UNNECESSARY, 890 + .dev_size = SIZE_1MiB, 891 + .cmd_set = P_ID_INTEL_STD, 892 + .nr_regions = 2, 893 .regions = { 894 ERASEINFO(0x02000, 8), 895 ERASEINFO(0x10000, 15), ··· 899 .mfr_id = MANUFACTURER_INTEL, 900 .dev_id = I28F008B3T, 901 .name = "Intel 
28F008B3T", 902 + .devtypes = CFI_DEVICETYPE_X8, 903 + .uaddr = MTD_UADDR_UNNECESSARY, 904 + .dev_size = SIZE_1MiB, 905 + .cmd_set = P_ID_INTEL_STD, 906 + .nr_regions = 2, 907 .regions = { 908 ERASEINFO(0x10000, 15), 909 ERASEINFO(0x02000, 8), ··· 913 .mfr_id = MANUFACTURER_INTEL, 914 .dev_id = I28F008S5, 915 .name = "Intel 28F008S5", 916 + .devtypes = CFI_DEVICETYPE_X8, 917 + .uaddr = MTD_UADDR_UNNECESSARY, 918 + .dev_size = SIZE_1MiB, 919 + .cmd_set = P_ID_INTEL_EXT, 920 + .nr_regions = 1, 921 .regions = { 922 ERASEINFO(0x10000,16), 923 } ··· 926 .mfr_id = MANUFACTURER_INTEL, 927 .dev_id = I28F016S5, 928 .name = "Intel 28F016S5", 929 + .devtypes = CFI_DEVICETYPE_X8, 930 + .uaddr = MTD_UADDR_UNNECESSARY, 931 + .dev_size = SIZE_2MiB, 932 + .cmd_set = P_ID_INTEL_EXT, 933 + .nr_regions = 1, 934 .regions = { 935 ERASEINFO(0x10000,32), 936 } ··· 939 .mfr_id = MANUFACTURER_INTEL, 940 .dev_id = I28F008SA, 941 .name = "Intel 28F008SA", 942 + .devtypes = CFI_DEVICETYPE_X8, 943 + .uaddr = MTD_UADDR_UNNECESSARY, 944 + .dev_size = SIZE_1MiB, 945 + .cmd_set = P_ID_INTEL_STD, 946 + .nr_regions = 1, 947 .regions = { 948 ERASEINFO(0x10000, 16), 949 } ··· 952 .mfr_id = MANUFACTURER_INTEL, 953 .dev_id = I28F800B3B, 954 .name = "Intel 28F800B3B", 955 + .devtypes = CFI_DEVICETYPE_X16, 956 + .uaddr = MTD_UADDR_UNNECESSARY, 957 + .dev_size = SIZE_1MiB, 958 + .cmd_set = P_ID_INTEL_STD, 959 + .nr_regions = 2, 960 .regions = { 961 ERASEINFO(0x02000, 8), 962 ERASEINFO(0x10000, 15), ··· 966 .mfr_id = MANUFACTURER_INTEL, 967 .dev_id = I28F800B3T, 968 .name = "Intel 28F800B3T", 969 + .devtypes = CFI_DEVICETYPE_X16, 970 + .uaddr = MTD_UADDR_UNNECESSARY, 971 + .dev_size = SIZE_1MiB, 972 + .cmd_set = P_ID_INTEL_STD, 973 + .nr_regions = 2, 974 .regions = { 975 ERASEINFO(0x10000, 15), 976 ERASEINFO(0x02000, 8), ··· 980 .mfr_id = MANUFACTURER_INTEL, 981 .dev_id = I28F016B3B, 982 .name = "Intel 28F016B3B", 983 + .devtypes = CFI_DEVICETYPE_X8, 984 + .uaddr = MTD_UADDR_UNNECESSARY, 985 + .dev_size = SIZE_2MiB, 986 + .cmd_set = P_ID_INTEL_STD, 987 + .nr_regions = 2, 988 .regions = { 989 ERASEINFO(0x02000, 8), 990 ERASEINFO(0x10000, 31), ··· 994 .mfr_id = MANUFACTURER_INTEL, 995 .dev_id = I28F016S3, 996 .name = "Intel I28F016S3", 997 + .devtypes = CFI_DEVICETYPE_X8, 998 + .uaddr = MTD_UADDR_UNNECESSARY, 999 + .dev_size = SIZE_2MiB, 1000 + .cmd_set = P_ID_INTEL_STD, 1001 + .nr_regions = 1, 1002 .regions = { 1003 ERASEINFO(0x10000, 32), 1004 } ··· 1007 .mfr_id = MANUFACTURER_INTEL, 1008 .dev_id = I28F016B3T, 1009 .name = "Intel 28F016B3T", 1010 + .devtypes = CFI_DEVICETYPE_X8, 1011 + .uaddr = MTD_UADDR_UNNECESSARY, 1012 + .dev_size = SIZE_2MiB, 1013 + .cmd_set = P_ID_INTEL_STD, 1014 + .nr_regions = 2, 1015 .regions = { 1016 ERASEINFO(0x10000, 31), 1017 ERASEINFO(0x02000, 8), ··· 1021 .mfr_id = MANUFACTURER_INTEL, 1022 .dev_id = I28F160B3B, 1023 .name = "Intel 28F160B3B", 1024 + .devtypes = CFI_DEVICETYPE_X16, 1025 + .uaddr = MTD_UADDR_UNNECESSARY, 1026 + .dev_size = SIZE_2MiB, 1027 + .cmd_set = P_ID_INTEL_STD, 1028 + .nr_regions = 2, 1029 .regions = { 1030 ERASEINFO(0x02000, 8), 1031 ERASEINFO(0x10000, 31), ··· 1035 .mfr_id = MANUFACTURER_INTEL, 1036 .dev_id = I28F160B3T, 1037 .name = "Intel 28F160B3T", 1038 + .devtypes = CFI_DEVICETYPE_X16, 1039 + .uaddr = MTD_UADDR_UNNECESSARY, 1040 + .dev_size = SIZE_2MiB, 1041 + .cmd_set = P_ID_INTEL_STD, 1042 + .nr_regions = 2, 1043 .regions = { 1044 ERASEINFO(0x10000, 31), 1045 ERASEINFO(0x02000, 8), ··· 1049 .mfr_id = MANUFACTURER_INTEL, 1050 .dev_id = I28F320B3B, 1051 .name = "Intel 
28F320B3B", 1052 + .devtypes = CFI_DEVICETYPE_X16, 1053 + .uaddr = MTD_UADDR_UNNECESSARY, 1054 + .dev_size = SIZE_4MiB, 1055 + .cmd_set = P_ID_INTEL_STD, 1056 + .nr_regions = 2, 1057 .regions = { 1058 ERASEINFO(0x02000, 8), 1059 ERASEINFO(0x10000, 63), ··· 1063 .mfr_id = MANUFACTURER_INTEL, 1064 .dev_id = I28F320B3T, 1065 .name = "Intel 28F320B3T", 1066 + .devtypes = CFI_DEVICETYPE_X16, 1067 + .uaddr = MTD_UADDR_UNNECESSARY, 1068 + .dev_size = SIZE_4MiB, 1069 + .cmd_set = P_ID_INTEL_STD, 1070 + .nr_regions = 2, 1071 .regions = { 1072 ERASEINFO(0x10000, 63), 1073 ERASEINFO(0x02000, 8), ··· 1077 .mfr_id = MANUFACTURER_INTEL, 1078 .dev_id = I28F640B3B, 1079 .name = "Intel 28F640B3B", 1080 + .devtypes = CFI_DEVICETYPE_X16, 1081 + .uaddr = MTD_UADDR_UNNECESSARY, 1082 + .dev_size = SIZE_8MiB, 1083 + .cmd_set = P_ID_INTEL_STD, 1084 + .nr_regions = 2, 1085 .regions = { 1086 ERASEINFO(0x02000, 8), 1087 ERASEINFO(0x10000, 127), ··· 1091 .mfr_id = MANUFACTURER_INTEL, 1092 .dev_id = I28F640B3T, 1093 .name = "Intel 28F640B3T", 1094 + .devtypes = CFI_DEVICETYPE_X16, 1095 + .uaddr = MTD_UADDR_UNNECESSARY, 1096 + .dev_size = SIZE_8MiB, 1097 + .cmd_set = P_ID_INTEL_STD, 1098 + .nr_regions = 2, 1099 .regions = { 1100 ERASEINFO(0x10000, 127), 1101 ERASEINFO(0x02000, 8), ··· 1105 .mfr_id = MANUFACTURER_INTEL, 1106 .dev_id = I82802AB, 1107 .name = "Intel 82802AB", 1108 + .devtypes = CFI_DEVICETYPE_X8, 1109 + .uaddr = MTD_UADDR_UNNECESSARY, 1110 + .dev_size = SIZE_512KiB, 1111 + .cmd_set = P_ID_INTEL_EXT, 1112 + .nr_regions = 1, 1113 .regions = { 1114 ERASEINFO(0x10000,8), 1115 } ··· 1118 .mfr_id = MANUFACTURER_INTEL, 1119 .dev_id = I82802AC, 1120 .name = "Intel 82802AC", 1121 + .devtypes = CFI_DEVICETYPE_X8, 1122 + .uaddr = MTD_UADDR_UNNECESSARY, 1123 + .dev_size = SIZE_1MiB, 1124 + .cmd_set = P_ID_INTEL_EXT, 1125 + .nr_regions = 1, 1126 .regions = { 1127 ERASEINFO(0x10000,16), 1128 } ··· 1131 .mfr_id = MANUFACTURER_MACRONIX, 1132 .dev_id = MX29LV040C, 1133 .name = "Macronix MX29LV040C", 1134 + .devtypes = CFI_DEVICETYPE_X8, 1135 + .uaddr = MTD_UADDR_0x0555_0x02AA, 1136 + .dev_size = SIZE_512KiB, 1137 + .cmd_set = P_ID_AMD_STD, 1138 + .nr_regions = 1, 1139 .regions = { 1140 ERASEINFO(0x10000,8), 1141 } ··· 1144 .mfr_id = MANUFACTURER_MACRONIX, 1145 .dev_id = MX29LV160T, 1146 .name = "MXIC MX29LV160T", 1147 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1148 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 1149 + .dev_size = SIZE_2MiB, 1150 + .cmd_set = P_ID_AMD_STD, 1151 + .nr_regions = 4, 1152 .regions = { 1153 ERASEINFO(0x10000,31), 1154 ERASEINFO(0x08000,1), ··· 1161 .mfr_id = MANUFACTURER_NEC, 1162 .dev_id = UPD29F064115, 1163 .name = "NEC uPD29F064115", 1164 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1165 + .uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? 
*/ 1166 + .dev_size = SIZE_8MiB, 1167 + .cmd_set = P_ID_AMD_STD, 1168 + .nr_regions = 3, 1169 .regions = { 1170 ERASEINFO(0x2000,8), 1171 ERASEINFO(0x10000,126), ··· 1177 .mfr_id = MANUFACTURER_MACRONIX, 1178 .dev_id = MX29LV160B, 1179 .name = "MXIC MX29LV160B", 1180 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1181 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 1182 + .dev_size = SIZE_2MiB, 1183 + .cmd_set = P_ID_AMD_STD, 1184 + .nr_regions = 4, 1185 .regions = { 1186 ERASEINFO(0x04000,1), 1187 ERASEINFO(0x02000,2), ··· 1194 .mfr_id = MANUFACTURER_MACRONIX, 1195 .dev_id = MX29F040, 1196 .name = "Macronix MX29F040", 1197 + .devtypes = CFI_DEVICETYPE_X8, 1198 + .uaddr = MTD_UADDR_0x0555_0x02AA, 1199 + .dev_size = SIZE_512KiB, 1200 + .cmd_set = P_ID_AMD_STD, 1201 + .nr_regions = 1, 1202 .regions = { 1203 ERASEINFO(0x10000,8), 1204 } ··· 1207 .mfr_id = MANUFACTURER_MACRONIX, 1208 .dev_id = MX29F016, 1209 .name = "Macronix MX29F016", 1210 + .devtypes = CFI_DEVICETYPE_X8, 1211 + .uaddr = MTD_UADDR_0x0555_0x02AA, 1212 + .dev_size = SIZE_2MiB, 1213 + .cmd_set = P_ID_AMD_STD, 1214 + .nr_regions = 1, 1215 .regions = { 1216 ERASEINFO(0x10000,32), 1217 } ··· 1220 .mfr_id = MANUFACTURER_MACRONIX, 1221 .dev_id = MX29F004T, 1222 .name = "Macronix MX29F004T", 1223 + .devtypes = CFI_DEVICETYPE_X8, 1224 + .uaddr = MTD_UADDR_0x0555_0x02AA, 1225 + .dev_size = SIZE_512KiB, 1226 + .cmd_set = P_ID_AMD_STD, 1227 + .nr_regions = 4, 1228 .regions = { 1229 ERASEINFO(0x10000,7), 1230 ERASEINFO(0x08000,1), ··· 1236 .mfr_id = MANUFACTURER_MACRONIX, 1237 .dev_id = MX29F004B, 1238 .name = "Macronix MX29F004B", 1239 + .devtypes = CFI_DEVICETYPE_X8, 1240 + .uaddr = MTD_UADDR_0x0555_0x02AA, 1241 + .dev_size = SIZE_512KiB, 1242 + .cmd_set = P_ID_AMD_STD, 1243 + .nr_regions = 4, 1244 .regions = { 1245 ERASEINFO(0x04000,1), 1246 ERASEINFO(0x02000,2), ··· 1252 .mfr_id = MANUFACTURER_MACRONIX, 1253 .dev_id = MX29F002T, 1254 .name = "Macronix MX29F002T", 1255 + .devtypes = CFI_DEVICETYPE_X8, 1256 + .uaddr = MTD_UADDR_0x0555_0x02AA, 1257 + .dev_size = SIZE_256KiB, 1258 + .cmd_set = P_ID_AMD_STD, 1259 + .nr_regions = 4, 1260 .regions = { 1261 ERASEINFO(0x10000,3), 1262 ERASEINFO(0x08000,1), ··· 1268 .mfr_id = MANUFACTURER_PMC, 1269 .dev_id = PM49FL002, 1270 .name = "PMC Pm49FL002", 1271 + .devtypes = CFI_DEVICETYPE_X8, 1272 + .uaddr = MTD_UADDR_0x5555_0x2AAA, 1273 + .dev_size = SIZE_256KiB, 1274 + .cmd_set = P_ID_AMD_STD, 1275 + .nr_regions = 1, 1276 .regions = { 1277 ERASEINFO( 0x01000, 64 ) 1278 } ··· 1281 .mfr_id = MANUFACTURER_PMC, 1282 .dev_id = PM49FL004, 1283 .name = "PMC Pm49FL004", 1284 + .devtypes = CFI_DEVICETYPE_X8, 1285 + .uaddr = MTD_UADDR_0x5555_0x2AAA, 1286 + .dev_size = SIZE_512KiB, 1287 + .cmd_set = P_ID_AMD_STD, 1288 + .nr_regions = 1, 1289 .regions = { 1290 ERASEINFO( 0x01000, 128 ) 1291 } ··· 1294 .mfr_id = MANUFACTURER_PMC, 1295 .dev_id = PM49FL008, 1296 .name = "PMC Pm49FL008", 1297 + .devtypes = CFI_DEVICETYPE_X8, 1298 + .uaddr = MTD_UADDR_0x5555_0x2AAA, 1299 + .dev_size = SIZE_1MiB, 1300 + .cmd_set = P_ID_AMD_STD, 1301 + .nr_regions = 1, 1302 .regions = { 1303 ERASEINFO( 0x01000, 256 ) 1304 } ··· 1307 .mfr_id = MANUFACTURER_SHARP, 1308 .dev_id = LH28F640BF, 1309 .name = "LH28F640BF", 1310 + .devtypes = CFI_DEVICETYPE_X8, 1311 + .uaddr = MTD_UADDR_UNNECESSARY, 1312 + .dev_size = SIZE_4MiB, 1313 + .cmd_set = P_ID_INTEL_STD, 1314 + .nr_regions = 1, 1315 + .regions = { 1316 ERASEINFO(0x40000,16), 1317 } 1318 }, { 1319 .mfr_id = MANUFACTURER_SST, 1320 .dev_id = SST39LF512, 1321 .name = "SST 39LF512", 1322 + 
.devtypes = CFI_DEVICETYPE_X8, 1323 + .uaddr = MTD_UADDR_0x5555_0x2AAA, 1324 + .dev_size = SIZE_64KiB, 1325 + .cmd_set = P_ID_AMD_STD, 1326 + .nr_regions = 1, 1327 .regions = { 1328 ERASEINFO(0x01000,16), 1329 } ··· 1333 .mfr_id = MANUFACTURER_SST, 1334 .dev_id = SST39LF010, 1335 .name = "SST 39LF010", 1336 + .devtypes = CFI_DEVICETYPE_X8, 1337 + .uaddr = MTD_UADDR_0x5555_0x2AAA, 1338 + .dev_size = SIZE_128KiB, 1339 + .cmd_set = P_ID_AMD_STD, 1340 + .nr_regions = 1, 1341 .regions = { 1342 ERASEINFO(0x01000,32), 1343 } ··· 1346 .mfr_id = MANUFACTURER_SST, 1347 .dev_id = SST29EE020, 1348 .name = "SST 29EE020", 1349 + .devtypes = CFI_DEVICETYPE_X8, 1350 + .uaddr = MTD_UADDR_0x5555_0x2AAA, 1351 + .dev_size = SIZE_256KiB, 1352 + .cmd_set = P_ID_SST_PAGE, 1353 + .nr_regions = 1, 1354 + .regions = {ERASEINFO(0x01000,64), 1355 + } 1356 + }, { 1357 .mfr_id = MANUFACTURER_SST, 1358 .dev_id = SST29LE020, 1359 .name = "SST 29LE020", 1360 + .devtypes = CFI_DEVICETYPE_X8, 1361 + .uaddr = MTD_UADDR_0x5555_0x2AAA, 1362 + .dev_size = SIZE_256KiB, 1363 + .cmd_set = P_ID_SST_PAGE, 1364 + .nr_regions = 1, 1365 + .regions = {ERASEINFO(0x01000,64), 1366 + } 1367 }, { 1368 .mfr_id = MANUFACTURER_SST, 1369 .dev_id = SST39LF020, 1370 .name = "SST 39LF020", 1371 + .devtypes = CFI_DEVICETYPE_X8, 1372 + .uaddr = MTD_UADDR_0x5555_0x2AAA, 1373 + .dev_size = SIZE_256KiB, 1374 + .cmd_set = P_ID_AMD_STD, 1375 + .nr_regions = 1, 1376 .regions = { 1377 ERASEINFO(0x01000,64), 1378 } ··· 1383 .mfr_id = MANUFACTURER_SST, 1384 .dev_id = SST39LF040, 1385 .name = "SST 39LF040", 1386 + .devtypes = CFI_DEVICETYPE_X8, 1387 + .uaddr = MTD_UADDR_0x5555_0x2AAA, 1388 + .dev_size = SIZE_512KiB, 1389 + .cmd_set = P_ID_AMD_STD, 1390 + .nr_regions = 1, 1391 .regions = { 1392 ERASEINFO(0x01000,128), 1393 } ··· 1396 .mfr_id = MANUFACTURER_SST, 1397 .dev_id = SST39SF010A, 1398 .name = "SST 39SF010A", 1399 + .devtypes = CFI_DEVICETYPE_X8, 1400 + .uaddr = MTD_UADDR_0x5555_0x2AAA, 1401 + .dev_size = SIZE_128KiB, 1402 + .cmd_set = P_ID_AMD_STD, 1403 + .nr_regions = 1, 1404 .regions = { 1405 ERASEINFO(0x01000,32), 1406 } ··· 1409 .mfr_id = MANUFACTURER_SST, 1410 .dev_id = SST39SF020A, 1411 .name = "SST 39SF020A", 1412 + .devtypes = CFI_DEVICETYPE_X8, 1413 + .uaddr = MTD_UADDR_0x5555_0x2AAA, 1414 + .dev_size = SIZE_256KiB, 1415 + .cmd_set = P_ID_AMD_STD, 1416 + .nr_regions = 1, 1417 .regions = { 1418 ERASEINFO(0x01000,64), 1419 } 1420 }, { 1421 .mfr_id = MANUFACTURER_SST, 1422 + .dev_id = SST49LF040B, 1423 + .name = "SST 49LF040B", 1424 + .devtypes = CFI_DEVICETYPE_X8, 1425 + .uaddr = MTD_UADDR_0x5555_0x2AAA, 1426 + .dev_size = SIZE_512KiB, 1427 + .cmd_set = P_ID_AMD_STD, 1428 + .nr_regions = 1, 1429 + .regions = { 1430 ERASEINFO(0x01000,128), 1431 } 1432 }, { ··· 1436 .mfr_id = MANUFACTURER_SST, 1437 .dev_id = SST49LF004B, 1438 .name = "SST 49LF004B", 1439 + .devtypes = CFI_DEVICETYPE_X8, 1440 + .uaddr = MTD_UADDR_0x5555_0x2AAA, 1441 + .dev_size = SIZE_512KiB, 1442 + .cmd_set = P_ID_AMD_STD, 1443 + .nr_regions = 1, 1444 .regions = { 1445 ERASEINFO(0x01000,128), 1446 } ··· 1449 .mfr_id = MANUFACTURER_SST, 1450 .dev_id = SST49LF008A, 1451 .name = "SST 49LF008A", 1452 + .devtypes = CFI_DEVICETYPE_X8, 1453 + .uaddr = MTD_UADDR_0x5555_0x2AAA, 1454 + .dev_size = SIZE_1MiB, 1455 + .cmd_set = P_ID_AMD_STD, 1456 + .nr_regions = 1, 1457 .regions = { 1458 ERASEINFO(0x01000,256), 1459 } ··· 1462 .mfr_id = MANUFACTURER_SST, 1463 .dev_id = SST49LF030A, 1464 .name = "SST 49LF030A", 1465 + .devtypes = CFI_DEVICETYPE_X8, 1466 + .uaddr = MTD_UADDR_0x5555_0x2AAA, 
1467 + .dev_size = SIZE_512KiB, 1468 + .cmd_set = P_ID_AMD_STD, 1469 + .nr_regions = 1, 1470 .regions = { 1471 ERASEINFO(0x01000,96), 1472 } ··· 1475 .mfr_id = MANUFACTURER_SST, 1476 .dev_id = SST49LF040A, 1477 .name = "SST 49LF040A", 1478 + .devtypes = CFI_DEVICETYPE_X8, 1479 + .uaddr = MTD_UADDR_0x5555_0x2AAA, 1480 + .dev_size = SIZE_512KiB, 1481 + .cmd_set = P_ID_AMD_STD, 1482 + .nr_regions = 1, 1483 .regions = { 1484 ERASEINFO(0x01000,128), 1485 } ··· 1488 .mfr_id = MANUFACTURER_SST, 1489 .dev_id = SST49LF080A, 1490 .name = "SST 49LF080A", 1491 + .devtypes = CFI_DEVICETYPE_X8, 1492 + .uaddr = MTD_UADDR_0x5555_0x2AAA, 1493 + .dev_size = SIZE_1MiB, 1494 + .cmd_set = P_ID_AMD_STD, 1495 + .nr_regions = 1, 1496 .regions = { 1497 ERASEINFO(0x01000,256), 1498 } 1499 }, { 1500 + .mfr_id = MANUFACTURER_SST, /* should be CFI */ 1501 + .dev_id = SST39LF160, 1502 + .name = "SST 39LF160", 1503 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1504 + .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */ 1505 + .dev_size = SIZE_2MiB, 1506 + .cmd_set = P_ID_AMD_STD, 1507 + .nr_regions = 2, 1508 + .regions = { 1509 + ERASEINFO(0x1000,256), 1510 + ERASEINFO(0x1000,256) 1511 + } 1512 }, { 1513 + .mfr_id = MANUFACTURER_SST, /* should be CFI */ 1514 + .dev_id = SST39VF1601, 1515 + .name = "SST 39VF1601", 1516 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1517 + .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */ 1518 + .dev_size = SIZE_2MiB, 1519 + .cmd_set = P_ID_AMD_STD, 1520 + .nr_regions = 2, 1521 + .regions = { 1522 + ERASEINFO(0x1000,256), 1523 + ERASEINFO(0x1000,256) 1524 + } 1525 }, { 1526 .mfr_id = MANUFACTURER_ST, 1527 .dev_id = M29F800AB, 1528 .name = "ST M29F800AB", 1529 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1530 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 1531 + .dev_size = SIZE_1MiB, 1532 + .cmd_set = P_ID_AMD_STD, 1533 + .nr_regions = 4, 1534 .regions = { 1535 ERASEINFO(0x04000,1), 1536 ERASEINFO(0x02000,2), ··· 1549 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ 1550 .dev_id = M29W800DT, 1551 .name = "ST M29W800DT", 1552 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1553 + .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */ 1554 + .dev_size = SIZE_1MiB, 1555 + .cmd_set = P_ID_AMD_STD, 1556 + .nr_regions = 4, 1557 .regions = { 1558 ERASEINFO(0x10000,15), 1559 ERASEINFO(0x08000,1), ··· 1566 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ 1567 .dev_id = M29W800DB, 1568 .name = "ST M29W800DB", 1569 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1570 + .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */ 1571 + .dev_size = SIZE_1MiB, 1572 + .cmd_set = P_ID_AMD_STD, 1573 + .nr_regions = 4, 1574 .regions = { 1575 ERASEINFO(0x04000,1), 1576 ERASEINFO(0x02000,2), ··· 1583 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ 1584 .dev_id = M29W160DT, 1585 .name = "ST M29W160DT", 1586 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1587 + .uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? */ 1588 + .dev_size = SIZE_2MiB, 1589 + .cmd_set = P_ID_AMD_STD, 1590 + .nr_regions = 4, 1591 .regions = { 1592 ERASEINFO(0x10000,31), 1593 ERASEINFO(0x08000,1), ··· 1600 .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ 1601 .dev_id = M29W160DB, 1602 .name = "ST M29W160DB", 1603 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1604 + .uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? 
*/ 1605 + .dev_size = SIZE_2MiB, 1606 + .cmd_set = P_ID_AMD_STD, 1607 + .nr_regions = 4, 1608 .regions = { 1609 ERASEINFO(0x04000,1), 1610 ERASEINFO(0x02000,2), ··· 1617 .mfr_id = MANUFACTURER_ST, 1618 .dev_id = M29W040B, 1619 .name = "ST M29W040B", 1620 + .devtypes = CFI_DEVICETYPE_X8, 1621 + .uaddr = MTD_UADDR_0x0555_0x02AA, 1622 + .dev_size = SIZE_512KiB, 1623 + .cmd_set = P_ID_AMD_STD, 1624 + .nr_regions = 1, 1625 .regions = { 1626 ERASEINFO(0x10000,8), 1627 } ··· 1630 .mfr_id = MANUFACTURER_ST, 1631 .dev_id = M50FW040, 1632 .name = "ST M50FW040", 1633 + .devtypes = CFI_DEVICETYPE_X8, 1634 + .uaddr = MTD_UADDR_UNNECESSARY, 1635 + .dev_size = SIZE_512KiB, 1636 + .cmd_set = P_ID_INTEL_EXT, 1637 + .nr_regions = 1, 1638 .regions = { 1639 ERASEINFO(0x10000,8), 1640 } ··· 1643 .mfr_id = MANUFACTURER_ST, 1644 .dev_id = M50FW080, 1645 .name = "ST M50FW080", 1646 + .devtypes = CFI_DEVICETYPE_X8, 1647 + .uaddr = MTD_UADDR_UNNECESSARY, 1648 + .dev_size = SIZE_1MiB, 1649 + .cmd_set = P_ID_INTEL_EXT, 1650 + .nr_regions = 1, 1651 .regions = { 1652 ERASEINFO(0x10000,16), 1653 } ··· 1656 .mfr_id = MANUFACTURER_ST, 1657 .dev_id = M50FW016, 1658 .name = "ST M50FW016", 1659 + .devtypes = CFI_DEVICETYPE_X8, 1660 + .uaddr = MTD_UADDR_UNNECESSARY, 1661 + .dev_size = SIZE_2MiB, 1662 + .cmd_set = P_ID_INTEL_EXT, 1663 + .nr_regions = 1, 1664 .regions = { 1665 ERASEINFO(0x10000,32), 1666 } ··· 1669 .mfr_id = MANUFACTURER_ST, 1670 .dev_id = M50LPW080, 1671 .name = "ST M50LPW080", 1672 + .devtypes = CFI_DEVICETYPE_X8, 1673 + .uaddr = MTD_UADDR_UNNECESSARY, 1674 + .dev_size = SIZE_1MiB, 1675 + .cmd_set = P_ID_INTEL_EXT, 1676 + .nr_regions = 1, 1677 .regions = { 1678 ERASEINFO(0x10000,16), 1679 } ··· 1682 .mfr_id = MANUFACTURER_TOSHIBA, 1683 .dev_id = TC58FVT160, 1684 .name = "Toshiba TC58FVT160", 1685 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1686 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 1687 + .dev_size = SIZE_2MiB, 1688 + .cmd_set = P_ID_AMD_STD, 1689 + .nr_regions = 4, 1690 .regions = { 1691 ERASEINFO(0x10000,31), 1692 ERASEINFO(0x08000,1), ··· 1699 .mfr_id = MANUFACTURER_TOSHIBA, 1700 .dev_id = TC58FVB160, 1701 .name = "Toshiba TC58FVB160", 1702 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1703 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 1704 + .dev_size = SIZE_2MiB, 1705 + .cmd_set = P_ID_AMD_STD, 1706 + .nr_regions = 4, 1707 .regions = { 1708 ERASEINFO(0x04000,1), 1709 ERASEINFO(0x02000,2), ··· 1716 .mfr_id = MANUFACTURER_TOSHIBA, 1717 .dev_id = TC58FVB321, 1718 .name = "Toshiba TC58FVB321", 1719 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1720 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 1721 + .dev_size = SIZE_4MiB, 1722 + .cmd_set = P_ID_AMD_STD, 1723 + .nr_regions = 2, 1724 .regions = { 1725 ERASEINFO(0x02000,8), 1726 ERASEINFO(0x10000,63) ··· 1731 .mfr_id = MANUFACTURER_TOSHIBA, 1732 .dev_id = TC58FVT321, 1733 .name = "Toshiba TC58FVT321", 1734 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1735 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 1736 + .dev_size = SIZE_4MiB, 1737 + .cmd_set = P_ID_AMD_STD, 1738 + .nr_regions = 2, 1739 .regions = { 1740 ERASEINFO(0x10000,63), 1741 ERASEINFO(0x02000,8) ··· 1746 .mfr_id = MANUFACTURER_TOSHIBA, 1747 .dev_id = TC58FVB641, 1748 .name = "Toshiba TC58FVB641", 1749 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1750 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 1751 + .dev_size = SIZE_8MiB, 1752 + .cmd_set = P_ID_AMD_STD, 1753 + .nr_regions = 2, 1754 .regions = { 1755 ERASEINFO(0x02000,8), 1756 ERASEINFO(0x10000,127) ··· 1761 .mfr_id = MANUFACTURER_TOSHIBA, 1762 .dev_id = 
TC58FVT641, 1763 .name = "Toshiba TC58FVT641", 1764 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1765 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 1766 + .dev_size = SIZE_8MiB, 1767 + .cmd_set = P_ID_AMD_STD, 1768 + .nr_regions = 2, 1769 .regions = { 1770 ERASEINFO(0x10000,127), 1771 ERASEINFO(0x02000,8) ··· 1776 .mfr_id = MANUFACTURER_WINBOND, 1777 .dev_id = W49V002A, 1778 .name = "Winbond W49V002A", 1779 + .devtypes = CFI_DEVICETYPE_X8, 1780 + .uaddr = MTD_UADDR_0x5555_0x2AAA, 1781 + .dev_size = SIZE_256KiB, 1782 + .cmd_set = P_ID_AMD_STD, 1783 + .nr_regions = 4, 1784 .regions = { 1785 ERASEINFO(0x10000, 3), 1786 ERASEINFO(0x08000, 1), ··· 1791 } 1792 }; 1793 1794 + static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base, 1795 struct cfi_private *cfi) 1796 { 1797 map_word result; ··· 1810 return result.x[0] & mask; 1811 } 1812 1813 + static inline u32 jedec_read_id(struct map_info *map, uint32_t base, 1814 struct cfi_private *cfi) 1815 { 1816 map_word result; ··· 1821 return result.x[0] & mask; 1822 } 1823 1824 + static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi) 1825 { 1826 /* Reset */ 1827 ··· 1832 * 0x2aaa, 0xF0 at 0x5555 this will not affect the AMD chips 1833 * as they will ignore the writes and dont care what address 1834 * the F0 is written to */ 1835 + if (cfi->addr_unlock1) { 1836 DEBUG( MTD_DEBUG_LEVEL3, 1837 "reset unlock called %x %x \n", 1838 cfi->addr_unlock1,cfi->addr_unlock2); ··· 1841 } 1842 1843 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); 1844 + /* Some misdesigned Intel chips do not respond for 0xF0 for a reset, 1845 * so ensure we're in read mode. Send both the Intel and the AMD command 1846 * for this. Intel uses 0xff for this, AMD uses 0xff for NOP, so 1847 * this should be safe. ··· 1851 } 1852 1853 1854 static int cfi_jedec_setup(struct cfi_private *p_cfi, int index) 1855 { 1856 int i,num_erase_regions; 1857 + uint8_t uaddr; 1858 1859 + if (! (jedec_table[index].devtypes & p_cfi->device_type)) { 1860 + DEBUG(MTD_DEBUG_LEVEL1, "Rejecting potential %s with incompatible %d-bit device type\n", 1861 + jedec_table[index].name, 4 * (1<<p_cfi->device_type)); 1862 + return 0; 1863 + } 1864 1865 + printk(KERN_INFO "Found: %s\n",jedec_table[index].name); 1866 + 1867 + num_erase_regions = jedec_table[index].nr_regions; 1868 1869 p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL); 1870 if (!p_cfi->cfiq) { ··· 1896 1897 memset(p_cfi->cfiq,0,sizeof(struct cfi_ident)); 1898 1899 + p_cfi->cfiq->P_ID = jedec_table[index].cmd_set; 1900 + p_cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions; 1901 + p_cfi->cfiq->DevSize = jedec_table[index].dev_size; 1902 p_cfi->cfi_mode = CFI_MODE_JEDEC; 1903 1904 for (i=0; i<num_erase_regions; i++){ ··· 1910 p_cfi->mfr = jedec_table[index].mfr_id; 1911 p_cfi->id = jedec_table[index].dev_id; 1912 1913 + uaddr = jedec_table[index].uaddr; 1914 1915 + /* The table has unlock addresses in _bytes_, and we try not to let 1916 + our brains explode when we see the datasheets talking about address 1917 + lines numbered from A-1 to A18. 
The CFI table has unlock addresses 1918 + in device-words according to the mode the device is connected in */ 1919 + p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / p_cfi->device_type; 1920 + p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / p_cfi->device_type; 1921 1922 return 1; /* ok */ 1923 } ··· 1930 * be perfect - consequently there should be some module parameters that 1931 * could be manually specified to force the chip info. 1932 */ 1933 + static inline int jedec_match( uint32_t base, 1934 struct map_info *map, 1935 struct cfi_private *cfi, 1936 const struct amd_flash_info *finfo ) 1937 { 1938 int rc = 0; /* failure until all tests pass */ 1939 u32 mfr, id; 1940 + uint8_t uaddr; 1941 1942 /* 1943 * The IDs must match. For X16 and X32 devices operating in ··· 1950 */ 1951 switch (cfi->device_type) { 1952 case CFI_DEVICETYPE_X8: 1953 + mfr = (uint8_t)finfo->mfr_id; 1954 + id = (uint8_t)finfo->dev_id; 1955 1956 /* bjd: it seems that if we do this, we can end up 1957 * detecting 16bit flashes as an 8bit device, even though ··· 1964 } 1965 break; 1966 case CFI_DEVICETYPE_X16: 1967 + mfr = (uint16_t)finfo->mfr_id; 1968 + id = (uint16_t)finfo->dev_id; 1969 break; 1970 case CFI_DEVICETYPE_X32: 1971 + mfr = (uint16_t)finfo->mfr_id; 1972 + id = (uint32_t)finfo->dev_id; 1973 break; 1974 default: 1975 printk(KERN_WARNING ··· 1984 /* the part size must fit in the memory window */ 1985 DEBUG( MTD_DEBUG_LEVEL3, 1986 "MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n", 1987 + __func__, base, 1 << finfo->dev_size, base + (1 << finfo->dev_size) ); 1988 + if ( base + cfi_interleave(cfi) * ( 1 << finfo->dev_size ) > map->size ) { 1989 DEBUG( MTD_DEBUG_LEVEL3, 1990 "MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n", 1991 __func__, finfo->mfr_id, finfo->dev_id, 1992 + 1 << finfo->dev_size ); 1993 goto match_done; 1994 } 1995 1996 + if (! (finfo->devtypes & cfi->device_type)) 1997 goto match_done; 1998 + 1999 + uaddr = finfo->uaddr; 2000 2001 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n", 2002 __func__, cfi->addr_unlock1, cfi->addr_unlock2 ); 2003 if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr 2004 + && ( unlock_addrs[uaddr].addr1 / cfi->device_type != cfi->addr_unlock1 || 2005 + unlock_addrs[uaddr].addr2 / cfi->device_type != cfi->addr_unlock2 ) ) { 2006 DEBUG( MTD_DEBUG_LEVEL3, 2007 "MTD %s(): 0x%.4x 0x%.4x did not match\n", 2008 __func__, ··· 2042 * were truly frobbing a real device. 
2043 */ 2044 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): return to ID mode\n", __func__ ); 2045 + if (cfi->addr_unlock1) { 2046 cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); 2047 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL); 2048 } ··· 2068 if (MTD_UADDR_UNNECESSARY == uaddr_idx) 2069 return 0; 2070 2071 + cfi->addr_unlock1 = unlock_addrs[uaddr_idx].addr1 / cfi->device_type; 2072 + cfi->addr_unlock2 = unlock_addrs[uaddr_idx].addr2 / cfi->device_type; 2073 } 2074 2075 /* Make certain we aren't probing past the end of map */ ··· 2081 2082 } 2083 /* Ensure the unlock addresses we try stay inside the map */ 2084 + probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, cfi_interleave(cfi), cfi->device_type); 2085 + probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, cfi_interleave(cfi), cfi->device_type); 2086 if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) || 2087 ((base + probe_offset2 + map_bankwidth(map)) >= map->size)) 2088 goto retry; 2089 2090 /* Reset */ 2091 jedec_reset(base, map, cfi); ··· 2128 } 2129 goto retry; 2130 } else { 2131 + uint16_t mfr; 2132 + uint16_t id; 2133 2134 /* Make sure it is a chip of the same manufacturer and id */ 2135 mfr = jedec_read_mfr(map, base, cfi);
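A note on the unlock-address rework above: jedec_table now stores unlock addresses in bytes, and cfi_jedec_setup() divides them by the device type to obtain the device-word addresses the command layer expects. A minimal sketch of that conversion, assuming the conventional CFI_DEVICETYPE_* encoding (X8 = 1, X16 = 2, X32 = 4); the sample address is illustrative:

    #include <stdint.h>

    #define CFI_DEVICETYPE_X16 2    /* 16-bit device: two bytes per device-word */

    /* Sketch only: byte-based table address -> device-word command address. */
    static uint32_t to_device_words(uint32_t addr_in_bytes, int device_type)
    {
            return addr_in_bytes / device_type;     /* 0x0AAA / 2 == 0x0555 */
    }

The same division appears in jedec_match() and in the probe retry path, so a single table row such as MTD_UADDR_0x0AAA_0x0555 now serves both x8 and x16 wirings.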
+8 -1
drivers/mtd/cmdlinepart.c
··· 9 * 10 * mtdparts=<mtddef>[;<mtddef>] 11 * <mtddef> := <mtd-id>:<partdef>[,<partdef>] 12 - * <partdef> := <size>[@offset][<name>][ro] 13 * <mtd-id> := unique name used in mapping driver/device (mtd->name) 14 * <size> := standard linux memsize OR "-" to denote all remaining space 15 * <name> := '(' NAME ')' ··· 140 if (strncmp(s, "ro", 2) == 0) 141 { 142 mask_flags |= MTD_WRITEABLE; 143 s += 2; 144 } 145
··· 9 * 10 * mtdparts=<mtddef>[;<mtddef>] 11 * <mtddef> := <mtd-id>:<partdef>[,<partdef>] 12 + * <partdef> := <size>[@offset][<name>][ro][lk] 13 * <mtd-id> := unique name used in mapping driver/device (mtd->name) 14 * <size> := standard linux memsize OR "-" to denote all remaining space 15 * <name> := '(' NAME ')' ··· 140 if (strncmp(s, "ro", 2) == 0) 141 { 142 mask_flags |= MTD_WRITEABLE; 143 + s += 2; 144 + } 145 + 146 + /* if lk is found do NOT unlock the MTD partition */ 147 + if (strncmp(s, "lk", 2) == 0) 148 { 149 mask_flags |= MTD_POWERUP_LOCK; 150 s += 2; 151 } 152
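With the grammar extension above, suffixing a partition with "lk" sets MTD_POWERUP_LOCK in its mask_flags, so that partition is left locked at power-up rather than being unlocked by the core. A hypothetical command line (the mtd-id and sizes are illustrative, not from the patch):

    mtdparts=physmap-flash.0:256k(boot)ro,256k(env)lk,-(rootfs)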
+2 -2
drivers/mtd/devices/doc2000.c
··· 632 len = ((from | 0x1ff) + 1) - from; 633 634 /* The ECC will not be calculated correctly if less than 512 is read */ 635 - if (len != 0x200 && eccbuf) 636 printk(KERN_WARNING 637 "ECC needs a full sector read (adr: %lx size %lx)\n", 638 (long) from, (long) len); ··· 896 /* Let the caller know we completed it */ 897 *retlen += len; 898 899 - if (eccbuf) { 900 unsigned char x[8]; 901 size_t dummy; 902 int ret;
··· 632 len = ((from | 0x1ff) + 1) - from; 633 634 /* The ECC will not be calculated correctly if less than 512 is read */ 635 + if (len != 0x200) 636 printk(KERN_WARNING 637 "ECC needs a full sector read (adr: %lx size %lx)\n", 638 (long) from, (long) len); ··· 896 /* Let the caller know we completed it */ 897 *retlen += len; 898 899 + { 900 unsigned char x[8]; 901 size_t dummy; 902 int ret;
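The doc2000.c hunks remove guards that could never fail: eccbuf there is a statically defined array, so it decays to a non-NULL pointer and the test is true at compile time. An illustrative reduction of the pattern (not the driver code itself):

    static unsigned char eccbuf[6];

    static int guard_is_dead(void)
    {
            return eccbuf ? 1 : 0;  /* address of a static array: always 1 */
    }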
+1 -1
drivers/mtd/devices/doc2001plus.c
··· 748 WriteDOC(DoC_GetDataOffset(mtd, &fto), docptr, Mplus_FlashCmd); 749 750 /* On interleaved devices the flags for 2nd half 512 are before data */ 751 - if (eccbuf && before) 752 fto -= 2; 753 754 /* issue the Serial Data In command to initiate the Page Program process */
··· 748 WriteDOC(DoC_GetDataOffset(mtd, &fto), docptr, Mplus_FlashCmd); 749 750 /* On interleaved devices the flags for 2nd half 512 are before data */ 751 + if (before) 752 fto -= 2; 753 754 /* issue the Serial Data In command to initiate the Page Program process */
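The doc2001plus.c hunk is the same cleanup: the 'eccbuf &&' conjunct was statically true, so only the meaningful 'before' test is kept.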
+1 -1
drivers/mtd/devices/lart.c
··· 323 /* put the flash back into command mode */ 324 write32 (DATA_TO_FLASH (READ_ARRAY),0x00000000); 325 326 - return (manufacturer == FLASH_MANUFACTURER && (devtype == FLASH_DEVICE_16mbit_TOP || FLASH_DEVICE_16mbit_BOTTOM)); 327 } 328 329 /*
··· 323 /* put the flash back into command mode */ 324 write32 (DATA_TO_FLASH (READ_ARRAY),0x00000000); 325 326 + return (manufacturer == FLASH_MANUFACTURER && (devtype == FLASH_DEVICE_16mbit_TOP || devtype == FLASH_DEVICE_16mbit_BOTTOM)); 327 } 328 329 /*
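The lart.c fix repairs a misparenthesized OR: in the old return the second operand was the bare constant, which is non-zero, so the probe accepted any device type. A minimal sketch of the bug, with hypothetical ID values:

    #define DEV_TOP    0x88c0   /* illustrative IDs, not the driver's */
    #define DEV_BOTTOM 0x88c1

    /* parses as (devtype == DEV_TOP) || (DEV_BOTTOM != 0): always true */
    static int buggy(int devtype) { return devtype == DEV_TOP || DEV_BOTTOM; }

    /* the intended comparison against both IDs */
    static int fixed(int devtype) { return devtype == DEV_TOP ||
                                           devtype == DEV_BOTTOM; }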
+1 -1
drivers/mtd/devices/mtd_dataflash.c
··· 420 status = dataflash_waitready(priv->spi); 421 422 /* Check result of the compare operation */ 423 - if ((status & (1 << 6)) == 1) { 424 printk(KERN_ERR "%s: compare page %u, err %d\n", 425 spi->dev.bus_id, pageaddr, status); 426 remaining = 0;
··· 420 status = dataflash_waitready(priv->spi); 421 422 /* Check result of the compare operation */ 423 + if (status & (1 << 6)) { 424 printk(KERN_ERR "%s: compare page %u, err %d\n", 425 spi->dev.bus_id, pageaddr, status); 426 remaining = 0;
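Similarly for mtd_dataflash.c: 'status & (1 << 6)' evaluates to either 0 or 0x40, never 1, so the old '== 1' comparison made the compare-error branch unreachable. Testing the masked value for non-zero, as the replacement line does, is the conventional form; a sketch with an illustrative status value:

    static void compare_tests(void)
    {
            unsigned char status = 0x40;              /* mismatch bit set */
            int old_test = (status & (1 << 6)) == 1;  /* 0x40 == 1 -> 0: missed */
            int new_test = (status & (1 << 6)) != 0;  /* 1: error reported */
            (void)old_test; (void)new_test;
    }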
+1 -8
drivers/mtd/maps/Kconfig
··· 110 Sun Microsystems boardsets. This driver will require CFI support 111 in the kernel, so if you did not enable CFI previously, do that now. 112 113 - config MTD_PNC2000 114 - tristate "CFI Flash device mapped on Photron PNC-2000" 115 - depends on X86 && MTD_CFI && MTD_PARTITIONS 116 - help 117 - PNC-2000 is the name of Network Camera product from PHOTRON 118 - Ltd. in Japan. It uses CFI-compliant flash. 119 - 120 config MTD_SC520CDP 121 tristate "CFI Flash device mapped on AMD SC520 CDP" 122 depends on X86 && MTD_CFI && MTD_CONCAT ··· 569 default "4" 570 571 config MTD_SHARP_SL 572 - bool "ROM mapped on Sharp SL Series" 573 depends on ARCH_PXA 574 help 575 This enables access to the flash chip on the Sharp SL Series of PDAs.
··· 110 Sun Microsystems boardsets. This driver will require CFI support 111 in the kernel, so if you did not enable CFI previously, do that now. 112 113 config MTD_SC520CDP 114 tristate "CFI Flash device mapped on AMD SC520 CDP" 115 depends on X86 && MTD_CFI && MTD_CONCAT ··· 576 default "4" 577 578 config MTD_SHARP_SL 579 + tristate "ROM mapped on Sharp SL Series" 580 depends on ARCH_PXA 581 help 582 This enables access to the flash chip on the Sharp SL Series of PDAs.
-1
drivers/mtd/maps/Makefile
··· 28 obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o 29 obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o 30 obj-$(CONFIG_MTD_PMC_MSP_RAMROOT)+= pmcmsp-ramroot.o 31 - obj-$(CONFIG_MTD_PNC2000) += pnc2000.o 32 obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o 33 obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o 34 obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o
··· 28 obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o 29 obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o 30 obj-$(CONFIG_MTD_PMC_MSP_RAMROOT)+= pmcmsp-ramroot.o 31 obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o 32 obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o 33 obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o
+112 -64
drivers/mtd/maps/physmap.c
··· 20 #include <linux/mtd/map.h> 21 #include <linux/mtd/partitions.h> 22 #include <linux/mtd/physmap.h> 23 #include <asm/io.h> 24 25 struct physmap_flash_info { 26 - struct mtd_info *mtd; 27 - struct map_info map; 28 struct resource *res; 29 #ifdef CONFIG_MTD_PARTITIONS 30 int nr_parts; ··· 36 #endif 37 }; 38 39 - 40 static int physmap_flash_remove(struct platform_device *dev) 41 { 42 struct physmap_flash_info *info; 43 struct physmap_flash_data *physmap_data; 44 45 info = platform_get_drvdata(dev); 46 if (info == NULL) ··· 49 50 physmap_data = dev->dev.platform_data; 51 52 - if (info->mtd != NULL) { 53 - #ifdef CONFIG_MTD_PARTITIONS 54 - if (info->nr_parts) { 55 - del_mtd_partitions(info->mtd); 56 - kfree(info->parts); 57 - } else if (physmap_data->nr_parts) { 58 - del_mtd_partitions(info->mtd); 59 - } else { 60 - del_mtd_device(info->mtd); 61 - } 62 - #else 63 - del_mtd_device(info->mtd); 64 - #endif 65 - map_destroy(info->mtd); 66 } 67 68 - if (info->map.virt != NULL) 69 - iounmap(info->map.virt); 70 71 if (info->res != NULL) { 72 release_resource(info->res); ··· 95 struct physmap_flash_data *physmap_data; 96 struct physmap_flash_info *info; 97 const char **probe_type; 98 - int err; 99 100 physmap_data = dev->dev.platform_data; 101 if (physmap_data == NULL) 102 return -ENODEV; 103 - 104 - printk(KERN_NOTICE "physmap platform flash device: %.8llx at %.8llx\n", 105 - (unsigned long long)(dev->resource->end - dev->resource->start + 1), 106 - (unsigned long long)dev->resource->start); 107 108 info = kzalloc(sizeof(struct physmap_flash_info), GFP_KERNEL); 109 if (info == NULL) { ··· 111 112 platform_set_drvdata(dev, info); 113 114 - info->res = request_mem_region(dev->resource->start, 115 - dev->resource->end - dev->resource->start + 1, 116 - dev->dev.bus_id); 117 - if (info->res == NULL) { 118 - dev_err(&dev->dev, "Could not reserve memory region\n"); 119 - err = -ENOMEM; 120 - goto err_out; 121 } 122 123 - info->map.name = dev->dev.bus_id; 124 - info->map.phys = dev->resource->start; 125 - info->map.size = dev->resource->end - dev->resource->start + 1; 126 - info->map.bankwidth = physmap_data->width; 127 - info->map.set_vpp = physmap_data->set_vpp; 128 - 129 - info->map.virt = ioremap(info->map.phys, info->map.size); 130 - if (info->map.virt == NULL) { 131 - dev_err(&dev->dev, "Failed to ioremap flash region\n"); 132 - err = EIO; 133 - goto err_out; 134 - } 135 - 136 - simple_map_init(&info->map); 137 - 138 - probe_type = rom_probe_types; 139 - for (; info->mtd == NULL && *probe_type != NULL; probe_type++) 140 - info->mtd = do_map_probe(*probe_type, &info->map); 141 - if (info->mtd == NULL) { 142 - dev_err(&dev->dev, "map_probe failed\n"); 143 err = -ENXIO; 144 - goto err_out; 145 } 146 - info->mtd->owner = THIS_MODULE; 147 148 #ifdef CONFIG_MTD_PARTITIONS 149 - err = parse_mtd_partitions(info->mtd, part_probe_types, &info->parts, 0); 150 if (err > 0) { 151 - add_mtd_partitions(info->mtd, info->parts, err); 152 return 0; 153 } 154 155 if (physmap_data->nr_parts) { 156 printk(KERN_NOTICE "Using physmap partition information\n"); 157 - add_mtd_partitions(info->mtd, physmap_data->parts, 158 - physmap_data->nr_parts); 159 return 0; 160 } 161 #endif 162 163 - add_mtd_device(info->mtd); 164 return 0; 165 166 err_out: ··· 200 { 201 struct physmap_flash_info *info = platform_get_drvdata(dev); 202 int ret = 0; 203 204 if (info) 205 - ret = info->mtd->suspend(info->mtd); 206 207 return ret; 208 } ··· 212 static int physmap_flash_resume(struct platform_device *dev) 213 { 214 struct 
physmap_flash_info *info = platform_get_drvdata(dev); 215 if (info) 216 - info->mtd->resume(info->mtd); 217 return 0; 218 } 219 220 static void physmap_flash_shutdown(struct platform_device *dev) 221 { 222 struct physmap_flash_info *info = platform_get_drvdata(dev); 223 - if (info && info->mtd->suspend(info->mtd) == 0) 224 - info->mtd->resume(info->mtd); 225 } 226 #endif 227 228 static struct platform_driver physmap_flash_driver = { 229 .probe = physmap_flash_probe, 230 .remove = physmap_flash_remove, 231 - #ifdef CONFIG_PM 232 .suspend = physmap_flash_suspend, 233 .resume = physmap_flash_resume, 234 .shutdown = physmap_flash_shutdown, 235 - #endif 236 .driver = { 237 .name = "physmap-flash", 238 },
··· 20 #include <linux/mtd/map.h> 21 #include <linux/mtd/partitions.h> 22 #include <linux/mtd/physmap.h> 23 + #include <linux/mtd/concat.h> 24 #include <asm/io.h> 25 26 + #define MAX_RESOURCES 4 27 + 28 struct physmap_flash_info { 29 + struct mtd_info *mtd[MAX_RESOURCES]; 30 + struct mtd_info *cmtd; 31 + struct map_info map[MAX_RESOURCES]; 32 struct resource *res; 33 #ifdef CONFIG_MTD_PARTITIONS 34 int nr_parts; ··· 32 #endif 33 }; 34 35 static int physmap_flash_remove(struct platform_device *dev) 36 { 37 struct physmap_flash_info *info; 38 struct physmap_flash_data *physmap_data; 39 + int i; 40 41 info = platform_get_drvdata(dev); 42 if (info == NULL) ··· 45 46 physmap_data = dev->dev.platform_data; 47 48 + #ifdef CONFIG_MTD_CONCAT 49 + if (info->cmtd != info->mtd[0]) { 50 + del_mtd_device(info->cmtd); 51 + mtd_concat_destroy(info->cmtd); 52 } 53 + #endif 54 55 + for (i = 0; i < MAX_RESOURCES; i++) { 56 + if (info->mtd[i] != NULL) { 57 + #ifdef CONFIG_MTD_PARTITIONS 58 + if (info->nr_parts) { 59 + del_mtd_partitions(info->mtd[i]); 60 + kfree(info->parts); 61 + } else if (physmap_data->nr_parts) { 62 + del_mtd_partitions(info->mtd[i]); 63 + } else { 64 + del_mtd_device(info->mtd[i]); 65 + } 66 + #else 67 + del_mtd_device(info->mtd[i]); 68 + #endif 69 + map_destroy(info->mtd[i]); 70 + } 71 + 72 + if (info->map[i].virt != NULL) 73 + iounmap(info->map[i].virt); 74 + } 75 76 if (info->res != NULL) { 77 release_resource(info->res); ··· 82 struct physmap_flash_data *physmap_data; 83 struct physmap_flash_info *info; 84 const char **probe_type; 85 + int err = 0; 86 + int i; 87 + int devices_found = 0; 88 89 physmap_data = dev->dev.platform_data; 90 if (physmap_data == NULL) 91 return -ENODEV; 92 93 info = kzalloc(sizeof(struct physmap_flash_info), GFP_KERNEL); 94 if (info == NULL) { ··· 100 101 platform_set_drvdata(dev, info); 102 103 + for (i = 0; i < dev->num_resources; i++) { 104 + printk(KERN_NOTICE "physmap platform flash device: %.8llx at %.8llx\n", 105 + (unsigned long long)(dev->resource[i].end - dev->resource[i].start + 1), 106 + (unsigned long long)dev->resource[i].start); 107 + 108 + info->res = request_mem_region(dev->resource[i].start, 109 + dev->resource[i].end - dev->resource[i].start + 1, 110 + dev->dev.bus_id); 111 + if (info->res == NULL) { 112 + dev_err(&dev->dev, "Could not reserve memory region\n"); 113 + err = -ENOMEM; 114 + goto err_out; 115 + } 116 + 117 + info->map[i].name = dev->dev.bus_id; 118 + info->map[i].phys = dev->resource[i].start; 119 + info->map[i].size = dev->resource[i].end - dev->resource[i].start + 1; 120 + info->map[i].bankwidth = physmap_data->width; 121 + info->map[i].set_vpp = physmap_data->set_vpp; 122 + 123 + info->map[i].virt = ioremap(info->map[i].phys, info->map[i].size); 124 + if (info->map[i].virt == NULL) { 125 + dev_err(&dev->dev, "Failed to ioremap flash region\n"); 126 + err = EIO; 127 + goto err_out; 128 + } 129 + 130 + simple_map_init(&info->map[i]); 131 + 132 + probe_type = rom_probe_types; 133 + for (; info->mtd[i] == NULL && *probe_type != NULL; probe_type++) 134 + info->mtd[i] = do_map_probe(*probe_type, &info->map[i]); 135 + if (info->mtd[i] == NULL) { 136 + dev_err(&dev->dev, "map_probe failed\n"); 137 + err = -ENXIO; 138 + goto err_out; 139 + } else { 140 + devices_found++; 141 + } 142 + info->mtd[i]->owner = THIS_MODULE; 143 } 144 145 + if (devices_found == 1) { 146 + info->cmtd = info->mtd[0]; 147 + } else if (devices_found > 1) { 148 + /* 149 + * We detected multiple devices. Concatenate them together. 
150 + */ 151 + #ifdef CONFIG_MTD_CONCAT 152 + info->cmtd = mtd_concat_create(info->mtd, devices_found, dev->dev.bus_id); 153 + if (info->cmtd == NULL) 154 + err = -ENXIO; 155 + #else 156 + printk(KERN_ERR "physmap-flash: multiple devices " 157 + "found but MTD concat support disabled.\n"); 158 err = -ENXIO; 159 + #endif 160 } 161 + if (err) 162 + goto err_out; 163 164 #ifdef CONFIG_MTD_PARTITIONS 165 + err = parse_mtd_partitions(info->cmtd, part_probe_types, &info->parts, 0); 166 if (err > 0) { 167 + add_mtd_partitions(info->cmtd, info->parts, err); 168 return 0; 169 } 170 171 if (physmap_data->nr_parts) { 172 printk(KERN_NOTICE "Using physmap partition information\n"); 173 + add_mtd_partitions(info->cmtd, physmap_data->parts, 174 + physmap_data->nr_parts); 175 return 0; 176 } 177 #endif 178 179 + add_mtd_device(info->cmtd); 180 return 0; 181 182 err_out: ··· 162 { 163 struct physmap_flash_info *info = platform_get_drvdata(dev); 164 int ret = 0; 165 + int i; 166 167 if (info) 168 + for (i = 0; i < MAX_RESOURCES; i++) 169 + ret |= info->mtd[i]->suspend(info->mtd[i]); 170 171 return ret; 172 } ··· 172 static int physmap_flash_resume(struct platform_device *dev) 173 { 174 struct physmap_flash_info *info = platform_get_drvdata(dev); 175 + int i; 176 + 177 if (info) 178 + for (i = 0; i < MAX_RESOURCES; i++) 179 + info->mtd[i]->resume(info->mtd[i]); 180 return 0; 181 } 182 183 static void physmap_flash_shutdown(struct platform_device *dev) 184 { 185 struct physmap_flash_info *info = platform_get_drvdata(dev); 186 + int i; 187 + 188 + for (i = 0; i < MAX_RESOURCES; i++) 189 + if (info && info->mtd[i]->suspend(info->mtd[i]) == 0) 190 + info->mtd[i]->resume(info->mtd[i]); 191 } 192 + #else 193 + #define physmap_flash_suspend NULL 194 + #define physmap_flash_resume NULL 195 + #define physmap_flash_shutdown NULL 196 #endif 197 198 static struct platform_driver physmap_flash_driver = { 199 .probe = physmap_flash_probe, 200 .remove = physmap_flash_remove, 201 .suspend = physmap_flash_suspend, 202 .resume = physmap_flash_resume, 203 .shutdown = physmap_flash_shutdown, 204 .driver = { 205 .name = "physmap-flash", 206 },
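With the physmap rework above, board code may pass several memory resources (up to MAX_RESOURCES) and, when CONFIG_MTD_CONCAT is enabled, they are registered as one concatenated MTD. A minimal board-code sketch under that assumption; the addresses and device id are hypothetical, while the "physmap-flash" name and the physmap_flash_data fields come from the driver:

    #include <linux/platform_device.h>
    #include <linux/mtd/physmap.h>

    static struct resource board_flash_resources[] = {
            { .start = 0x00000000, .end = 0x00ffffff, .flags = IORESOURCE_MEM },
            { .start = 0x01000000, .end = 0x01ffffff, .flags = IORESOURCE_MEM },
    };

    static struct physmap_flash_data board_flash_data = {
            .width = 2,                     /* bankwidth in bytes */
    };

    static struct platform_device board_flash = {
            .name           = "physmap-flash",
            .id             = 0,
            .dev            = { .platform_data = &board_flash_data },
            .num_resources  = ARRAY_SIZE(board_flash_resources),
            .resource       = board_flash_resources,
    };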
+27 -61
drivers/mtd/maps/physmap_of.c
··· 80 81 return nr_parts; 82 } 83 - 84 - static int __devinit parse_partitions(struct of_flash *info, 85 - struct of_device *dev) 86 - { 87 - const char *partname; 88 - static const char *part_probe_types[] 89 - = { "cmdlinepart", "RedBoot", NULL }; 90 - struct device_node *dp = dev->node, *pp; 91 - int nr_parts, i; 92 - 93 - /* First look for RedBoot table or partitions on the command 94 - * line, these take precedence over device tree information */ 95 - nr_parts = parse_mtd_partitions(info->mtd, part_probe_types, 96 - &info->parts, 0); 97 - if (nr_parts > 0) { 98 - add_mtd_partitions(info->mtd, info->parts, nr_parts); 99 - return 0; 100 - } 101 - 102 - /* First count the subnodes */ 103 - nr_parts = 0; 104 - for (pp = dp->child; pp; pp = pp->sibling) 105 - nr_parts++; 106 - 107 - if (nr_parts == 0) 108 - return parse_obsolete_partitions(dev, info, dp); 109 - 110 - info->parts = kzalloc(nr_parts * sizeof(*info->parts), 111 - GFP_KERNEL); 112 - if (!info->parts) 113 - return -ENOMEM; 114 - 115 - for (pp = dp->child, i = 0; pp; pp = pp->sibling, i++) { 116 - const u32 *reg; 117 - int len; 118 - 119 - reg = of_get_property(pp, "reg", &len); 120 - if (!reg || (len != 2*sizeof(u32))) { 121 - dev_err(&dev->dev, "Invalid 'reg' on %s\n", 122 - dp->full_name); 123 - kfree(info->parts); 124 - info->parts = NULL; 125 - return -EINVAL; 126 - } 127 - info->parts[i].offset = reg[0]; 128 - info->parts[i].size = reg[1]; 129 - 130 - partname = of_get_property(pp, "label", &len); 131 - if (!partname) 132 - partname = of_get_property(pp, "name", &len); 133 - info->parts[i].name = (char *)partname; 134 - 135 - if (of_get_property(pp, "read-only", &len)) 136 - info->parts[i].mask_flags = MTD_WRITEABLE; 137 - } 138 - 139 - return nr_parts; 140 - } 141 #else /* MTD_PARTITIONS */ 142 #define OF_FLASH_PARTS(info) (0) 143 #define parse_partitions(info, dev) (0) ··· 154 static int __devinit of_flash_probe(struct of_device *dev, 155 const struct of_device_id *match) 156 { 157 struct device_node *dp = dev->node; 158 struct resource res; 159 struct of_flash *info; ··· 220 } 221 info->mtd->owner = THIS_MODULE; 222 223 - err = parse_partitions(info, dev); 224 if (err < 0) 225 - goto err_out; 226 227 if (err > 0) 228 - add_mtd_partitions(info->mtd, OF_FLASH_PARTS(info), err); 229 else 230 add_mtd_device(info->mtd); 231 232 return 0;
··· 80 81 return nr_parts; 82 } 83 #else /* MTD_PARTITIONS */ 84 #define OF_FLASH_PARTS(info) (0) 85 #define parse_partitions(info, dev) (0) ··· 212 static int __devinit of_flash_probe(struct of_device *dev, 213 const struct of_device_id *match) 214 { 215 + #ifdef CONFIG_MTD_PARTITIONS 216 + static const char *part_probe_types[] 217 + = { "cmdlinepart", "RedBoot", NULL }; 218 + #endif 219 struct device_node *dp = dev->node; 220 struct resource res; 221 struct of_flash *info; ··· 274 } 275 info->mtd->owner = THIS_MODULE; 276 277 + #ifdef CONFIG_MTD_PARTITIONS 278 + /* First look for RedBoot table or partitions on the command 279 + * line, these take precedence over device tree information */ 280 + err = parse_mtd_partitions(info->mtd, part_probe_types, 281 + &info->parts, 0); 282 if (err < 0) 283 + return err; 284 + 285 + #ifdef CONFIG_MTD_OF_PARTS 286 + if (err == 0) { 287 + err = of_mtd_parse_partitions(&dev->dev, info->mtd, 288 + dp, &info->parts); 289 + if (err < 0) 290 + return err; 291 + } 292 + #endif 293 + 294 + if (err == 0) { 295 + err = parse_obsolete_partitions(dev, info, dp); 296 + if (err < 0) 297 + return err; 298 + } 299 300 if (err > 0) 301 + add_mtd_partitions(info->mtd, info->parts, err); 302 else 303 + #endif 304 add_mtd_device(info->mtd); 305 306 return 0;
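The driver-local parser is gone: command-line and RedBoot tables are still tried first, then the shared MTD_OF_PARTS code (of_mtd_parse_partitions) reads partitions from the flash node's children, and only then does the obsolete binding get a look. For reference, a hedged sketch of the child-node shape ofpart consumes, per Documentation/powerpc/booting-without-of.txt; the offsets, sizes and labels here are invented:

    flash@0 {
            /* mapping properties elided */

            partition@0 {
                    reg = <0x0 0x40000>;        /* offset, length */
                    label = "u-boot";
                    read-only;                  /* becomes an MTD_WRITEABLE mask */
            };

            partition@40000 {
                    reg = <0x40000 0xfc0000>;
                    label = "filesystem";
            };
    };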
-93
drivers/mtd/maps/pnc2000.c
··· 1 - /* 2 - * pnc2000.c - mapper for Photron PNC-2000 board. 3 - * 4 - * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp> 5 - * 6 - * This code is GPL 7 - * 8 - * $Id: pnc2000.c,v 1.18 2005/11/07 11:14:28 gleixner Exp $ 9 - */ 10 - 11 - #include <linux/module.h> 12 - #include <linux/types.h> 13 - #include <linux/kernel.h> 14 - #include <linux/init.h> 15 - 16 - #include <linux/mtd/mtd.h> 17 - #include <linux/mtd/map.h> 18 - #include <linux/mtd/partitions.h> 19 - 20 - 21 - #define WINDOW_ADDR 0xbf000000 22 - #define WINDOW_SIZE 0x00400000 23 - 24 - /* 25 - * MAP DRIVER STUFF 26 - */ 27 - 28 - 29 - static struct map_info pnc_map = { 30 - .name = "PNC-2000", 31 - .size = WINDOW_SIZE, 32 - .bankwidth = 4, 33 - .phys = 0xFFFFFFFF, 34 - .virt = (void __iomem *)WINDOW_ADDR, 35 - }; 36 - 37 - 38 - /* 39 - * MTD 'PARTITIONING' STUFF 40 - */ 41 - static struct mtd_partition pnc_partitions[3] = { 42 - { 43 - .name = "PNC-2000 boot firmware", 44 - .size = 0x20000, 45 - .offset = 0 46 - }, 47 - { 48 - .name = "PNC-2000 kernel", 49 - .size = 0x1a0000, 50 - .offset = 0x20000 51 - }, 52 - { 53 - .name = "PNC-2000 filesystem", 54 - .size = 0x240000, 55 - .offset = 0x1c0000 56 - } 57 - }; 58 - 59 - /* 60 - * This is the master MTD device for which all the others are just 61 - * auto-relocating aliases. 62 - */ 63 - static struct mtd_info *mymtd; 64 - 65 - static int __init init_pnc2000(void) 66 - { 67 - printk(KERN_NOTICE "Photron PNC-2000 flash mapping: %x at %x\n", WINDOW_SIZE, WINDOW_ADDR); 68 - 69 - simple_map_init(&pnc_map); 70 - 71 - mymtd = do_map_probe("cfi_probe", &pnc_map); 72 - if (mymtd) { 73 - mymtd->owner = THIS_MODULE; 74 - return add_mtd_partitions(mymtd, pnc_partitions, 3); 75 - } 76 - 77 - return -ENXIO; 78 - } 79 - 80 - static void __exit cleanup_pnc2000(void) 81 - { 82 - if (mymtd) { 83 - del_mtd_partitions(mymtd); 84 - map_destroy(mymtd); 85 - } 86 - } 87 - 88 - module_init(init_pnc2000); 89 - module_exit(cleanup_pnc2000); 90 - 91 - MODULE_LICENSE("GPL"); 92 - MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp>"); 93 - MODULE_DESCRIPTION("MTD map driver for Photron PNC-2000 board");
···
+1 -1
drivers/mtd/maps/scb2_flash.c
··· 79 struct cfi_private *cfi = map->fldrv_priv; 80 81 /* barf if this doesn't look right */ 82 - if (cfi->cfiq->InterfaceDesc != 1) { 83 printk(KERN_ERR MODNAME ": unsupported InterfaceDesc: %#x\n", 84 cfi->cfiq->InterfaceDesc); 85 return -1;
··· 79 struct cfi_private *cfi = map->fldrv_priv; 80 81 /* barf if this doesn't look right */ 82 + if (cfi->cfiq->InterfaceDesc != CFI_INTERFACE_X16_ASYNC) { 83 printk(KERN_ERR MODNAME ": unsupported InterfaceDesc: %#x\n", 84 cfi->cfiq->InterfaceDesc); 85 return -1;
+1 -1
drivers/mtd/mtd_blkdevs.c
··· 248 return -EBUSY; 249 } 250 251 - mutex_init(&new->lock); 252 list_add_tail(&new->list, &tr->devs); 253 added: 254 if (!tr->writesect) 255 new->readonly = 1; 256
··· 248 return -EBUSY; 249 } 250 251 list_add_tail(&new->list, &tr->devs); 252 added: 253 + mutex_init(&new->lock); 254 if (!tr->writesect) 255 new->readonly = 1; 256
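The moved mutex_init() is a correctness fix rather than churn: a device registered through the `added:` shortcut previously went onto the list with its lock never initialized. Initializing at the common label covers both paths; the same pattern in isolation, with my_dev/my_register purely illustrative:

    #include <linux/list.h>
    #include <linux/mutex.h>

    struct my_dev {
            struct list_head list;
            struct mutex lock;
    };

    /* Sketch only: converge all entry paths on one initialization point
     * so nothing can observe a published object with an unusable lock.
     */
    static void my_register(struct my_dev *new, struct list_head *devs,
                            int reused_slot)
    {
            if (!reused_slot)
                    list_add_tail(&new->list, devs);
            /* common path, reached from every branch */
            mutex_init(&new->lock);
    }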
+6 -2
drivers/mtd/mtdchar.c
··· 481 { 482 struct mtd_oob_buf buf; 483 struct mtd_oob_ops ops; 484 485 if(!(file->f_mode & 2)) 486 return -EPERM; ··· 521 buf.start &= ~(mtd->oobsize - 1); 522 ret = mtd->write_oob(mtd, buf.start, &ops); 523 524 - if (copy_to_user(argp + sizeof(uint32_t), &ops.oobretlen, 525 - sizeof(uint32_t))) 526 ret = -EFAULT; 527 528 kfree(ops.oobbuf);
··· 481 { 482 struct mtd_oob_buf buf; 483 struct mtd_oob_ops ops; 484 + uint32_t retlen; 485 486 if(!(file->f_mode & 2)) 487 return -EPERM; ··· 520 buf.start &= ~(mtd->oobsize - 1); 521 ret = mtd->write_oob(mtd, buf.start, &ops); 522 523 + if (ops.oobretlen > 0xFFFFFFFFU) 524 + ret = -EOVERFLOW; 525 + retlen = ops.oobretlen; 526 + if (copy_to_user(&((struct mtd_oob_buf *)argp)->length, 527 + &retlen, sizeof(buf.length))) 528 ret = -EFAULT; 529 530 kfree(ops.oobbuf);
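Two distinct bugs are fixed in this hunk: the copy-out no longer hard-codes the field's position as `argp + sizeof(uint32_t)`, and the size_t oobretlen is range-checked before being narrowed into the user's 32-bit field. Condensed, the copy-out pattern looks like this (argp, ops and ret are the handler's locals, as above):

    struct mtd_oob_buf __user *p = argp;
    uint32_t retlen;

    if (ops.oobretlen > 0xFFFFFFFFU)
            ret = -EOVERFLOW;               /* would not fit the u32 field */
    retlen = ops.oobretlen;
    /* address the member by name, never by a hand-computed offset */
    if (copy_to_user(&p->length, &retlen, sizeof(p->length)))
            ret = -EFAULT;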
+1 -1
drivers/mtd/mtdcore.c
··· 61 62 /* Some chips always power up locked. Unlock them now */ 63 if ((mtd->flags & MTD_WRITEABLE) 64 - && (mtd->flags & MTD_STUPID_LOCK) && mtd->unlock) { 65 if (mtd->unlock(mtd, 0, mtd->size)) 66 printk(KERN_WARNING 67 "%s: unlock failed, "
··· 61 62 /* Some chips always power up locked. Unlock them now */ 63 if ((mtd->flags & MTD_WRITEABLE) 64 + && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) { 65 if (mtd->unlock(mtd, 0, mtd->size)) 66 printk(KERN_WARNING 67 "%s: unlock failed, "
+129 -66
drivers/mtd/mtdoops.c
··· 28 #include <linux/workqueue.h> 29 #include <linux/sched.h> 30 #include <linux/wait.h> 31 #include <linux/mtd/mtd.h> 32 33 #define OOPS_PAGE_SIZE 4096 34 35 - static struct mtdoops_context { 36 int mtd_index; 37 - struct work_struct work; 38 struct mtd_info *mtd; 39 int oops_pages; 40 int nextpage; 41 int nextcount; 42 43 void *oops_buf; 44 int ready; 45 int writecount; 46 } oops_cxt; ··· 69 erase.mtd = mtd; 70 erase.callback = mtdoops_erase_callback; 71 erase.addr = offset; 72 - if (mtd->erasesize < OOPS_PAGE_SIZE) 73 - erase.len = OOPS_PAGE_SIZE; 74 - else 75 - erase.len = mtd->erasesize; 76 erase.priv = (u_long)&wait_q; 77 78 set_current_state(TASK_INTERRUPTIBLE); ··· 91 return 0; 92 } 93 94 - static int mtdoops_inc_counter(struct mtdoops_context *cxt) 95 { 96 struct mtd_info *mtd = cxt->mtd; 97 size_t retlen; ··· 107 108 ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4, 109 &retlen, (u_char *) &count); 110 - if ((retlen != 4) || (ret < 0)) { 111 printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)" 112 ", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE, 113 retlen, ret); 114 - return 1; 115 } 116 117 /* See if we need to erase the next block */ 118 - if (count != 0xffffffff) 119 - return 1; 120 121 printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n", 122 cxt->nextpage, cxt->nextcount); 123 cxt->ready = 1; 124 - return 0; 125 } 126 127 - static void mtdoops_prepare(struct mtdoops_context *cxt) 128 { 129 struct mtd_info *mtd = cxt->mtd; 130 int i = 0, j, ret, mod; 131 ··· 145 cxt->nextpage = 0; 146 } 147 148 - while (mtd->block_isbad && 149 - mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE)) { 150 badblock: 151 printk(KERN_WARNING "mtdoops: Bad block at %08x\n", 152 cxt->nextpage * OOPS_PAGE_SIZE); ··· 169 for (j = 0, ret = -1; (j < 3) && (ret < 0); j++) 170 ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE); 171 172 - if (ret < 0) { 173 - if (mtd->block_markbad) 174 - mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE); 175 - goto badblock; 176 } 177 178 - printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount); 179 - 180 - cxt->ready = 1; 181 } 182 183 - static void mtdoops_workfunc(struct work_struct *work) 184 - { 185 - struct mtdoops_context *cxt = 186 - container_of(work, struct mtdoops_context, work); 187 - 188 - mtdoops_prepare(cxt); 189 - } 190 - 191 - static int find_next_position(struct mtdoops_context *cxt) 192 { 193 struct mtd_info *mtd = cxt->mtd; 194 - int page, maxpos = 0; 195 u32 count, maxcount = 0xffffffff; 196 size_t retlen; 197 198 for (page = 0; page < cxt->oops_pages; page++) { 199 - mtd->read(mtd, page * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count); 200 if (count == 0xffffffff) 201 continue; 202 if (maxcount == 0xffffffff) { ··· 258 cxt->ready = 1; 259 printk(KERN_DEBUG "mtdoops: Ready %d, %d (first init)\n", 260 cxt->nextpage, cxt->nextcount); 261 - return 0; 262 } 263 264 cxt->nextpage = maxpos; 265 cxt->nextcount = maxcount; 266 267 - return mtdoops_inc_counter(cxt); 268 } 269 270 271 static void mtdoops_notify_add(struct mtd_info *mtd) 272 { 273 struct mtdoops_context *cxt = &oops_cxt; 274 - int ret; 275 276 if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0) 277 return; ··· 281 return; 282 } 283 284 cxt->mtd = mtd; 285 cxt->oops_pages = mtd->size / OOPS_PAGE_SIZE; 286 287 - ret = find_next_position(cxt); 288 - if (ret == 1) 289 - mtdoops_prepare(cxt); 290 291 - printk(KERN_DEBUG "mtdoops: Attached to MTD device %d\n", mtd->index); 292 } 293 294 static void mtdoops_notify_remove(struct mtd_info 
*mtd) ··· 310 { 311 struct mtdoops_context *cxt = &oops_cxt; 312 struct mtd_info *mtd = cxt->mtd; 313 - size_t retlen; 314 - int ret; 315 316 - if (!cxt->ready || !mtd) 317 return; 318 319 - if (cxt->writecount == 0) 320 return; 321 - 322 - if (cxt->writecount < OOPS_PAGE_SIZE) 323 - memset(cxt->oops_buf + cxt->writecount, 0xff, 324 - OOPS_PAGE_SIZE - cxt->writecount); 325 - 326 - ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 327 - OOPS_PAGE_SIZE, &retlen, cxt->oops_buf); 328 cxt->ready = 0; 329 - cxt->writecount = 0; 330 331 - if ((retlen != OOPS_PAGE_SIZE) || (ret < 0)) 332 - printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n", 333 - cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret); 334 - 335 - ret = mtdoops_inc_counter(cxt); 336 - if (ret == 1) 337 - schedule_work(&cxt->work); 338 } 339 340 static void ··· 339 { 340 struct mtdoops_context *cxt = co->data; 341 struct mtd_info *mtd = cxt->mtd; 342 - int i; 343 344 if (!oops_in_progress) { 345 mtdoops_console_sync(); ··· 347 } 348 349 if (!cxt->ready || !mtd) 350 return; 351 352 if (cxt->writecount == 0) { ··· 365 if ((count + cxt->writecount) > OOPS_PAGE_SIZE) 366 count = OOPS_PAGE_SIZE - cxt->writecount; 367 368 - for (i = 0; i < count; i++, s++) 369 - *((char *)(cxt->oops_buf) + cxt->writecount + i) = *s; 370 371 - cxt->writecount = cxt->writecount + count; 372 } 373 374 static int __init mtdoops_console_setup(struct console *co, char *options) ··· 397 .write = mtdoops_console_write, 398 .setup = mtdoops_console_setup, 399 .unblank = mtdoops_console_sync, 400 - .flags = CON_PRINTBUFFER, 401 .index = -1, 402 .data = &oops_cxt, 403 }; ··· 409 cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE); 410 411 if (!cxt->oops_buf) { 412 - printk(KERN_ERR "Failed to allocate oops buffer workspace\n"); 413 return -ENOMEM; 414 } 415 416 - INIT_WORK(&cxt->work, mtdoops_workfunc); 417 418 register_console(&mtdoops_console); 419 register_mtd_user(&mtdoops_notifier);
··· 28 #include <linux/workqueue.h> 29 #include <linux/sched.h> 30 #include <linux/wait.h> 31 + #include <linux/delay.h> 32 + #include <linux/spinlock.h> 33 + #include <linux/interrupt.h> 34 #include <linux/mtd/mtd.h> 35 36 #define OOPS_PAGE_SIZE 4096 37 38 + struct mtdoops_context { 39 int mtd_index; 40 + struct work_struct work_erase; 41 + struct work_struct work_write; 42 struct mtd_info *mtd; 43 int oops_pages; 44 int nextpage; 45 int nextcount; 46 47 void *oops_buf; 48 + 49 + /* writecount and disabling ready are spin lock protected */ 50 + spinlock_t writecount_lock; 51 int ready; 52 int writecount; 53 } oops_cxt; ··· 62 erase.mtd = mtd; 63 erase.callback = mtdoops_erase_callback; 64 erase.addr = offset; 65 + erase.len = mtd->erasesize; 66 erase.priv = (u_long)&wait_q; 67 68 set_current_state(TASK_INTERRUPTIBLE); ··· 87 return 0; 88 } 89 90 + static void mtdoops_inc_counter(struct mtdoops_context *cxt) 91 { 92 struct mtd_info *mtd = cxt->mtd; 93 size_t retlen; ··· 103 104 ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4, 105 &retlen, (u_char *) &count); 106 + if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) { 107 printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)" 108 ", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE, 109 retlen, ret); 110 + schedule_work(&cxt->work_erase); 111 + return; 112 } 113 114 /* See if we need to erase the next block */ 115 + if (count != 0xffffffff) { 116 + schedule_work(&cxt->work_erase); 117 + return; 118 + } 119 120 printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n", 121 cxt->nextpage, cxt->nextcount); 122 cxt->ready = 1; 123 } 124 125 + /* Scheduled work - when we can't proceed without erasing a block */ 126 + static void mtdoops_workfunc_erase(struct work_struct *work) 127 { 128 + struct mtdoops_context *cxt = 129 + container_of(work, struct mtdoops_context, work_erase); 130 struct mtd_info *mtd = cxt->mtd; 131 int i = 0, j, ret, mod; 132 ··· 136 cxt->nextpage = 0; 137 } 138 139 + while (mtd->block_isbad) { 140 + ret = mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE); 141 + if (!ret) 142 + break; 143 + if (ret < 0) { 144 + printk(KERN_ERR "mtdoops: block_isbad failed, aborting.\n"); 145 + return; 146 + } 147 badblock: 148 printk(KERN_WARNING "mtdoops: Bad block at %08x\n", 149 cxt->nextpage * OOPS_PAGE_SIZE); ··· 154 for (j = 0, ret = -1; (j < 3) && (ret < 0); j++) 155 ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE); 156 157 + if (ret >= 0) { 158 + printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount); 159 + cxt->ready = 1; 160 + return; 161 } 162 163 + if (mtd->block_markbad && (ret == -EIO)) { 164 + ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE); 165 + if (ret < 0) { 166 + printk(KERN_ERR "mtdoops: block_markbad failed, aborting.\n"); 167 + return; 168 + } 169 + } 170 + goto badblock; 171 } 172 173 + static void mtdoops_write(struct mtdoops_context *cxt, int panic) 174 { 175 struct mtd_info *mtd = cxt->mtd; 176 + size_t retlen; 177 + int ret; 178 + 179 + if (cxt->writecount < OOPS_PAGE_SIZE) 180 + memset(cxt->oops_buf + cxt->writecount, 0xff, 181 + OOPS_PAGE_SIZE - cxt->writecount); 182 + 183 + if (panic) 184 + ret = mtd->panic_write(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 185 + OOPS_PAGE_SIZE, &retlen, cxt->oops_buf); 186 + else 187 + ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 188 + OOPS_PAGE_SIZE, &retlen, cxt->oops_buf); 189 + 190 + cxt->writecount = 0; 191 + 192 + if ((retlen != OOPS_PAGE_SIZE) || (ret < 0)) 193 + printk(KERN_ERR "mtdoops: Write failure at %d 
(%td of %d written), err %d.\n", 194 + cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret); 195 + 196 + mtdoops_inc_counter(cxt); 197 + } 198 + 199 + 200 + static void mtdoops_workfunc_write(struct work_struct *work) 201 + { 202 + struct mtdoops_context *cxt = 203 + container_of(work, struct mtdoops_context, work_write); 204 + 205 + mtdoops_write(cxt, 0); 206 + } 207 + 208 + static void find_next_position(struct mtdoops_context *cxt) 209 + { 210 + struct mtd_info *mtd = cxt->mtd; 211 + int ret, page, maxpos = 0; 212 u32 count, maxcount = 0xffffffff; 213 size_t retlen; 214 215 for (page = 0; page < cxt->oops_pages; page++) { 216 + ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count); 217 + if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) { 218 + printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)" 219 + ", err %d.\n", page * OOPS_PAGE_SIZE, retlen, ret); 220 + continue; 221 + } 222 + 223 if (count == 0xffffffff) 224 continue; 225 if (maxcount == 0xffffffff) { ··· 205 cxt->ready = 1; 206 printk(KERN_DEBUG "mtdoops: Ready %d, %d (first init)\n", 207 cxt->nextpage, cxt->nextcount); 208 + return; 209 } 210 211 cxt->nextpage = maxpos; 212 cxt->nextcount = maxcount; 213 214 + mtdoops_inc_counter(cxt); 215 } 216 217 218 static void mtdoops_notify_add(struct mtd_info *mtd) 219 { 220 struct mtdoops_context *cxt = &oops_cxt; 221 222 if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0) 223 return; ··· 229 return; 230 } 231 232 + if (mtd->erasesize < OOPS_PAGE_SIZE) { 233 + printk(KERN_ERR "Eraseblock size of MTD partition %d too small\n", 234 + mtd->index); 235 + return; 236 + } 237 + 238 cxt->mtd = mtd; 239 cxt->oops_pages = mtd->size / OOPS_PAGE_SIZE; 240 241 + find_next_position(cxt); 242 243 + printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index); 244 } 245 246 static void mtdoops_notify_remove(struct mtd_info *mtd) ··· 254 { 255 struct mtdoops_context *cxt = &oops_cxt; 256 struct mtd_info *mtd = cxt->mtd; 257 + unsigned long flags; 258 259 + if (!cxt->ready || !mtd || cxt->writecount == 0) 260 return; 261 262 + /* 263 + * Once ready is 0 and we've held the lock no further writes to the 264 + * buffer will happen 265 + */ 266 + spin_lock_irqsave(&cxt->writecount_lock, flags); 267 + if (!cxt->ready) { 268 + spin_unlock_irqrestore(&cxt->writecount_lock, flags); 269 return; 270 + } 271 cxt->ready = 0; 272 + spin_unlock_irqrestore(&cxt->writecount_lock, flags); 273 274 + if (mtd->panic_write && in_interrupt()) 275 + /* Interrupt context, we're going to panic so try and log */ 276 + mtdoops_write(cxt, 1); 277 + else 278 + schedule_work(&cxt->work_write); 279 } 280 281 static void ··· 286 { 287 struct mtdoops_context *cxt = co->data; 288 struct mtd_info *mtd = cxt->mtd; 289 + unsigned long flags; 290 291 if (!oops_in_progress) { 292 mtdoops_console_sync(); ··· 294 } 295 296 if (!cxt->ready || !mtd) 297 + return; 298 + 299 + /* Locking on writecount ensures sequential writes to the buffer */ 300 + spin_lock_irqsave(&cxt->writecount_lock, flags); 301 + 302 + /* Check ready status didn't change whilst waiting for the lock */ 303 + if (!cxt->ready) 304 return; 305 306 if (cxt->writecount == 0) { ··· 305 if ((count + cxt->writecount) > OOPS_PAGE_SIZE) 306 count = OOPS_PAGE_SIZE - cxt->writecount; 307 308 + memcpy(cxt->oops_buf + cxt->writecount, s, count); 309 + cxt->writecount += count; 310 311 + spin_unlock_irqrestore(&cxt->writecount_lock, flags); 312 + 313 + if (cxt->writecount == OOPS_PAGE_SIZE) 314 + mtdoops_console_sync(); 315 } 316 317 
static int __init mtdoops_console_setup(struct console *co, char *options) ··· 334 .write = mtdoops_console_write, 335 .setup = mtdoops_console_setup, 336 .unblank = mtdoops_console_sync, 337 .index = -1, 338 .data = &oops_cxt, 339 }; ··· 347 cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE); 348 349 if (!cxt->oops_buf) { 350 + printk(KERN_ERR "Failed to allocate mtdoops buffer workspace\n"); 351 return -ENOMEM; 352 } 353 354 + INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase); 355 + INIT_WORK(&cxt->work_write, mtdoops_workfunc_write); 356 357 register_console(&mtdoops_console); 358 register_mtd_user(&mtdoops_notifier);
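The rework's core idea: all slow operations move into two work items (work_erase to make the next page writable, work_write to flush the buffer), and writecount_lock arbitrates between the console hook and the flush. The one path that cannot defer is a panic taken in interrupt context, where no work item will ever run again, hence the direct panic_write call. A compressed sketch of the hand-off in mtdoops_console_sync(), with names as in the driver and error handling elided:

    /* Claim the buffer: once ready is cleared under the lock,
     * mtdoops_console_write() can no longer append to oops_buf.
     */
    spin_lock_irqsave(&cxt->writecount_lock, flags);
    if (!cxt->ready) {
            spin_unlock_irqrestore(&cxt->writecount_lock, flags);
            return;                          /* lost the race, nothing to flush */
    }
    cxt->ready = 0;
    spin_unlock_irqrestore(&cxt->writecount_lock, flags);

    if (mtd->panic_write && in_interrupt())
            mtdoops_write(cxt, 1);           /* synchronous: the scheduler is dead */
    else
            schedule_work(&cxt->work_write); /* normal oops: process context */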
+17
drivers/mtd/mtdpart.c
··· 151 len, retlen, buf); 152 } 153 154 static int part_write_oob(struct mtd_info *mtd, loff_t to, 155 struct mtd_oob_ops *ops) 156 { ··· 365 366 slave->mtd.read = part_read; 367 slave->mtd.write = part_write; 368 369 if(master->point && master->unpoint){ 370 slave->mtd.point = part_point;
··· 151 len, retlen, buf); 152 } 153 154 + static int part_panic_write (struct mtd_info *mtd, loff_t to, size_t len, 155 + size_t *retlen, const u_char *buf) 156 + { 157 + struct mtd_part *part = PART(mtd); 158 + if (!(mtd->flags & MTD_WRITEABLE)) 159 + return -EROFS; 160 + if (to >= mtd->size) 161 + len = 0; 162 + else if (to + len > mtd->size) 163 + len = mtd->size - to; 164 + return part->master->panic_write (part->master, to + part->offset, 165 + len, retlen, buf); 166 + } 167 + 168 static int part_write_oob(struct mtd_info *mtd, loff_t to, 169 struct mtd_oob_ops *ops) 170 { ··· 351 352 slave->mtd.read = part_read; 353 slave->mtd.write = part_write; 354 + 355 + if (master->panic_write) 356 + slave->mtd.panic_write = part_panic_write; 357 358 if(master->point && master->unpoint){ 359 slave->mtd.point = part_point;
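part_panic_write() applies the usual clamp-and-offset before delegating, and the hook is only wired up when the master provides panic_write, so callers stay partition-agnostic but must test for the method. A hedged caller-side sketch (len and buf are whatever the client wants logged):

    size_t retlen;
    int err = -EOPNOTSUPP;

    /* panic_write may be absent; never safe to assume it */
    if (mtd->panic_write)
            err = mtd->panic_write(mtd, 0, len, &retlen, buf);
    if (!err && retlen != len)
            err = -EIO;                     /* short write */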
+25 -1
drivers/mtd/nand/Kconfig
··· 93 94 config MTD_NAND_BF5XX 95 tristate "Blackfin on-chip NAND Flash Controller driver" 96 - depends on BF54x && MTD_NAND 97 help 98 This enables the Blackfin on-chip NAND flash controller 99 ··· 283 tristate "Support for NAND Flash on CM-X270 modules" 284 depends on MTD_NAND && MACH_ARMCORE 285 286 287 config MTD_NAND_NANDSIM 288 tristate "Support for NAND Flash Simulator" ··· 311 help 312 These two (and possibly other) Alauda-based cardreaders for 313 SmartMedia and xD allow raw flash access. 314 315 endif # MTD_NAND
··· 93 94 config MTD_NAND_BF5XX 95 tristate "Blackfin on-chip NAND Flash Controller driver" 96 + depends on (BF54x || BF52x) && MTD_NAND 97 help 98 This enables the Blackfin on-chip NAND flash controller 99 ··· 283 tristate "Support for NAND Flash on CM-X270 modules" 284 depends on MTD_NAND && MACH_ARMCORE 285 286 + config MTD_NAND_PASEMI 287 + tristate "NAND support for PA Semi PWRficient" 288 + depends on MTD_NAND && PPC_PASEMI 289 + help 290 + Enables support for NAND Flash interface on PA Semi PWRficient 291 + based boards 292 293 config MTD_NAND_NANDSIM 294 tristate "Support for NAND Flash Simulator" ··· 305 help 306 These two (and possibly other) Alauda-based cardreaders for 307 SmartMedia and xD allow raw flash access. 308 + 309 + config MTD_NAND_ORION 310 + tristate "NAND Flash support for Marvell Orion SoC" 311 + depends on ARCH_ORION && MTD_NAND 312 + help 313 + This enables the NAND flash controller on Orion machines. 314 + 315 + No board specific support is done by this driver, each board 316 + must advertise a platform_device for the driver to attach. 317 + 318 + config MTD_NAND_FSL_ELBC 319 + tristate "NAND support for Freescale eLBC controllers" 320 + depends on MTD_NAND && PPC_OF 321 + help 322 + Various Freescale chips, including the 8313, include a NAND Flash 323 + Controller Module with built-in hardware ECC capabilities. 324 + Enabling this option will enable you to use this to control 325 + external NAND devices. 326 327 endif # MTD_NAND
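All three new controllers are ordinary tristates, so they can be modular or built in; one plausible .config fragment (illustrative only, the right mix is board-specific):

    CONFIG_MTD_NAND_PASEMI=m
    CONFIG_MTD_NAND_ORION=m
    CONFIG_MTD_NAND_FSL_ELBC=y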
+3
drivers/mtd/nand/Makefile
··· 29 obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o 30 obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o 31 obj-$(CONFIG_MTD_ALAUDA) += alauda.o 32 33 nand-objs := nand_base.o nand_bbt.o
··· 29 obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o 30 obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o 31 obj-$(CONFIG_MTD_ALAUDA) += alauda.o 32 + obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o 33 + obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o 34 + obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o 35 36 nand-objs := nand_base.o nand_bbt.o
+6 -6
drivers/mtd/nand/at91_nand.c
··· 156 } 157 158 #ifdef CONFIG_MTD_PARTITIONS 159 - if (host->board->partition_info) 160 - partitions = host->board->partition_info(mtd->size, &num_partitions); 161 #ifdef CONFIG_MTD_CMDLINE_PARTS 162 - else { 163 - mtd->name = "at91_nand"; 164 - num_partitions = parse_mtd_partitions(mtd, part_probes, &partitions, 0); 165 - } 166 #endif 167 168 if ((!partitions) || (num_partitions == 0)) { 169 printk(KERN_ERR "at91_nand: No parititions defined, or unsupported device.\n");
··· 156 } 157 158 #ifdef CONFIG_MTD_PARTITIONS 159 #ifdef CONFIG_MTD_CMDLINE_PARTS 160 + mtd->name = "at91_nand"; 161 + num_partitions = parse_mtd_partitions(mtd, part_probes, 162 + &partitions, 0); 163 #endif 164 + if (num_partitions <= 0 && host->board->partition_info) 165 + partitions = host->board->partition_info(mtd->size, 166 + &num_partitions); 167 168 if ((!partitions) || (num_partitions == 0)) { 169 printk(KERN_ERR "at91_nand: No parititions defined, or unsupported device.\n");
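The reordering inverts the precedence: a `mtdparts=` kernel argument is now parsed first, and the board's partition_info() is only consulted when the command line produced nothing. An illustrative command line, matched against the "at91_nand" mtd-id set above (partition names and sizes invented):

    mtdparts=at91_nand:128k(bootstrap)ro,256k(uboot),-(rootfs)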
+28 -11
drivers/mtd/nand/bf5xx_nand.c
··· 74 static int hardware_ecc; 75 #endif 76 77 - static unsigned short bfin_nfc_pin_req[] = {P_NAND_CE, P_NAND_RB, 0}; 78 79 /* 80 * Data structures for bf5xx nand flash controller driver ··· 293 u16 ecc0, ecc1; 294 u32 code[2]; 295 u8 *p; 296 - int bytes = 3, i; 297 298 /* first 4 bytes ECC code for 256 page size */ 299 ecc0 = bfin_read_NFC_ECC0(); ··· 302 303 dev_dbg(info->device, "returning ecc 0x%08x\n", code[0]); 304 305 /* second 4 bytes ECC code for 512 page size */ 306 if (page_size == 512) { 307 ecc0 = bfin_read_NFC_ECC2(); 308 ecc1 = bfin_read_NFC_ECC3(); 309 code[1] = (ecc0 & 0x3FF) | ((ecc1 & 0x3FF) << 11); 310 - bytes = 6; 311 dev_dbg(info->device, "returning ecc 0x%08x\n", code[1]); 312 } 313 - 314 - p = (u8 *)code; 315 - for (i = 0; i < bytes; i++) 316 - ecc_code[i] = p[i]; 317 318 return 0; 319 } ··· 526 527 init_completion(&info->dma_completion); 528 529 /* Setup DMAC1 channel mux for NFC which shared with SDH */ 530 val = bfin_read_DMAC1_PERIMUX(); 531 val &= 0xFFFE; 532 bfin_write_DMAC1_PERIMUX(val); 533 SSYNC(); 534 - 535 /* Request NFC DMA channel */ 536 ret = request_dma(CH_NFC, "BF5XX NFC driver"); 537 if (ret < 0) { ··· 763 static int bf5xx_nand_resume(struct platform_device *dev) 764 { 765 struct bf5xx_nand_info *info = platform_get_drvdata(dev); 766 - 767 - if (info) 768 - bf5xx_nand_hw_init(info); 769 770 return 0; 771 }
··· 74 static int hardware_ecc; 75 #endif 76 77 + static unsigned short bfin_nfc_pin_req[] = 78 + {P_NAND_CE, 79 + P_NAND_RB, 80 + P_NAND_D0, 81 + P_NAND_D1, 82 + P_NAND_D2, 83 + P_NAND_D3, 84 + P_NAND_D4, 85 + P_NAND_D5, 86 + P_NAND_D6, 87 + P_NAND_D7, 88 + P_NAND_WE, 89 + P_NAND_RE, 90 + P_NAND_CLE, 91 + P_NAND_ALE, 92 + 0}; 93 94 /* 95 * Data structures for bf5xx nand flash controller driver ··· 278 u16 ecc0, ecc1; 279 u32 code[2]; 280 u8 *p; 281 282 /* first 4 bytes ECC code for 256 page size */ 283 ecc0 = bfin_read_NFC_ECC0(); ··· 288 289 dev_dbg(info->device, "returning ecc 0x%08x\n", code[0]); 290 291 + /* first 3 bytes in ecc_code for 256 page size */ 292 + p = (u8 *) code; 293 + memcpy(ecc_code, p, 3); 294 + 295 /* second 4 bytes ECC code for 512 page size */ 296 if (page_size == 512) { 297 ecc0 = bfin_read_NFC_ECC2(); 298 ecc1 = bfin_read_NFC_ECC3(); 299 code[1] = (ecc0 & 0x3FF) | ((ecc1 & 0x3FF) << 11); 300 + 301 + /* second 3 bytes in ecc_code for second 256 302 + * bytes of 512 page size 303 + */ 304 + p = (u8 *) (code + 1); 305 + memcpy((ecc_code + 3), p, 3); 306 dev_dbg(info->device, "returning ecc 0x%08x\n", code[1]); 307 } 308 309 return 0; 310 } ··· 507 508 init_completion(&info->dma_completion); 509 510 + #ifdef CONFIG_BF54x 511 /* Setup DMAC1 channel mux for NFC which shared with SDH */ 512 val = bfin_read_DMAC1_PERIMUX(); 513 val &= 0xFFFE; 514 bfin_write_DMAC1_PERIMUX(val); 515 SSYNC(); 516 + #endif 517 /* Request NFC DMA channel */ 518 ret = request_dma(CH_NFC, "BF5XX NFC driver"); 519 if (ret < 0) { ··· 743 static int bf5xx_nand_resume(struct platform_device *dev) 744 { 745 struct bf5xx_nand_info *info = platform_get_drvdata(dev); 746 747 return 0; 748 }
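The ECC change replaces the byte-count bookkeeping with two explicit 3-byte copies, one per 256-byte half of the page. Three bytes per half is exactly enough because, as the masks in the hunk show, the hardware contributes two 10-bit parity values packed into bits 0-9 and 11-20 of a 32-bit word. A sketch of that packing (little-endian byte order assumed, as on Blackfin; ecc0/ecc1 are the NFC register reads):

    u32 code = (ecc0 & 0x3FF) | ((ecc1 & 0x3FF) << 11);
    memcpy(ecc_code, &code, 3);     /* bits 0..20 fit in the low 3 bytes */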
+19
drivers/mtd/nand/cafe_nand.c
··· 11 #undef DEBUG 12 #include <linux/mtd/mtd.h> 13 #include <linux/mtd/nand.h> 14 #include <linux/rslib.h> 15 #include <linux/pci.h> 16 #include <linux/delay.h> ··· 53 54 struct cafe_priv { 55 struct nand_chip nand; 56 struct pci_dev *pdev; 57 void __iomem *mmio; 58 struct rs_control *rs; ··· 85 static unsigned int numtimings; 86 static int timing[3]; 87 module_param_array(timing, int, &numtimings, 0644); 88 89 /* Hrm. Why isn't this already conditional on something in the struct device? */ 90 #define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0) ··· 626 { 627 struct mtd_info *mtd; 628 struct cafe_priv *cafe; 629 uint32_t ctrl; 630 int err = 0; 631 632 /* Very old versions shared the same PCI ident for all three ··· 795 goto out_irq; 796 797 pci_set_drvdata(pdev, mtd); 798 add_mtd_device(mtd); 799 goto out; 800 801 out_irq:
··· 11 #undef DEBUG 12 #include <linux/mtd/mtd.h> 13 #include <linux/mtd/nand.h> 14 + #include <linux/mtd/partitions.h> 15 #include <linux/rslib.h> 16 #include <linux/pci.h> 17 #include <linux/delay.h> ··· 52 53 struct cafe_priv { 54 struct nand_chip nand; 55 + struct mtd_partition *parts; 56 struct pci_dev *pdev; 57 void __iomem *mmio; 58 struct rs_control *rs; ··· 83 static unsigned int numtimings; 84 static int timing[3]; 85 module_param_array(timing, int, &numtimings, 0644); 86 + 87 + #ifdef CONFIG_MTD_PARTITIONS 88 + static const char *part_probes[] = { "RedBoot", NULL }; 89 + #endif 90 91 /* Hrm. Why isn't this already conditional on something in the struct device? */ 92 #define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0) ··· 620 { 621 struct mtd_info *mtd; 622 struct cafe_priv *cafe; 623 + struct mtd_partition *parts; 624 uint32_t ctrl; 625 + int nr_parts; 626 int err = 0; 627 628 /* Very old versions shared the same PCI ident for all three ··· 787 goto out_irq; 788 789 pci_set_drvdata(pdev, mtd); 790 + 791 + /* We register the whole device first, separate from the partitions */ 792 add_mtd_device(mtd); 793 + 794 + #ifdef CONFIG_MTD_PARTITIONS 795 + nr_parts = parse_mtd_partitions(mtd, part_probes, &parts, 0); 796 + if (nr_parts > 0) { 797 + cafe->parts = parts; 798 + dev_info(&cafe->pdev->dev, "%d RedBoot partitions found\n", nr_parts); 799 + add_mtd_partitions(mtd, parts, nr_parts); 800 + } 801 + #endif 802 goto out; 803 804 out_irq:
+1244
drivers/mtd/nand/fsl_elbc_nand.c
···
··· 1 + /* Freescale Enhanced Local Bus Controller NAND driver 2 + * 3 + * Copyright (c) 2006-2007 Freescale Semiconductor 4 + * 5 + * Authors: Nick Spence <nick.spence@freescale.com>, 6 + * Scott Wood <scottwood@freescale.com> 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License as published by 10 + * the Free Software Foundation; either version 2 of the License, or 11 + * (at your option) any later version. 12 + * 13 + * This program is distributed in the hope that it will be useful, 14 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 + * GNU General Public License for more details. 17 + * 18 + * You should have received a copy of the GNU General Public License 19 + * along with this program; if not, write to the Free Software 20 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 + */ 22 + 23 + #include <linux/module.h> 24 + #include <linux/types.h> 25 + #include <linux/init.h> 26 + #include <linux/kernel.h> 27 + #include <linux/string.h> 28 + #include <linux/ioport.h> 29 + #include <linux/of_platform.h> 30 + #include <linux/slab.h> 31 + #include <linux/interrupt.h> 32 + 33 + #include <linux/mtd/mtd.h> 34 + #include <linux/mtd/nand.h> 35 + #include <linux/mtd/nand_ecc.h> 36 + #include <linux/mtd/partitions.h> 37 + 38 + #include <asm/io.h> 39 + 40 + 41 + #define MAX_BANKS 8 42 + #define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */ 43 + #define FCM_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait for FCM */ 44 + 45 + struct elbc_bank { 46 + __be32 br; /**< Base Register */ 47 + #define BR_BA 0xFFFF8000 48 + #define BR_BA_SHIFT 15 49 + #define BR_PS 0x00001800 50 + #define BR_PS_SHIFT 11 51 + #define BR_PS_8 0x00000800 /* Port Size 8 bit */ 52 + #define BR_PS_16 0x00001000 /* Port Size 16 bit */ 53 + #define BR_PS_32 0x00001800 /* Port Size 32 bit */ 54 + #define BR_DECC 0x00000600 55 + #define BR_DECC_SHIFT 9 56 + #define BR_DECC_OFF 0x00000000 /* HW ECC checking and generation off */ 57 + #define BR_DECC_CHK 0x00000200 /* HW ECC checking on, generation off */ 58 + #define BR_DECC_CHK_GEN 0x00000400 /* HW ECC checking and generation on */ 59 + #define BR_WP 0x00000100 60 + #define BR_WP_SHIFT 8 61 + #define BR_MSEL 0x000000E0 62 + #define BR_MSEL_SHIFT 5 63 + #define BR_MS_GPCM 0x00000000 /* GPCM */ 64 + #define BR_MS_FCM 0x00000020 /* FCM */ 65 + #define BR_MS_SDRAM 0x00000060 /* SDRAM */ 66 + #define BR_MS_UPMA 0x00000080 /* UPMA */ 67 + #define BR_MS_UPMB 0x000000A0 /* UPMB */ 68 + #define BR_MS_UPMC 0x000000C0 /* UPMC */ 69 + #define BR_V 0x00000001 70 + #define BR_V_SHIFT 0 71 + #define BR_RES ~(BR_BA|BR_PS|BR_DECC|BR_WP|BR_MSEL|BR_V) 72 + 73 + __be32 or; /**< Base Register */ 74 + #define OR0 0x5004 75 + #define OR1 0x500C 76 + #define OR2 0x5014 77 + #define OR3 0x501C 78 + #define OR4 0x5024 79 + #define OR5 0x502C 80 + #define OR6 0x5034 81 + #define OR7 0x503C 82 + 83 + #define OR_FCM_AM 0xFFFF8000 84 + #define OR_FCM_AM_SHIFT 15 85 + #define OR_FCM_BCTLD 0x00001000 86 + #define OR_FCM_BCTLD_SHIFT 12 87 + #define OR_FCM_PGS 0x00000400 88 + #define OR_FCM_PGS_SHIFT 10 89 + #define OR_FCM_CSCT 0x00000200 90 + #define OR_FCM_CSCT_SHIFT 9 91 + #define OR_FCM_CST 0x00000100 92 + #define OR_FCM_CST_SHIFT 8 93 + #define OR_FCM_CHT 0x00000080 94 + #define OR_FCM_CHT_SHIFT 7 95 + #define OR_FCM_SCY 0x00000070 96 + #define OR_FCM_SCY_SHIFT 4 97 + #define 
OR_FCM_SCY_1 0x00000010 98 + #define OR_FCM_SCY_2 0x00000020 99 + #define OR_FCM_SCY_3 0x00000030 100 + #define OR_FCM_SCY_4 0x00000040 101 + #define OR_FCM_SCY_5 0x00000050 102 + #define OR_FCM_SCY_6 0x00000060 103 + #define OR_FCM_SCY_7 0x00000070 104 + #define OR_FCM_RST 0x00000008 105 + #define OR_FCM_RST_SHIFT 3 106 + #define OR_FCM_TRLX 0x00000004 107 + #define OR_FCM_TRLX_SHIFT 2 108 + #define OR_FCM_EHTR 0x00000002 109 + #define OR_FCM_EHTR_SHIFT 1 110 + }; 111 + 112 + struct elbc_regs { 113 + struct elbc_bank bank[8]; 114 + u8 res0[0x28]; 115 + __be32 mar; /**< UPM Address Register */ 116 + u8 res1[0x4]; 117 + __be32 mamr; /**< UPMA Mode Register */ 118 + __be32 mbmr; /**< UPMB Mode Register */ 119 + __be32 mcmr; /**< UPMC Mode Register */ 120 + u8 res2[0x8]; 121 + __be32 mrtpr; /**< Memory Refresh Timer Prescaler Register */ 122 + __be32 mdr; /**< UPM Data Register */ 123 + u8 res3[0x4]; 124 + __be32 lsor; /**< Special Operation Initiation Register */ 125 + __be32 lsdmr; /**< SDRAM Mode Register */ 126 + u8 res4[0x8]; 127 + __be32 lurt; /**< UPM Refresh Timer */ 128 + __be32 lsrt; /**< SDRAM Refresh Timer */ 129 + u8 res5[0x8]; 130 + __be32 ltesr; /**< Transfer Error Status Register */ 131 + #define LTESR_BM 0x80000000 132 + #define LTESR_FCT 0x40000000 133 + #define LTESR_PAR 0x20000000 134 + #define LTESR_WP 0x04000000 135 + #define LTESR_ATMW 0x00800000 136 + #define LTESR_ATMR 0x00400000 137 + #define LTESR_CS 0x00080000 138 + #define LTESR_CC 0x00000001 139 + #define LTESR_NAND_MASK (LTESR_FCT | LTESR_PAR | LTESR_CC) 140 + __be32 ltedr; /**< Transfer Error Disable Register */ 141 + __be32 lteir; /**< Transfer Error Interrupt Register */ 142 + __be32 lteatr; /**< Transfer Error Attributes Register */ 143 + __be32 ltear; /**< Transfer Error Address Register */ 144 + u8 res6[0xC]; 145 + __be32 lbcr; /**< Configuration Register */ 146 + #define LBCR_LDIS 0x80000000 147 + #define LBCR_LDIS_SHIFT 31 148 + #define LBCR_BCTLC 0x00C00000 149 + #define LBCR_BCTLC_SHIFT 22 150 + #define LBCR_AHD 0x00200000 151 + #define LBCR_LPBSE 0x00020000 152 + #define LBCR_LPBSE_SHIFT 17 153 + #define LBCR_EPAR 0x00010000 154 + #define LBCR_EPAR_SHIFT 16 155 + #define LBCR_BMT 0x0000FF00 156 + #define LBCR_BMT_SHIFT 8 157 + #define LBCR_INIT 0x00040000 158 + __be32 lcrr; /**< Clock Ratio Register */ 159 + #define LCRR_DBYP 0x80000000 160 + #define LCRR_DBYP_SHIFT 31 161 + #define LCRR_BUFCMDC 0x30000000 162 + #define LCRR_BUFCMDC_SHIFT 28 163 + #define LCRR_ECL 0x03000000 164 + #define LCRR_ECL_SHIFT 24 165 + #define LCRR_EADC 0x00030000 166 + #define LCRR_EADC_SHIFT 16 167 + #define LCRR_CLKDIV 0x0000000F 168 + #define LCRR_CLKDIV_SHIFT 0 169 + u8 res7[0x8]; 170 + __be32 fmr; /**< Flash Mode Register */ 171 + #define FMR_CWTO 0x0000F000 172 + #define FMR_CWTO_SHIFT 12 173 + #define FMR_BOOT 0x00000800 174 + #define FMR_ECCM 0x00000100 175 + #define FMR_AL 0x00000030 176 + #define FMR_AL_SHIFT 4 177 + #define FMR_OP 0x00000003 178 + #define FMR_OP_SHIFT 0 179 + __be32 fir; /**< Flash Instruction Register */ 180 + #define FIR_OP0 0xF0000000 181 + #define FIR_OP0_SHIFT 28 182 + #define FIR_OP1 0x0F000000 183 + #define FIR_OP1_SHIFT 24 184 + #define FIR_OP2 0x00F00000 185 + #define FIR_OP2_SHIFT 20 186 + #define FIR_OP3 0x000F0000 187 + #define FIR_OP3_SHIFT 16 188 + #define FIR_OP4 0x0000F000 189 + #define FIR_OP4_SHIFT 12 190 + #define FIR_OP5 0x00000F00 191 + #define FIR_OP5_SHIFT 8 192 + #define FIR_OP6 0x000000F0 193 + #define FIR_OP6_SHIFT 4 194 + #define FIR_OP7 0x0000000F 195 + #define 
FIR_OP7_SHIFT 0 196 + #define FIR_OP_NOP 0x0 /* No operation and end of sequence */ 197 + #define FIR_OP_CA 0x1 /* Issue current column address */ 198 + #define FIR_OP_PA 0x2 /* Issue current block+page address */ 199 + #define FIR_OP_UA 0x3 /* Issue user defined address */ 200 + #define FIR_OP_CM0 0x4 /* Issue command from FCR[CMD0] */ 201 + #define FIR_OP_CM1 0x5 /* Issue command from FCR[CMD1] */ 202 + #define FIR_OP_CM2 0x6 /* Issue command from FCR[CMD2] */ 203 + #define FIR_OP_CM3 0x7 /* Issue command from FCR[CMD3] */ 204 + #define FIR_OP_WB 0x8 /* Write FBCR bytes from FCM buffer */ 205 + #define FIR_OP_WS 0x9 /* Write 1 or 2 bytes from MDR[AS] */ 206 + #define FIR_OP_RB 0xA /* Read FBCR bytes to FCM buffer */ 207 + #define FIR_OP_RS 0xB /* Read 1 or 2 bytes to MDR[AS] */ 208 + #define FIR_OP_CW0 0xC /* Wait then issue FCR[CMD0] */ 209 + #define FIR_OP_CW1 0xD /* Wait then issue FCR[CMD1] */ 210 + #define FIR_OP_RBW 0xE /* Wait then read FBCR bytes */ 211 + #define FIR_OP_RSW 0xE /* Wait then read 1 or 2 bytes */ 212 + __be32 fcr; /**< Flash Command Register */ 213 + #define FCR_CMD0 0xFF000000 214 + #define FCR_CMD0_SHIFT 24 215 + #define FCR_CMD1 0x00FF0000 216 + #define FCR_CMD1_SHIFT 16 217 + #define FCR_CMD2 0x0000FF00 218 + #define FCR_CMD2_SHIFT 8 219 + #define FCR_CMD3 0x000000FF 220 + #define FCR_CMD3_SHIFT 0 221 + __be32 fbar; /**< Flash Block Address Register */ 222 + #define FBAR_BLK 0x00FFFFFF 223 + __be32 fpar; /**< Flash Page Address Register */ 224 + #define FPAR_SP_PI 0x00007C00 225 + #define FPAR_SP_PI_SHIFT 10 226 + #define FPAR_SP_MS 0x00000200 227 + #define FPAR_SP_CI 0x000001FF 228 + #define FPAR_SP_CI_SHIFT 0 229 + #define FPAR_LP_PI 0x0003F000 230 + #define FPAR_LP_PI_SHIFT 12 231 + #define FPAR_LP_MS 0x00000800 232 + #define FPAR_LP_CI 0x000007FF 233 + #define FPAR_LP_CI_SHIFT 0 234 + __be32 fbcr; /**< Flash Byte Count Register */ 235 + #define FBCR_BC 0x00000FFF 236 + u8 res11[0x8]; 237 + u8 res8[0xF00]; 238 + }; 239 + 240 + struct fsl_elbc_ctrl; 241 + 242 + /* mtd information per set */ 243 + 244 + struct fsl_elbc_mtd { 245 + struct mtd_info mtd; 246 + struct nand_chip chip; 247 + struct fsl_elbc_ctrl *ctrl; 248 + 249 + struct device *dev; 250 + int bank; /* Chip select bank number */ 251 + u8 __iomem *vbase; /* Chip select base virtual address */ 252 + int page_size; /* NAND page size (0=512, 1=2048) */ 253 + unsigned int fmr; /* FCM Flash Mode Register value */ 254 + }; 255 + 256 + /* overview of the fsl elbc controller */ 257 + 258 + struct fsl_elbc_ctrl { 259 + struct nand_hw_control controller; 260 + struct fsl_elbc_mtd *chips[MAX_BANKS]; 261 + 262 + /* device info */ 263 + struct device *dev; 264 + struct elbc_regs __iomem *regs; 265 + int irq; 266 + wait_queue_head_t irq_wait; 267 + unsigned int irq_status; /* status read from LTESR by irq handler */ 268 + u8 __iomem *addr; /* Address of assigned FCM buffer */ 269 + unsigned int page; /* Last page written to / read from */ 270 + unsigned int read_bytes; /* Number of bytes read during command */ 271 + unsigned int column; /* Saved column from SEQIN */ 272 + unsigned int index; /* Pointer to next byte to 'read' */ 273 + unsigned int status; /* status read from LTESR after last op */ 274 + unsigned int mdr; /* UPM/FCM Data Register value */ 275 + unsigned int use_mdr; /* Non zero if the MDR is to be set */ 276 + unsigned int oob; /* Non zero if operating on OOB data */ 277 + char *oob_poi; /* Place to write ECC after read back */ 278 + }; 279 + 280 + /* These map to the positions used by the FCM 
hardware ECC generator */ 281 + 282 + /* Small Page FLASH with FMR[ECCM] = 0 */ 283 + static struct nand_ecclayout fsl_elbc_oob_sp_eccm0 = { 284 + .eccbytes = 3, 285 + .eccpos = {6, 7, 8}, 286 + .oobfree = { {0, 5}, {9, 7} }, 287 + .oobavail = 12, 288 + }; 289 + 290 + /* Small Page FLASH with FMR[ECCM] = 1 */ 291 + static struct nand_ecclayout fsl_elbc_oob_sp_eccm1 = { 292 + .eccbytes = 3, 293 + .eccpos = {8, 9, 10}, 294 + .oobfree = { {0, 5}, {6, 2}, {11, 5} }, 295 + .oobavail = 12, 296 + }; 297 + 298 + /* Large Page FLASH with FMR[ECCM] = 0 */ 299 + static struct nand_ecclayout fsl_elbc_oob_lp_eccm0 = { 300 + .eccbytes = 12, 301 + .eccpos = {6, 7, 8, 22, 23, 24, 38, 39, 40, 54, 55, 56}, 302 + .oobfree = { {1, 5}, {9, 13}, {25, 13}, {41, 13}, {57, 7} }, 303 + .oobavail = 48, 304 + }; 305 + 306 + /* Large Page FLASH with FMR[ECCM] = 1 */ 307 + static struct nand_ecclayout fsl_elbc_oob_lp_eccm1 = { 308 + .eccbytes = 12, 309 + .eccpos = {8, 9, 10, 24, 25, 26, 40, 41, 42, 56, 57, 58}, 310 + .oobfree = { {1, 7}, {11, 13}, {27, 13}, {43, 13}, {59, 5} }, 311 + .oobavail = 48, 312 + }; 313 + 314 + /*=================================*/ 315 + 316 + /* 317 + * Set up the FCM hardware block and page address fields, and the fcm 318 + * structure addr field to point to the correct FCM buffer in memory 319 + */ 320 + static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob) 321 + { 322 + struct nand_chip *chip = mtd->priv; 323 + struct fsl_elbc_mtd *priv = chip->priv; 324 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 325 + struct elbc_regs __iomem *lbc = ctrl->regs; 326 + int buf_num; 327 + 328 + ctrl->page = page_addr; 329 + 330 + out_be32(&lbc->fbar, 331 + page_addr >> (chip->phys_erase_shift - chip->page_shift)); 332 + 333 + if (priv->page_size) { 334 + out_be32(&lbc->fpar, 335 + ((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) | 336 + (oob ? FPAR_LP_MS : 0) | column); 337 + buf_num = (page_addr & 1) << 2; 338 + } else { 339 + out_be32(&lbc->fpar, 340 + ((page_addr << FPAR_SP_PI_SHIFT) & FPAR_SP_PI) | 341 + (oob ? FPAR_SP_MS : 0) | column); 342 + buf_num = page_addr & 7; 343 + } 344 + 345 + ctrl->addr = priv->vbase + buf_num * 1024; 346 + ctrl->index = column; 347 + 348 + /* for OOB data point to the second half of the buffer */ 349 + if (oob) 350 + ctrl->index += priv->page_size ? 
2048 : 512; 351 + 352 + dev_vdbg(ctrl->dev, "set_addr: bank=%d, ctrl->addr=0x%p (0x%p), " 353 + "index %x, pes %d ps %d\n", 354 + buf_num, ctrl->addr, priv->vbase, ctrl->index, 355 + chip->phys_erase_shift, chip->page_shift); 356 + } 357 + 358 + /* 359 + * execute FCM command and wait for it to complete 360 + */ 361 + static int fsl_elbc_run_command(struct mtd_info *mtd) 362 + { 363 + struct nand_chip *chip = mtd->priv; 364 + struct fsl_elbc_mtd *priv = chip->priv; 365 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 366 + struct elbc_regs __iomem *lbc = ctrl->regs; 367 + 368 + /* Setup the FMR[OP] to execute without write protection */ 369 + out_be32(&lbc->fmr, priv->fmr | 3); 370 + if (ctrl->use_mdr) 371 + out_be32(&lbc->mdr, ctrl->mdr); 372 + 373 + dev_vdbg(ctrl->dev, 374 + "fsl_elbc_run_command: fmr=%08x fir=%08x fcr=%08x\n", 375 + in_be32(&lbc->fmr), in_be32(&lbc->fir), in_be32(&lbc->fcr)); 376 + dev_vdbg(ctrl->dev, 377 + "fsl_elbc_run_command: fbar=%08x fpar=%08x " 378 + "fbcr=%08x bank=%d\n", 379 + in_be32(&lbc->fbar), in_be32(&lbc->fpar), 380 + in_be32(&lbc->fbcr), priv->bank); 381 + 382 + /* execute special operation */ 383 + out_be32(&lbc->lsor, priv->bank); 384 + 385 + /* wait for FCM complete flag or timeout */ 386 + ctrl->irq_status = 0; 387 + wait_event_timeout(ctrl->irq_wait, ctrl->irq_status, 388 + FCM_TIMEOUT_MSECS * HZ/1000); 389 + ctrl->status = ctrl->irq_status; 390 + 391 + /* store mdr value in case it was needed */ 392 + if (ctrl->use_mdr) 393 + ctrl->mdr = in_be32(&lbc->mdr); 394 + 395 + ctrl->use_mdr = 0; 396 + 397 + dev_vdbg(ctrl->dev, 398 + "fsl_elbc_run_command: stat=%08x mdr=%08x fmr=%08x\n", 399 + ctrl->status, ctrl->mdr, in_be32(&lbc->fmr)); 400 + 401 + /* returns 0 on success otherwise non-zero) */ 402 + return ctrl->status == LTESR_CC ? 0 : -EIO; 403 + } 404 + 405 + static void fsl_elbc_do_read(struct nand_chip *chip, int oob) 406 + { 407 + struct fsl_elbc_mtd *priv = chip->priv; 408 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 409 + struct elbc_regs __iomem *lbc = ctrl->regs; 410 + 411 + if (priv->page_size) { 412 + out_be32(&lbc->fir, 413 + (FIR_OP_CW0 << FIR_OP0_SHIFT) | 414 + (FIR_OP_CA << FIR_OP1_SHIFT) | 415 + (FIR_OP_PA << FIR_OP2_SHIFT) | 416 + (FIR_OP_CW1 << FIR_OP3_SHIFT) | 417 + (FIR_OP_RBW << FIR_OP4_SHIFT)); 418 + 419 + out_be32(&lbc->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) | 420 + (NAND_CMD_READSTART << FCR_CMD1_SHIFT)); 421 + } else { 422 + out_be32(&lbc->fir, 423 + (FIR_OP_CW0 << FIR_OP0_SHIFT) | 424 + (FIR_OP_CA << FIR_OP1_SHIFT) | 425 + (FIR_OP_PA << FIR_OP2_SHIFT) | 426 + (FIR_OP_RBW << FIR_OP3_SHIFT)); 427 + 428 + if (oob) 429 + out_be32(&lbc->fcr, NAND_CMD_READOOB << FCR_CMD0_SHIFT); 430 + else 431 + out_be32(&lbc->fcr, NAND_CMD_READ0 << FCR_CMD0_SHIFT); 432 + } 433 + } 434 + 435 + /* cmdfunc send commands to the FCM */ 436 + static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command, 437 + int column, int page_addr) 438 + { 439 + struct nand_chip *chip = mtd->priv; 440 + struct fsl_elbc_mtd *priv = chip->priv; 441 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 442 + struct elbc_regs __iomem *lbc = ctrl->regs; 443 + 444 + ctrl->use_mdr = 0; 445 + 446 + /* clear the read buffer */ 447 + ctrl->read_bytes = 0; 448 + if (command != NAND_CMD_PAGEPROG) 449 + ctrl->index = 0; 450 + 451 + switch (command) { 452 + /* READ0 and READ1 read the entire buffer to use hardware ECC. 
*/ 453 + case NAND_CMD_READ1: 454 + column += 256; 455 + 456 + /* fall-through */ 457 + case NAND_CMD_READ0: 458 + dev_dbg(ctrl->dev, 459 + "fsl_elbc_cmdfunc: NAND_CMD_READ0, page_addr:" 460 + " 0x%x, column: 0x%x.\n", page_addr, column); 461 + 462 + 463 + out_be32(&lbc->fbcr, 0); /* read entire page to enable ECC */ 464 + set_addr(mtd, 0, page_addr, 0); 465 + 466 + ctrl->read_bytes = mtd->writesize + mtd->oobsize; 467 + ctrl->index += column; 468 + 469 + fsl_elbc_do_read(chip, 0); 470 + fsl_elbc_run_command(mtd); 471 + return; 472 + 473 + /* READOOB reads only the OOB because no ECC is performed. */ 474 + case NAND_CMD_READOOB: 475 + dev_vdbg(ctrl->dev, 476 + "fsl_elbc_cmdfunc: NAND_CMD_READOOB, page_addr:" 477 + " 0x%x, column: 0x%x.\n", page_addr, column); 478 + 479 + out_be32(&lbc->fbcr, mtd->oobsize - column); 480 + set_addr(mtd, column, page_addr, 1); 481 + 482 + ctrl->read_bytes = mtd->writesize + mtd->oobsize; 483 + 484 + fsl_elbc_do_read(chip, 1); 485 + fsl_elbc_run_command(mtd); 486 + return; 487 + 488 + /* READID must read all 5 possible bytes while CEB is active */ 489 + case NAND_CMD_READID: 490 + dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_READID.\n"); 491 + 492 + out_be32(&lbc->fir, (FIR_OP_CW0 << FIR_OP0_SHIFT) | 493 + (FIR_OP_UA << FIR_OP1_SHIFT) | 494 + (FIR_OP_RBW << FIR_OP2_SHIFT)); 495 + out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT); 496 + /* 5 bytes for manuf, device and exts */ 497 + out_be32(&lbc->fbcr, 5); 498 + ctrl->read_bytes = 5; 499 + ctrl->use_mdr = 1; 500 + ctrl->mdr = 0; 501 + 502 + set_addr(mtd, 0, 0, 0); 503 + fsl_elbc_run_command(mtd); 504 + return; 505 + 506 + /* ERASE1 stores the block and page address */ 507 + case NAND_CMD_ERASE1: 508 + dev_vdbg(ctrl->dev, 509 + "fsl_elbc_cmdfunc: NAND_CMD_ERASE1, " 510 + "page_addr: 0x%x.\n", page_addr); 511 + set_addr(mtd, 0, page_addr, 0); 512 + return; 513 + 514 + /* ERASE2 uses the block and page address from ERASE1 */ 515 + case NAND_CMD_ERASE2: 516 + dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n"); 517 + 518 + out_be32(&lbc->fir, 519 + (FIR_OP_CW0 << FIR_OP0_SHIFT) | 520 + (FIR_OP_PA << FIR_OP1_SHIFT) | 521 + (FIR_OP_CM1 << FIR_OP2_SHIFT)); 522 + 523 + out_be32(&lbc->fcr, 524 + (NAND_CMD_ERASE1 << FCR_CMD0_SHIFT) | 525 + (NAND_CMD_ERASE2 << FCR_CMD1_SHIFT)); 526 + 527 + out_be32(&lbc->fbcr, 0); 528 + ctrl->read_bytes = 0; 529 + 530 + fsl_elbc_run_command(mtd); 531 + return; 532 + 533 + /* SEQIN sets up the addr buffer and all registers except the length */ 534 + case NAND_CMD_SEQIN: { 535 + __be32 fcr; 536 + dev_vdbg(ctrl->dev, 537 + "fsl_elbc_cmdfunc: NAND_CMD_SEQIN/PAGE_PROG, " 538 + "page_addr: 0x%x, column: 0x%x.\n", 539 + page_addr, column); 540 + 541 + ctrl->column = column; 542 + ctrl->oob = 0; 543 + 544 + fcr = (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT) | 545 + (NAND_CMD_SEQIN << FCR_CMD2_SHIFT); 546 + 547 + if (priv->page_size) { 548 + out_be32(&lbc->fir, 549 + (FIR_OP_CW0 << FIR_OP0_SHIFT) | 550 + (FIR_OP_CA << FIR_OP1_SHIFT) | 551 + (FIR_OP_PA << FIR_OP2_SHIFT) | 552 + (FIR_OP_WB << FIR_OP3_SHIFT) | 553 + (FIR_OP_CW1 << FIR_OP4_SHIFT)); 554 + 555 + fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT; 556 + } else { 557 + out_be32(&lbc->fir, 558 + (FIR_OP_CW0 << FIR_OP0_SHIFT) | 559 + (FIR_OP_CM2 << FIR_OP1_SHIFT) | 560 + (FIR_OP_CA << FIR_OP2_SHIFT) | 561 + (FIR_OP_PA << FIR_OP3_SHIFT) | 562 + (FIR_OP_WB << FIR_OP4_SHIFT) | 563 + (FIR_OP_CW1 << FIR_OP5_SHIFT)); 564 + 565 + if (column >= mtd->writesize) { 566 + /* OOB area --> READOOB */ 567 + column -= mtd->writesize; 568 + fcr |= 
NAND_CMD_READOOB << FCR_CMD0_SHIFT; 569 + ctrl->oob = 1; 570 + } else if (column < 256) { 571 + /* First 256 bytes --> READ0 */ 572 + fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT; 573 + } else { 574 + /* Second 256 bytes --> READ1 */ 575 + fcr |= NAND_CMD_READ1 << FCR_CMD0_SHIFT; 576 + } 577 + } 578 + 579 + out_be32(&lbc->fcr, fcr); 580 + set_addr(mtd, column, page_addr, ctrl->oob); 581 + return; 582 + } 583 + 584 + /* PAGEPROG reuses all of the setup from SEQIN and adds the length */ 585 + case NAND_CMD_PAGEPROG: { 586 + int full_page; 587 + dev_vdbg(ctrl->dev, 588 + "fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG " 589 + "writing %d bytes.\n", ctrl->index); 590 + 591 + /* if the write did not start at 0 or is not a full page 592 + * then set the exact length, otherwise use a full page 593 + * write so the HW generates the ECC. 594 + */ 595 + if (ctrl->oob || ctrl->column != 0 || 596 + ctrl->index != mtd->writesize + mtd->oobsize) { 597 + out_be32(&lbc->fbcr, ctrl->index); 598 + full_page = 0; 599 + } else { 600 + out_be32(&lbc->fbcr, 0); 601 + full_page = 1; 602 + } 603 + 604 + fsl_elbc_run_command(mtd); 605 + 606 + /* Read back the page in order to fill in the ECC for the 607 + * caller. Is this really needed? 608 + */ 609 + if (full_page && ctrl->oob_poi) { 610 + out_be32(&lbc->fbcr, 3); 611 + set_addr(mtd, 6, page_addr, 1); 612 + 613 + ctrl->read_bytes = mtd->writesize + 9; 614 + 615 + fsl_elbc_do_read(chip, 1); 616 + fsl_elbc_run_command(mtd); 617 + 618 + memcpy_fromio(ctrl->oob_poi + 6, 619 + &ctrl->addr[ctrl->index], 3); 620 + ctrl->index += 3; 621 + } 622 + 623 + ctrl->oob_poi = NULL; 624 + return; 625 + } 626 + 627 + /* CMD_STATUS must read the status byte while CEB is active */ 628 + /* Note - it does not wait for the ready line */ 629 + case NAND_CMD_STATUS: 630 + out_be32(&lbc->fir, 631 + (FIR_OP_CM0 << FIR_OP0_SHIFT) | 632 + (FIR_OP_RBW << FIR_OP1_SHIFT)); 633 + out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT); 634 + out_be32(&lbc->fbcr, 1); 635 + set_addr(mtd, 0, 0, 0); 636 + ctrl->read_bytes = 1; 637 + 638 + fsl_elbc_run_command(mtd); 639 + 640 + /* The chip always seems to report that it is 641 + * write-protected, even when it is not. 642 + */ 643 + setbits8(ctrl->addr, NAND_STATUS_WP); 644 + return; 645 + 646 + /* RESET without waiting for the ready line */ 647 + case NAND_CMD_RESET: 648 + dev_dbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_RESET.\n"); 649 + out_be32(&lbc->fir, FIR_OP_CM0 << FIR_OP0_SHIFT); 650 + out_be32(&lbc->fcr, NAND_CMD_RESET << FCR_CMD0_SHIFT); 651 + fsl_elbc_run_command(mtd); 652 + return; 653 + 654 + default: 655 + dev_err(ctrl->dev, 656 + "fsl_elbc_cmdfunc: error, unsupported command 0x%x.\n", 657 + command); 658 + } 659 + } 660 + 661 + static void fsl_elbc_select_chip(struct mtd_info *mtd, int chip) 662 + { 663 + /* The hardware does not seem to support multiple 664 + * chips per bank. 
665 + */ 666 + } 667 + 668 + /* 669 + * Write buf to the FCM Controller Data Buffer 670 + */ 671 + static void fsl_elbc_write_buf(struct mtd_info *mtd, const u8 *buf, int len) 672 + { 673 + struct nand_chip *chip = mtd->priv; 674 + struct fsl_elbc_mtd *priv = chip->priv; 675 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 676 + unsigned int bufsize = mtd->writesize + mtd->oobsize; 677 + 678 + if (len < 0) { 679 + dev_err(ctrl->dev, "write_buf of %d bytes", len); 680 + ctrl->status = 0; 681 + return; 682 + } 683 + 684 + if ((unsigned int)len > bufsize - ctrl->index) { 685 + dev_err(ctrl->dev, 686 + "write_buf beyond end of buffer " 687 + "(%d requested, %u available)\n", 688 + len, bufsize - ctrl->index); 689 + len = bufsize - ctrl->index; 690 + } 691 + 692 + memcpy_toio(&ctrl->addr[ctrl->index], buf, len); 693 + ctrl->index += len; 694 + } 695 + 696 + /* 697 + * read a byte from either the FCM hardware buffer if it has any data left 698 + * otherwise issue a command to read a single byte. 699 + */ 700 + static u8 fsl_elbc_read_byte(struct mtd_info *mtd) 701 + { 702 + struct nand_chip *chip = mtd->priv; 703 + struct fsl_elbc_mtd *priv = chip->priv; 704 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 705 + 706 + /* If there are still bytes in the FCM, then use the next byte. */ 707 + if (ctrl->index < ctrl->read_bytes) 708 + return in_8(&ctrl->addr[ctrl->index++]); 709 + 710 + dev_err(ctrl->dev, "read_byte beyond end of buffer\n"); 711 + return ERR_BYTE; 712 + } 713 + 714 + /* 715 + * Read from the FCM Controller Data Buffer 716 + */ 717 + static void fsl_elbc_read_buf(struct mtd_info *mtd, u8 *buf, int len) 718 + { 719 + struct nand_chip *chip = mtd->priv; 720 + struct fsl_elbc_mtd *priv = chip->priv; 721 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 722 + int avail; 723 + 724 + if (len < 0) 725 + return; 726 + 727 + avail = min((unsigned int)len, ctrl->read_bytes - ctrl->index); 728 + memcpy_fromio(buf, &ctrl->addr[ctrl->index], avail); 729 + ctrl->index += avail; 730 + 731 + if (len > avail) 732 + dev_err(ctrl->dev, 733 + "read_buf beyond end of buffer " 734 + "(%d requested, %d available)\n", 735 + len, avail); 736 + } 737 + 738 + /* 739 + * Verify buffer against the FCM Controller Data Buffer 740 + */ 741 + static int fsl_elbc_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) 742 + { 743 + struct nand_chip *chip = mtd->priv; 744 + struct fsl_elbc_mtd *priv = chip->priv; 745 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 746 + int i; 747 + 748 + if (len < 0) { 749 + dev_err(ctrl->dev, "write_buf of %d bytes", len); 750 + return -EINVAL; 751 + } 752 + 753 + if ((unsigned int)len > ctrl->read_bytes - ctrl->index) { 754 + dev_err(ctrl->dev, 755 + "verify_buf beyond end of buffer " 756 + "(%d requested, %u available)\n", 757 + len, ctrl->read_bytes - ctrl->index); 758 + 759 + ctrl->index = ctrl->read_bytes; 760 + return -EINVAL; 761 + } 762 + 763 + for (i = 0; i < len; i++) 764 + if (in_8(&ctrl->addr[ctrl->index + i]) != buf[i]) 765 + break; 766 + 767 + ctrl->index += len; 768 + return i == len && ctrl->status == LTESR_CC ? 0 : -EIO; 769 + } 770 + 771 + /* This function is called after Program and Erase Operations to 772 + * check for success or failure. 
773 + */ 774 + static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip) 775 + { 776 + struct fsl_elbc_mtd *priv = chip->priv; 777 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 778 + struct elbc_regs __iomem *lbc = ctrl->regs; 779 + 780 + if (ctrl->status != LTESR_CC) 781 + return NAND_STATUS_FAIL; 782 + 783 + /* Use READ_STATUS command, but wait for the device to be ready */ 784 + ctrl->use_mdr = 0; 785 + out_be32(&lbc->fir, 786 + (FIR_OP_CW0 << FIR_OP0_SHIFT) | 787 + (FIR_OP_RBW << FIR_OP1_SHIFT)); 788 + out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT); 789 + out_be32(&lbc->fbcr, 1); 790 + set_addr(mtd, 0, 0, 0); 791 + ctrl->read_bytes = 1; 792 + 793 + fsl_elbc_run_command(mtd); 794 + 795 + if (ctrl->status != LTESR_CC) 796 + return NAND_STATUS_FAIL; 797 + 798 + /* The chip always seems to report that it is 799 + * write-protected, even when it is not. 800 + */ 801 + setbits8(ctrl->addr, NAND_STATUS_WP); 802 + return fsl_elbc_read_byte(mtd); 803 + } 804 + 805 + static int fsl_elbc_chip_init_tail(struct mtd_info *mtd) 806 + { 807 + struct nand_chip *chip = mtd->priv; 808 + struct fsl_elbc_mtd *priv = chip->priv; 809 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 810 + struct elbc_regs __iomem *lbc = ctrl->regs; 811 + unsigned int al; 812 + 813 + /* calculate FMR Address Length field */ 814 + al = 0; 815 + if (chip->pagemask & 0xffff0000) 816 + al++; 817 + if (chip->pagemask & 0xff000000) 818 + al++; 819 + 820 + /* add to ECCM mode set in fsl_elbc_init */ 821 + priv->fmr |= (12 << FMR_CWTO_SHIFT) | /* Timeout > 12 ms */ 822 + (al << FMR_AL_SHIFT); 823 + 824 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->numchips = %d\n", 825 + chip->numchips); 826 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->chipsize = %ld\n", 827 + chip->chipsize); 828 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->pagemask = %8x\n", 829 + chip->pagemask); 830 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->chip_delay = %d\n", 831 + chip->chip_delay); 832 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->badblockpos = %d\n", 833 + chip->badblockpos); 834 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->chip_shift = %d\n", 835 + chip->chip_shift); 836 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->page_shift = %d\n", 837 + chip->page_shift); 838 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n", 839 + chip->phys_erase_shift); 840 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecclayout = %p\n", 841 + chip->ecclayout); 842 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.mode = %d\n", 843 + chip->ecc.mode); 844 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.steps = %d\n", 845 + chip->ecc.steps); 846 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.bytes = %d\n", 847 + chip->ecc.bytes); 848 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.total = %d\n", 849 + chip->ecc.total); 850 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.layout = %p\n", 851 + chip->ecc.layout); 852 + dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags); 853 + dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->size = %d\n", mtd->size); 854 + dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->erasesize = %d\n", 855 + mtd->erasesize); 856 + dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->writesize = %d\n", 857 + mtd->writesize); 858 + dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->oobsize = %d\n", 859 + mtd->oobsize); 860 + 861 + /* adjust Option Register and ECC to match Flash page size */ 862 + if (mtd->writesize == 512) { 863 + priv->page_size = 0; 864 + clrbits32(&lbc->bank[priv->bank].or, ~OR_FCM_PGS); 865 + } else if (mtd->writesize == 2048) { 866 + priv->page_size = 1; 867 + 
setbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS); 868 + /* adjust ecc setup if needed */ 869 + if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) == 870 + BR_DECC_CHK_GEN) { 871 + chip->ecc.size = 512; 872 + chip->ecc.layout = (priv->fmr & FMR_ECCM) ? 873 + &fsl_elbc_oob_lp_eccm1 : 874 + &fsl_elbc_oob_lp_eccm0; 875 + mtd->ecclayout = chip->ecc.layout; 876 + mtd->oobavail = chip->ecc.layout->oobavail; 877 + } 878 + } else { 879 + dev_err(ctrl->dev, 880 + "fsl_elbc_init: page size %d is not supported\n", 881 + mtd->writesize); 882 + return -1; 883 + } 884 + 885 + /* The default u-boot configuration on MPC8313ERDB causes errors; 886 + * more delay is needed. This should be safe for other boards 887 + * as well. 888 + */ 889 + setbits32(&lbc->bank[priv->bank].or, 0x70); 890 + return 0; 891 + } 892 + 893 + static int fsl_elbc_read_page(struct mtd_info *mtd, 894 + struct nand_chip *chip, 895 + uint8_t *buf) 896 + { 897 + fsl_elbc_read_buf(mtd, buf, mtd->writesize); 898 + fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize); 899 + 900 + if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL) 901 + mtd->ecc_stats.failed++; 902 + 903 + return 0; 904 + } 905 + 906 + /* ECC will be calculated automatically, and errors will be detected in 907 + * waitfunc. 908 + */ 909 + static void fsl_elbc_write_page(struct mtd_info *mtd, 910 + struct nand_chip *chip, 911 + const uint8_t *buf) 912 + { 913 + struct fsl_elbc_mtd *priv = chip->priv; 914 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 915 + 916 + fsl_elbc_write_buf(mtd, buf, mtd->writesize); 917 + fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize); 918 + 919 + ctrl->oob_poi = chip->oob_poi; 920 + } 921 + 922 + static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv) 923 + { 924 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 925 + struct elbc_regs __iomem *lbc = ctrl->regs; 926 + struct nand_chip *chip = &priv->chip; 927 + 928 + dev_dbg(priv->dev, "eLBC Set Information for bank %d\n", priv->bank); 929 + 930 + /* Fill in fsl_elbc_mtd structure */ 931 + priv->mtd.priv = chip; 932 + priv->mtd.owner = THIS_MODULE; 933 + priv->fmr = 0; /* rest filled in later */ 934 + 935 + /* fill in nand_chip structure */ 936 + /* set up function call table */ 937 + chip->read_byte = fsl_elbc_read_byte; 938 + chip->write_buf = fsl_elbc_write_buf; 939 + chip->read_buf = fsl_elbc_read_buf; 940 + chip->verify_buf = fsl_elbc_verify_buf; 941 + chip->select_chip = fsl_elbc_select_chip; 942 + chip->cmdfunc = fsl_elbc_cmdfunc; 943 + chip->waitfunc = fsl_elbc_wait; 944 + 945 + /* set up nand options */ 946 + chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR; 947 + 948 + chip->controller = &ctrl->controller; 949 + chip->priv = priv; 950 + 951 + chip->ecc.read_page = fsl_elbc_read_page; 952 + chip->ecc.write_page = fsl_elbc_write_page; 953 + 954 + /* If CS Base Register selects full hardware ECC then use it */ 955 + if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) == 956 + BR_DECC_CHK_GEN) { 957 + chip->ecc.mode = NAND_ECC_HW; 958 + /* put in small page settings and adjust later if needed */ 959 + chip->ecc.layout = (priv->fmr & FMR_ECCM) ? 
960 + &fsl_elbc_oob_sp_eccm1 : &fsl_elbc_oob_sp_eccm0; 961 + chip->ecc.size = 512; 962 + chip->ecc.bytes = 3; 963 + } else { 964 + /* otherwise fall back to default software ECC */ 965 + chip->ecc.mode = NAND_ECC_SOFT; 966 + } 967 + 968 + return 0; 969 + } 970 + 971 + static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv) 972 + { 973 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 974 + 975 + nand_release(&priv->mtd); 976 + 977 + if (priv->vbase) 978 + iounmap(priv->vbase); 979 + 980 + ctrl->chips[priv->bank] = NULL; 981 + kfree(priv); 982 + 983 + return 0; 984 + } 985 + 986 + static int fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl, 987 + struct device_node *node) 988 + { 989 + struct elbc_regs __iomem *lbc = ctrl->regs; 990 + struct fsl_elbc_mtd *priv; 991 + struct resource res; 992 + #ifdef CONFIG_MTD_PARTITIONS 993 + static const char *part_probe_types[] 994 + = { "cmdlinepart", "RedBoot", NULL }; 995 + struct mtd_partition *parts; 996 + #endif 997 + int ret; 998 + int bank; 999 + 1000 + /* get, allocate and map the memory resource */ 1001 + ret = of_address_to_resource(node, 0, &res); 1002 + if (ret) { 1003 + dev_err(ctrl->dev, "failed to get resource\n"); 1004 + return ret; 1005 + } 1006 + 1007 + /* find which chip select it is connected to */ 1008 + for (bank = 0; bank < MAX_BANKS; bank++) 1009 + if ((in_be32(&lbc->bank[bank].br) & BR_V) && 1010 + (in_be32(&lbc->bank[bank].br) & BR_MSEL) == BR_MS_FCM && 1011 + (in_be32(&lbc->bank[bank].br) & 1012 + in_be32(&lbc->bank[bank].or) & BR_BA) 1013 + == res.start) 1014 + break; 1015 + 1016 + if (bank >= MAX_BANKS) { 1017 + dev_err(ctrl->dev, "address did not match any chip selects\n"); 1018 + return -ENODEV; 1019 + } 1020 + 1021 + priv = kzalloc(sizeof(*priv), GFP_KERNEL); 1022 + if (!priv) 1023 + return -ENOMEM; 1024 + 1025 + ctrl->chips[bank] = priv; 1026 + priv->bank = bank; 1027 + priv->ctrl = ctrl; 1028 + priv->dev = ctrl->dev; 1029 + 1030 + priv->vbase = ioremap(res.start, res.end - res.start + 1); 1031 + if (!priv->vbase) { 1032 + dev_err(ctrl->dev, "failed to map chip region\n"); 1033 + ret = -ENOMEM; 1034 + goto err; 1035 + } 1036 + 1037 + ret = fsl_elbc_chip_init(priv); 1038 + if (ret) 1039 + goto err; 1040 + 1041 + ret = nand_scan_ident(&priv->mtd, 1); 1042 + if (ret) 1043 + goto err; 1044 + 1045 + ret = fsl_elbc_chip_init_tail(&priv->mtd); 1046 + if (ret) 1047 + goto err; 1048 + 1049 + ret = nand_scan_tail(&priv->mtd); 1050 + if (ret) 1051 + goto err; 1052 + 1053 + #ifdef CONFIG_MTD_PARTITIONS 1054 + /* First look for RedBoot table or partitions on the command 1055 + * line, these take precedence over device tree information */ 1056 + ret = parse_mtd_partitions(&priv->mtd, part_probe_types, &parts, 0); 1057 + if (ret < 0) 1058 + goto err; 1059 + 1060 + #ifdef CONFIG_MTD_OF_PARTS 1061 + if (ret == 0) { 1062 + ret = of_mtd_parse_partitions(priv->dev, &priv->mtd, 1063 + node, &parts); 1064 + if (ret < 0) 1065 + goto err; 1066 + } 1067 + #endif 1068 + 1069 + if (ret > 0) 1070 + add_mtd_partitions(&priv->mtd, parts, ret); 1071 + else 1072 + #endif 1073 + add_mtd_device(&priv->mtd); 1074 + 1075 + printk(KERN_INFO "eLBC NAND device at 0x%zx, bank %d\n", 1076 + res.start, priv->bank); 1077 + return 0; 1078 + 1079 + err: 1080 + fsl_elbc_chip_remove(priv); 1081 + return ret; 1082 + } 1083 + 1084 + static int __devinit fsl_elbc_ctrl_init(struct fsl_elbc_ctrl *ctrl) 1085 + { 1086 + struct elbc_regs __iomem *lbc = ctrl->regs; 1087 + 1088 + /* clear event registers */ 1089 + setbits32(&lbc->ltesr, LTESR_NAND_MASK); 1090 + 
out_be32(&lbc->lteatr, 0);
1091 +
1092 + /* Enable interrupts for any detected events */
1093 + out_be32(&lbc->lteir, LTESR_NAND_MASK);
1094 +
1095 + ctrl->read_bytes = 0;
1096 + ctrl->index = 0;
1097 + ctrl->addr = NULL;
1098 +
1099 + return 0;
1100 + }
1101 +
1102 + static int __devexit fsl_elbc_ctrl_remove(struct of_device *ofdev)
1103 + {
1104 + struct fsl_elbc_ctrl *ctrl = dev_get_drvdata(&ofdev->dev);
1105 + int i;
1106 +
1107 + for (i = 0; i < MAX_BANKS; i++)
1108 + if (ctrl->chips[i])
1109 + fsl_elbc_chip_remove(ctrl->chips[i]);
1110 +
1111 + if (ctrl->irq)
1112 + free_irq(ctrl->irq, ctrl);
1113 +
1114 + if (ctrl->regs)
1115 + iounmap(ctrl->regs);
1116 +
1117 + dev_set_drvdata(&ofdev->dev, NULL);
1118 + kfree(ctrl);
1119 + return 0;
1120 + }
1121 +
1122 + /* NOTE: This interrupt is also used to report other localbus events,
1123 + * such as transaction errors on other chipselects. If we want to
1124 + * capture those, we'll need to move the IRQ code into a shared
1125 + * LBC driver.
1126 + */
1127 +
1128 + static irqreturn_t fsl_elbc_ctrl_irq(int irqno, void *data)
1129 + {
1130 + struct fsl_elbc_ctrl *ctrl = data;
1131 + struct elbc_regs __iomem *lbc = ctrl->regs;
1132 + __be32 status = in_be32(&lbc->ltesr) & LTESR_NAND_MASK;
1133 +
1134 + if (status) {
1135 + out_be32(&lbc->ltesr, status);
1136 + out_be32(&lbc->lteatr, 0);
1137 +
1138 + ctrl->irq_status = status;
1139 + smp_wmb();
1140 + wake_up(&ctrl->irq_wait);
1141 +
1142 + return IRQ_HANDLED;
1143 + }
1144 +
1145 + return IRQ_NONE;
1146 + }
1147 +
1148 + /* fsl_elbc_ctrl_probe
1149 + *
1150 + * called by the device layer when it finds a device matching
1151 + * one this driver can handle. This code allocates all of
1152 + * the resources needed for the controller only. The
1153 + * resources for the NAND banks themselves are allocated
1154 + * in the chip probe function.
1155 + */ 1156 + 1157 + static int __devinit fsl_elbc_ctrl_probe(struct of_device *ofdev, 1158 + const struct of_device_id *match) 1159 + { 1160 + struct device_node *child; 1161 + struct fsl_elbc_ctrl *ctrl; 1162 + int ret; 1163 + 1164 + ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); 1165 + if (!ctrl) 1166 + return -ENOMEM; 1167 + 1168 + dev_set_drvdata(&ofdev->dev, ctrl); 1169 + 1170 + spin_lock_init(&ctrl->controller.lock); 1171 + init_waitqueue_head(&ctrl->controller.wq); 1172 + init_waitqueue_head(&ctrl->irq_wait); 1173 + 1174 + ctrl->regs = of_iomap(ofdev->node, 0); 1175 + if (!ctrl->regs) { 1176 + dev_err(&ofdev->dev, "failed to get memory region\n"); 1177 + ret = -ENODEV; 1178 + goto err; 1179 + } 1180 + 1181 + ctrl->irq = of_irq_to_resource(ofdev->node, 0, NULL); 1182 + if (ctrl->irq == NO_IRQ) { 1183 + dev_err(&ofdev->dev, "failed to get irq resource\n"); 1184 + ret = -ENODEV; 1185 + goto err; 1186 + } 1187 + 1188 + ctrl->dev = &ofdev->dev; 1189 + 1190 + ret = fsl_elbc_ctrl_init(ctrl); 1191 + if (ret < 0) 1192 + goto err; 1193 + 1194 + ret = request_irq(ctrl->irq, fsl_elbc_ctrl_irq, 0, "fsl-elbc", ctrl); 1195 + if (ret != 0) { 1196 + dev_err(&ofdev->dev, "failed to install irq (%d)\n", 1197 + ctrl->irq); 1198 + ret = ctrl->irq; 1199 + goto err; 1200 + } 1201 + 1202 + for_each_child_of_node(ofdev->node, child) 1203 + if (of_device_is_compatible(child, "fsl,elbc-fcm-nand")) 1204 + fsl_elbc_chip_probe(ctrl, child); 1205 + 1206 + return 0; 1207 + 1208 + err: 1209 + fsl_elbc_ctrl_remove(ofdev); 1210 + return ret; 1211 + } 1212 + 1213 + static const struct of_device_id fsl_elbc_match[] = { 1214 + { 1215 + .compatible = "fsl,elbc", 1216 + }, 1217 + {} 1218 + }; 1219 + 1220 + static struct of_platform_driver fsl_elbc_ctrl_driver = { 1221 + .driver = { 1222 + .name = "fsl-elbc", 1223 + }, 1224 + .match_table = fsl_elbc_match, 1225 + .probe = fsl_elbc_ctrl_probe, 1226 + .remove = __devexit_p(fsl_elbc_ctrl_remove), 1227 + }; 1228 + 1229 + static int __init fsl_elbc_init(void) 1230 + { 1231 + return of_register_platform_driver(&fsl_elbc_ctrl_driver); 1232 + } 1233 + 1234 + static void __exit fsl_elbc_exit(void) 1235 + { 1236 + of_unregister_platform_driver(&fsl_elbc_ctrl_driver); 1237 + } 1238 + 1239 + module_init(fsl_elbc_init); 1240 + module_exit(fsl_elbc_exit); 1241 + 1242 + MODULE_LICENSE("GPL"); 1243 + MODULE_AUTHOR("Freescale"); 1244 + MODULE_DESCRIPTION("Freescale Enhanced Local Bus Controller MTD NAND driver");
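The FMR address-length (AL) computation in fsl_elbc_chip_init_tail() above is self-contained enough to check in isolation: one extra row-address cycle is needed for each populated high byte of the page mask. A minimal userspace sketch of the same logic (the sample pagemask values below are illustrative, not taken from a real chip):

#include <stdio.h>

/* Mirrors the AL calculation in fsl_elbc_chip_init_tail() */
static unsigned int fmr_address_length(unsigned int pagemask)
{
	unsigned int al = 0;

	if (pagemask & 0xffff0000)
		al++;
	if (pagemask & 0xff000000)
		al++;
	return al;
}

int main(void)
{
	printf("%u %u %u\n",
	       fmr_address_length(0x0000ffff),  /* two address bytes suffice: AL = 0 */
	       fmr_address_length(0x001fffff),  /* AL = 1 */
	       fmr_address_length(0x0fffffff)); /* AL = 2 */
	return 0;
}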
+6 -2
drivers/mtd/nand/nand_base.c
··· 2469 chip->ecc.write_oob = nand_write_oob_std; 2470 2471 case NAND_ECC_HW_SYNDROME: 2472 - if (!chip->ecc.calculate || !chip->ecc.correct || 2473 - !chip->ecc.hwctl) { 2474 printk(KERN_WARNING "No ECC functions supplied, " 2475 "Hardware ECC not possible\n"); 2476 BUG();
··· 2469 chip->ecc.write_oob = nand_write_oob_std; 2470 2471 case NAND_ECC_HW_SYNDROME: 2472 + if ((!chip->ecc.calculate || !chip->ecc.correct || 2473 + !chip->ecc.hwctl) && 2474 + (!chip->ecc.read_page || 2475 + chip->ecc.read_page == nand_read_page_hwecc || 2476 + !chip->ecc.write_page || 2477 + chip->ecc.write_page == nand_write_page_hwecc)) { 2478 printk(KERN_WARNING "No ECC functions supplied, " 2479 "Hardware ECC not possible\n"); 2480 BUG();
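Read as a predicate, the relaxed check says: hardware ECC is still viable when the driver either supplies all three low-level hooks or overrides both whole-page accessors with non-default ones, as the eLBC driver above does. A restatement in plain C (the struct and its field names are illustrative only):

#include <stdbool.h>

/* BUG() above fires only when neither the low-level hooks nor
 * driver-specific page accessors are provided. */
struct ecc_ops {
	bool has_calculate, has_correct, has_hwctl;
	bool own_read_page, own_write_page; /* set and not the default */
};

static bool hw_ecc_possible(const struct ecc_ops *ops)
{
	bool low_level = ops->has_calculate && ops->has_correct &&
			 ops->has_hwctl;
	bool full_page = ops->own_read_page && ops->own_write_page;

	return low_level || full_page;
}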
+171
drivers/mtd/nand/orion_nand.c
···
··· 1 + /* 2 + * drivers/mtd/nand/orion_nand.c 3 + * 4 + * NAND support for Marvell Orion SoC platforms 5 + * 6 + * Tzachi Perelstein <tzachi@marvell.com> 7 + * 8 + * This file is licensed under the terms of the GNU General Public 9 + * License version 2. This program is licensed "as is" without any 10 + * warranty of any kind, whether express or implied. 11 + */ 12 + 13 + #include <linux/slab.h> 14 + #include <linux/module.h> 15 + #include <linux/platform_device.h> 16 + #include <linux/mtd/mtd.h> 17 + #include <linux/mtd/nand.h> 18 + #include <linux/mtd/partitions.h> 19 + #include <asm/io.h> 20 + #include <asm/sizes.h> 21 + #include <asm/arch/platform.h> 22 + #include <asm/arch/hardware.h> 23 + 24 + #ifdef CONFIG_MTD_CMDLINE_PARTS 25 + static const char *part_probes[] = { "cmdlinepart", NULL }; 26 + #endif 27 + 28 + static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) 29 + { 30 + struct nand_chip *nc = mtd->priv; 31 + struct orion_nand_data *board = nc->priv; 32 + u32 offs; 33 + 34 + if (cmd == NAND_CMD_NONE) 35 + return; 36 + 37 + if (ctrl & NAND_CLE) 38 + offs = (1 << board->cle); 39 + else if (ctrl & NAND_ALE) 40 + offs = (1 << board->ale); 41 + else 42 + return; 43 + 44 + if (nc->options & NAND_BUSWIDTH_16) 45 + offs <<= 1; 46 + 47 + writeb(cmd, nc->IO_ADDR_W + offs); 48 + } 49 + 50 + static int __init orion_nand_probe(struct platform_device *pdev) 51 + { 52 + struct mtd_info *mtd; 53 + struct nand_chip *nc; 54 + struct orion_nand_data *board; 55 + void __iomem *io_base; 56 + int ret = 0; 57 + #ifdef CONFIG_MTD_PARTITIONS 58 + struct mtd_partition *partitions = NULL; 59 + int num_part = 0; 60 + #endif 61 + 62 + nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL); 63 + if (!nc) { 64 + printk(KERN_ERR "orion_nand: failed to allocate device structure.\n"); 65 + ret = -ENOMEM; 66 + goto no_res; 67 + } 68 + mtd = (struct mtd_info *)(nc + 1); 69 + 70 + io_base = ioremap(pdev->resource[0].start, 71 + pdev->resource[0].end - pdev->resource[0].start + 1); 72 + if (!io_base) { 73 + printk(KERN_ERR "orion_nand: ioremap failed\n"); 74 + ret = -EIO; 75 + goto no_res; 76 + } 77 + 78 + board = pdev->dev.platform_data; 79 + 80 + mtd->priv = nc; 81 + mtd->owner = THIS_MODULE; 82 + 83 + nc->priv = board; 84 + nc->IO_ADDR_R = nc->IO_ADDR_W = io_base; 85 + nc->cmd_ctrl = orion_nand_cmd_ctrl; 86 + nc->ecc.mode = NAND_ECC_SOFT; 87 + 88 + if (board->width == 16) 89 + nc->options |= NAND_BUSWIDTH_16; 90 + 91 + platform_set_drvdata(pdev, mtd); 92 + 93 + if (nand_scan(mtd, 1)) { 94 + ret = -ENXIO; 95 + goto no_dev; 96 + } 97 + 98 + #ifdef CONFIG_MTD_PARTITIONS 99 + #ifdef CONFIG_MTD_CMDLINE_PARTS 100 + mtd->name = "orion_nand"; 101 + num_part = parse_mtd_partitions(mtd, part_probes, &partitions, 0); 102 + #endif 103 + /* If cmdline partitions have been passed, let them be used */ 104 + if (num_part <= 0) { 105 + num_part = board->nr_parts; 106 + partitions = board->parts; 107 + } 108 + 109 + if (partitions && num_part > 0) 110 + ret = add_mtd_partitions(mtd, partitions, num_part); 111 + else 112 + ret = add_mtd_device(mtd); 113 + #else 114 + ret = add_mtd_device(mtd); 115 + #endif 116 + 117 + if (ret) { 118 + nand_release(mtd); 119 + goto no_dev; 120 + } 121 + 122 + return 0; 123 + 124 + no_dev: 125 + platform_set_drvdata(pdev, NULL); 126 + iounmap(io_base); 127 + no_res: 128 + kfree(nc); 129 + 130 + return ret; 131 + } 132 + 133 + static int __devexit orion_nand_remove(struct platform_device *pdev) 134 + { 135 + struct mtd_info *mtd = 
platform_get_drvdata(pdev); 136 + struct nand_chip *nc = mtd->priv; 137 + 138 + nand_release(mtd); 139 + 140 + iounmap(nc->IO_ADDR_W); 141 + 142 + kfree(nc); 143 + 144 + return 0; 145 + } 146 + 147 + static struct platform_driver orion_nand_driver = { 148 + .probe = orion_nand_probe, 149 + .remove = orion_nand_remove, 150 + .driver = { 151 + .name = "orion_nand", 152 + .owner = THIS_MODULE, 153 + }, 154 + }; 155 + 156 + static int __init orion_nand_init(void) 157 + { 158 + return platform_driver_register(&orion_nand_driver); 159 + } 160 + 161 + static void __exit orion_nand_exit(void) 162 + { 163 + platform_driver_unregister(&orion_nand_driver); 164 + } 165 + 166 + module_init(orion_nand_init); 167 + module_exit(orion_nand_exit); 168 + 169 + MODULE_LICENSE("GPL"); 170 + MODULE_AUTHOR("Tzachi Perelstein"); 171 + MODULE_DESCRIPTION("NAND glue for Orion platforms");
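The command/address routing in orion_nand_cmd_ctrl() above relies on the CLE and ALE pins being wired to address lines, so the latch is selected purely by the write offset, doubled on a 16-bit bus. A standalone sketch of the decode (the NAND_CLE/NAND_ALE bit values are assumed here for illustration):

#include <stdio.h>

#define NAND_CLE 0x02 /* assumed ctrl flag values; illustration only */
#define NAND_ALE 0x04

/* Mirrors orion_nand_cmd_ctrl(): command and address bytes land at
 * offsets derived from the board's cle/ale address-line numbers. */
static unsigned int orion_latch_offset(unsigned int ctrl, int cle, int ale,
				       int bus16)
{
	unsigned int offs = 0;

	if (ctrl & NAND_CLE)
		offs = 1u << cle;
	else if (ctrl & NAND_ALE)
		offs = 1u << ale;

	if (bus16)
		offs <<= 1; /* byte offset doubles on a 16-bit bus */

	return offs;
}

int main(void)
{
	/* e.g. cle=0, ale=1 on an 8-bit bus: command at +1, address at +2 */
	printf("cmd @ +0x%x, addr @ +0x%x\n",
	       orion_latch_offset(NAND_CLE, 0, 1, 0),
	       orion_latch_offset(NAND_ALE, 0, 1, 0));
	return 0;
}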
+243
drivers/mtd/nand/pasemi_nand.c
···
··· 1 + /* 2 + * Copyright (C) 2006-2007 PA Semi, Inc 3 + * 4 + * Author: Egor Martovetsky <egor@pasemi.com> 5 + * Maintained by: Olof Johansson <olof@lixom.net> 6 + * 7 + * Driver for the PWRficient onchip NAND flash interface 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License version 2 as 11 + * published by the Free Software Foundation. 12 + * 13 + * This program is distributed in the hope that it will be useful, 14 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 + * GNU General Public License for more details. 17 + * 18 + * You should have received a copy of the GNU General Public License 19 + * along with this program; if not, write to the Free Software 20 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 + */ 22 + 23 + #undef DEBUG 24 + 25 + #include <linux/slab.h> 26 + #include <linux/init.h> 27 + #include <linux/module.h> 28 + #include <linux/mtd/mtd.h> 29 + #include <linux/mtd/nand.h> 30 + #include <linux/mtd/nand_ecc.h> 31 + #include <linux/of_platform.h> 32 + #include <linux/platform_device.h> 33 + #include <linux/pci.h> 34 + 35 + #include <asm/io.h> 36 + 37 + #define LBICTRL_LPCCTL_NR 0x00004000 38 + #define CLE_PIN_CTL 15 39 + #define ALE_PIN_CTL 14 40 + 41 + static unsigned int lpcctl; 42 + static struct mtd_info *pasemi_nand_mtd; 43 + static const char driver_name[] = "pasemi-nand"; 44 + 45 + static void pasemi_read_buf(struct mtd_info *mtd, u_char *buf, int len) 46 + { 47 + struct nand_chip *chip = mtd->priv; 48 + 49 + while (len > 0x800) { 50 + memcpy_fromio(buf, chip->IO_ADDR_R, 0x800); 51 + buf += 0x800; 52 + len -= 0x800; 53 + } 54 + memcpy_fromio(buf, chip->IO_ADDR_R, len); 55 + } 56 + 57 + static void pasemi_write_buf(struct mtd_info *mtd, const u_char *buf, int len) 58 + { 59 + struct nand_chip *chip = mtd->priv; 60 + 61 + while (len > 0x800) { 62 + memcpy_toio(chip->IO_ADDR_R, buf, 0x800); 63 + buf += 0x800; 64 + len -= 0x800; 65 + } 66 + memcpy_toio(chip->IO_ADDR_R, buf, len); 67 + } 68 + 69 + static void pasemi_hwcontrol(struct mtd_info *mtd, int cmd, 70 + unsigned int ctrl) 71 + { 72 + struct nand_chip *chip = mtd->priv; 73 + 74 + if (cmd == NAND_CMD_NONE) 75 + return; 76 + 77 + if (ctrl & NAND_CLE) 78 + out_8(chip->IO_ADDR_W + (1 << CLE_PIN_CTL), cmd); 79 + else 80 + out_8(chip->IO_ADDR_W + (1 << ALE_PIN_CTL), cmd); 81 + 82 + /* Push out posted writes */ 83 + eieio(); 84 + inl(lpcctl); 85 + } 86 + 87 + int pasemi_device_ready(struct mtd_info *mtd) 88 + { 89 + return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR); 90 + } 91 + 92 + static int __devinit pasemi_nand_probe(struct of_device *ofdev, 93 + const struct of_device_id *match) 94 + { 95 + struct pci_dev *pdev; 96 + struct device_node *np = ofdev->node; 97 + struct resource res; 98 + struct nand_chip *chip; 99 + int err = 0; 100 + 101 + err = of_address_to_resource(np, 0, &res); 102 + 103 + if (err) 104 + return -EINVAL; 105 + 106 + /* We only support one device at the moment */ 107 + if (pasemi_nand_mtd) 108 + return -ENODEV; 109 + 110 + pr_debug("pasemi_nand at %lx-%lx\n", res.start, res.end); 111 + 112 + /* Allocate memory for MTD device structure and private data */ 113 + pasemi_nand_mtd = kzalloc(sizeof(struct mtd_info) + 114 + sizeof(struct nand_chip), GFP_KERNEL); 115 + if (!pasemi_nand_mtd) { 116 + printk(KERN_WARNING 117 + "Unable to allocate PASEMI NAND MTD device structure\n"); 118 + err = -ENOMEM; 119 + 
goto out;
120 + }
121 +
122 + /* Get pointer to private data */
123 + chip = (struct nand_chip *)&pasemi_nand_mtd[1];
124 +
125 + /* Link the private data with the MTD structure */
126 + pasemi_nand_mtd->priv = chip;
127 + pasemi_nand_mtd->owner = THIS_MODULE;
128 +
129 + chip->IO_ADDR_R = of_iomap(np, 0);
130 + chip->IO_ADDR_W = chip->IO_ADDR_R;
131 +
132 + if (!chip->IO_ADDR_R) {
133 + err = -EIO;
134 + goto out_mtd;
135 + }
136 +
137 + pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa008, NULL);
138 + if (!pdev) {
139 + err = -ENODEV;
140 + goto out_ior;
141 + }
142 +
143 + lpcctl = pci_resource_start(pdev, 0);
144 +
145 + if (!request_region(lpcctl, 4, driver_name)) {
146 + err = -EBUSY;
147 + goto out_ior;
148 + }
149 +
150 + chip->cmd_ctrl = pasemi_hwcontrol;
151 + chip->dev_ready = pasemi_device_ready;
152 + chip->read_buf = pasemi_read_buf;
153 + chip->write_buf = pasemi_write_buf;
154 + chip->chip_delay = 0;
155 + chip->ecc.mode = NAND_ECC_SOFT;
156 +
157 + /* Enable the following for a flash based bad block table */
158 + chip->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR;
159 +
160 + /* Scan to find existence of the device */
161 + if (nand_scan(pasemi_nand_mtd, 1)) {
162 + err = -ENXIO;
163 + goto out_lpc;
164 + }
165 +
166 + if (add_mtd_device(pasemi_nand_mtd)) {
167 + printk(KERN_ERR "pasemi_nand: Unable to register MTD device\n");
168 + err = -ENODEV;
169 + goto out_lpc;
170 + }
171 +
172 + printk(KERN_INFO "PA Semi NAND flash at %08lx, control at I/O %x\n",
173 + res.start, lpcctl);
174 +
175 + return 0;
176 +
177 + out_lpc:
178 + release_region(lpcctl, 4);
179 + out_ior:
180 + iounmap(chip->IO_ADDR_R);
181 + out_mtd:
182 + kfree(pasemi_nand_mtd);
183 + out:
184 + return err;
185 + }
186 +
187 + static int __devexit pasemi_nand_remove(struct of_device *ofdev)
188 + {
189 + struct nand_chip *chip;
190 +
191 + if (!pasemi_nand_mtd)
192 + return 0;
193 +
194 + chip = pasemi_nand_mtd->priv;
195 +
196 + /* Release resources, unregister device */
197 + nand_release(pasemi_nand_mtd);
198 +
199 + release_region(lpcctl, 4);
200 +
201 + iounmap(chip->IO_ADDR_R);
202 +
203 + /* Free the MTD device structure */
204 + kfree(pasemi_nand_mtd);
205 +
206 + pasemi_nand_mtd = NULL;
207 +
208 + return 0;
209 + }
210 +
211 + static struct of_device_id pasemi_nand_match[] =
212 + {
213 + {
214 + .compatible = "pasemi,localbus-nand",
215 + },
216 + {},
217 + };
218 +
219 + MODULE_DEVICE_TABLE(of, pasemi_nand_match);
220 +
221 + static struct of_platform_driver pasemi_nand_driver =
222 + {
223 + .name = (char*)driver_name,
224 + .match_table = pasemi_nand_match,
225 + .probe = pasemi_nand_probe,
226 + .remove = pasemi_nand_remove,
227 + };
228 +
229 + static int __init pasemi_nand_init(void)
230 + {
231 + return of_register_platform_driver(&pasemi_nand_driver);
232 + }
233 + module_init(pasemi_nand_init);
234 +
235 + static void __exit pasemi_nand_exit(void)
236 + {
237 + of_unregister_platform_driver(&pasemi_nand_driver);
238 + }
239 + module_exit(pasemi_nand_exit);
240 +
241 + MODULE_LICENSE("GPL");
242 + MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
243 + MODULE_DESCRIPTION("NAND flash interface driver for PA Semi PWRficient");
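Both buffer helpers above stream through a fixed 2 KiB I/O window rather than a linear mapping, so only the RAM-side pointer advances between chunks. A userspace model of the chunk loop, with memcpy() standing in for memcpy_fromio():

#include <string.h>

/* Model of the 0x800-byte chunking in pasemi_read_buf(): dst walks
 * forward, while the source stays on the fixed window, mirroring
 * chip->IO_ADDR_R in the driver. */
static void chunked_read(unsigned char *dst, const unsigned char *window,
			 int len)
{
	while (len > 0x800) {
		memcpy(dst, window, 0x800);
		dst += 0x800;
		len -= 0x800;
	}
	memcpy(dst, window, len);
}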
+2
drivers/mtd/nand/plat_nand.c
··· 110 static int __devexit plat_nand_remove(struct platform_device *pdev) 111 { 112 struct plat_nand_data *data = platform_get_drvdata(pdev); 113 struct platform_nand_data *pdata = pdev->dev.platform_data; 114 115 nand_release(&data->mtd); 116 #ifdef CONFIG_MTD_PARTITIONS
··· 110 static int __devexit plat_nand_remove(struct platform_device *pdev) 111 { 112 struct plat_nand_data *data = platform_get_drvdata(pdev); 113 + #ifdef CONFIG_MTD_PARTITIONS 114 struct platform_nand_data *pdata = pdev->dev.platform_data; 115 + #endif 116 117 nand_release(&data->mtd); 118 #ifdef CONFIG_MTD_PARTITIONS
+33 -15
drivers/mtd/nand/s3c2410.c
··· 120 int sel_bit; 121 int mtd_count; 122 123 enum s3c_cpu_type cpu_type; 124 }; 125 ··· 366 ((diff2 ^ (diff2 >> 1)) & 0x55) == 0x55) { 367 /* calculate the bit position of the error */ 368 369 - bit = (diff2 >> 2) & 1; 370 - bit |= (diff2 >> 3) & 2; 371 - bit |= (diff2 >> 4) & 4; 372 373 /* calculate the byte position of the error */ 374 375 - byte = (diff1 << 1) & 0x80; 376 - byte |= (diff1 << 2) & 0x40; 377 - byte |= (diff1 << 3) & 0x20; 378 - byte |= (diff1 << 4) & 0x10; 379 - 380 - byte |= (diff0 >> 3) & 0x08; 381 - byte |= (diff0 >> 2) & 0x04; 382 - byte |= (diff0 >> 1) & 0x02; 383 - byte |= (diff0 >> 0) & 0x01; 384 - 385 - byte |= (diff2 << 8) & 0x100; 386 387 dev_dbg(info->device, "correcting error bit %d, byte %d\n", 388 bit, byte); ··· 399 if ((diff0 & ~(1<<fls(diff0))) == 0) 400 return 1; 401 402 - return 0; 403 } 404 405 /* ECC functions ··· 810 struct s3c2410_nand_info *info = platform_get_drvdata(dev); 811 812 if (info) { 813 if (!allow_clk_stop(info)) 814 clk_disable(info->clk); 815 } ··· 830 static int s3c24xx_nand_resume(struct platform_device *dev) 831 { 832 struct s3c2410_nand_info *info = platform_get_drvdata(dev); 833 834 if (info) { 835 clk_enable(info->clk); 836 s3c2410_nand_inithw(info, dev); 837 838 if (allow_clk_stop(info)) 839 clk_disable(info->clk);
··· 120 int sel_bit; 121 int mtd_count; 122 123 + unsigned long save_nfconf; 124 + 125 enum s3c_cpu_type cpu_type; 126 }; 127 ··· 364 ((diff2 ^ (diff2 >> 1)) & 0x55) == 0x55) { 365 /* calculate the bit position of the error */ 366 367 + bit = ((diff2 >> 3) & 1) | 368 + ((diff2 >> 4) & 2) | 369 + ((diff2 >> 5) & 4); 370 371 /* calculate the byte position of the error */ 372 373 + byte = ((diff2 << 7) & 0x100) | 374 + ((diff1 << 0) & 0x80) | 375 + ((diff1 << 1) & 0x40) | 376 + ((diff1 << 2) & 0x20) | 377 + ((diff1 << 3) & 0x10) | 378 + ((diff0 >> 4) & 0x08) | 379 + ((diff0 >> 3) & 0x04) | 380 + ((diff0 >> 2) & 0x02) | 381 + ((diff0 >> 1) & 0x01); 382 383 dev_dbg(info->device, "correcting error bit %d, byte %d\n", 384 bit, byte); ··· 399 if ((diff0 & ~(1<<fls(diff0))) == 0) 400 return 1; 401 402 + return -1; 403 } 404 405 /* ECC functions ··· 810 struct s3c2410_nand_info *info = platform_get_drvdata(dev); 811 812 if (info) { 813 + info->save_nfconf = readl(info->regs + S3C2410_NFCONF); 814 + 815 + /* For the moment, we must ensure nFCE is high during 816 + * the time we are suspended. This really should be 817 + * handled by suspending the MTDs we are using, but 818 + * that is currently not the case. */ 819 + 820 + writel(info->save_nfconf | info->sel_bit, 821 + info->regs + S3C2410_NFCONF); 822 + 823 if (!allow_clk_stop(info)) 824 clk_disable(info->clk); 825 } ··· 820 static int s3c24xx_nand_resume(struct platform_device *dev) 821 { 822 struct s3c2410_nand_info *info = platform_get_drvdata(dev); 823 + unsigned long nfconf; 824 825 if (info) { 826 clk_enable(info->clk); 827 s3c2410_nand_inithw(info, dev); 828 + 829 + /* Restore the state of the nFCE line. */ 830 + 831 + nfconf = readl(info->regs + S3C2410_NFCONF); 832 + nfconf &= ~info->sel_bit; 833 + nfconf |= info->save_nfconf & info->sel_bit; 834 + writel(nfconf, info->regs + S3C2410_NFCONF); 835 836 if (allow_clk_stop(info)) 837 clk_disable(info->clk);
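The corrected bit/byte extraction above lifts out cleanly for testing. A sketch of the same arithmetic in plain C, assuming the three diff bytes have already passed the driver's single-bit-error pattern checks:

/* Extracts the failing bit and byte position from the three XORed ECC
 * bytes, exactly as the fixed s3c2410 correction path does. */
static void ecc_error_position(unsigned int diff0, unsigned int diff1,
			       unsigned int diff2,
			       unsigned int *bit, unsigned int *byte)
{
	*bit = ((diff2 >> 3) & 1) |
	       ((diff2 >> 4) & 2) |
	       ((diff2 >> 5) & 4);

	*byte = ((diff2 << 7) & 0x100) |
		((diff1 << 0) & 0x80) |
		((diff1 << 1) & 0x40) |
		((diff1 << 2) & 0x20) |
		((diff1 << 3) & 0x10) |
		((diff0 >> 4) & 0x08) |
		((diff0 >> 3) & 0x04) |
		((diff0 >> 2) & 0x02) |
		((diff0 >> 1) & 0x01);
}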
+74
drivers/mtd/ofpart.c
···
··· 1 + /* 2 + * Flash partitions described by the OF (or flattened) device tree 3 + * 4 + * Copyright (C) 2006 MontaVista Software Inc. 5 + * Author: Vitaly Wool <vwool@ru.mvista.com> 6 + * 7 + * Revised to handle newer style flash binding by: 8 + * Copyright (C) 2007 David Gibson, IBM Corporation. 9 + * 10 + * This program is free software; you can redistribute it and/or modify it 11 + * under the terms of the GNU General Public License as published by the 12 + * Free Software Foundation; either version 2 of the License, or (at your 13 + * option) any later version. 14 + */ 15 + 16 + #include <linux/module.h> 17 + #include <linux/init.h> 18 + #include <linux/of.h> 19 + #include <linux/mtd/mtd.h> 20 + #include <linux/mtd/partitions.h> 21 + 22 + int __devinit of_mtd_parse_partitions(struct device *dev, 23 + struct mtd_info *mtd, 24 + struct device_node *node, 25 + struct mtd_partition **pparts) 26 + { 27 + const char *partname; 28 + struct device_node *pp; 29 + int nr_parts, i; 30 + 31 + /* First count the subnodes */ 32 + pp = NULL; 33 + nr_parts = 0; 34 + while ((pp = of_get_next_child(node, pp))) 35 + nr_parts++; 36 + 37 + if (nr_parts == 0) 38 + return 0; 39 + 40 + *pparts = kzalloc(nr_parts * sizeof(**pparts), GFP_KERNEL); 41 + if (!*pparts) 42 + return -ENOMEM; 43 + 44 + pp = NULL; 45 + i = 0; 46 + while ((pp = of_get_next_child(node, pp))) { 47 + const u32 *reg; 48 + int len; 49 + 50 + reg = of_get_property(pp, "reg", &len); 51 + if (!reg || (len != 2 * sizeof(u32))) { 52 + of_node_put(pp); 53 + dev_err(dev, "Invalid 'reg' on %s\n", node->full_name); 54 + kfree(*pparts); 55 + *pparts = NULL; 56 + return -EINVAL; 57 + } 58 + (*pparts)[i].offset = reg[0]; 59 + (*pparts)[i].size = reg[1]; 60 + 61 + partname = of_get_property(pp, "label", &len); 62 + if (!partname) 63 + partname = of_get_property(pp, "name", &len); 64 + (*pparts)[i].name = (char *)partname; 65 + 66 + if (of_get_property(pp, "read-only", &len)) 67 + (*pparts)[i].mask_flags = MTD_WRITEABLE; 68 + 69 + i++; 70 + } 71 + 72 + return nr_parts; 73 + } 74 + EXPORT_SYMBOL(of_mtd_parse_partitions);
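The mapping the parser loop implements is mechanical: reg = <offset size> fills offset and size, label (or name as a fallback) becomes the partition name, and a read-only property puts MTD_WRITEABLE into mask_flags so the flag is masked off. An illustrative node and the array it would produce (the partition names and sizes here are made up):

/* An illustrative flash child node of the shape this parser expects:
 *
 *	partition@0 {
 *		label = "u-boot";
 *		reg = <0x0 0x40000>;
 *		read-only;
 *	};
 *	partition@40000 {
 *		label = "kernel";
 *		reg = <0x40000 0x3c0000>;
 *	};
 *
 * which of_mtd_parse_partitions() would turn into roughly: */
#include <linux/mtd/partitions.h>

static struct mtd_partition example_parts[] = {
	{ .name = "u-boot", .offset = 0x0, .size = 0x40000,
	  .mask_flags = MTD_WRITEABLE }, /* read-only */
	{ .name = "kernel", .offset = 0x40000, .size = 0x3c0000 },
};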
+158 -41
drivers/mtd/onenand/onenand_base.c
··· 18 #include <linux/module.h> 19 #include <linux/init.h> 20 #include <linux/sched.h> 21 #include <linux/interrupt.h> 22 #include <linux/jiffies.h> 23 #include <linux/mtd/mtd.h> ··· 171 } 172 173 /** 174 * onenand_command - [DEFAULT] Send command to OneNAND device 175 * @param mtd MTD device structure 176 * @param cmd the command to be sent ··· 195 static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t len) 196 { 197 struct onenand_chip *this = mtd->priv; 198 - int value, readcmd = 0, block_cmd = 0; 199 - int block, page; 200 201 /* Address translation */ 202 switch (cmd) { ··· 210 case ONENAND_CMD_ERASE: 211 case ONENAND_CMD_BUFFERRAM: 212 case ONENAND_CMD_OTP_ACCESS: 213 - block_cmd = 1; 214 block = (int) (addr >> this->erase_shift); 215 page = -1; 216 break; ··· 251 value = onenand_block_address(this, block); 252 this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1); 253 254 - if (block_cmd) { 255 - /* Select DataRAM for DDP */ 256 - value = onenand_bufferram_address(this, block); 257 - this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2); 258 - } 259 } 260 261 if (page != -1) { ··· 265 case ONENAND_CMD_READ: 266 case ONENAND_CMD_READOOB: 267 dataram = ONENAND_SET_NEXT_BUFFERRAM(this); 268 - readcmd = 1; 269 break; 270 271 default: ··· 281 /* Write 'BSA, BSC' of DataRAM */ 282 value = onenand_buffer_address(dataram, sectors, count); 283 this->write_word(value, this->base + ONENAND_REG_START_BUFFER); 284 - 285 - if (readcmd) { 286 - /* Select DataRAM for DDP */ 287 - value = onenand_bufferram_address(this, block); 288 - this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2); 289 - } 290 } 291 292 /* Interrupt clear */ ··· 857 this->command(mtd, ONENAND_CMD_READ, from, writesize); 858 ret = this->wait(mtd, FL_READING); 859 onenand_update_bufferram(mtd, from, !ret); 860 } 861 } 862 ··· 917 /* Now wait for load */ 918 ret = this->wait(mtd, FL_READING); 919 onenand_update_bufferram(mtd, from, !ret); 920 } 921 922 /* ··· 929 ops->retlen = read; 930 ops->oobretlen = oobread; 931 932 - if (mtd->ecc_stats.failed - stats.failed) 933 - return -EBADMSG; 934 - 935 if (ret) 936 return ret; 937 938 return mtd->ecc_stats.corrected - stats.corrected ? 
-EUCLEAN : 0; 939 } ··· 950 struct mtd_oob_ops *ops) 951 { 952 struct onenand_chip *this = mtd->priv; 953 int read = 0, thislen, column, oobsize; 954 size_t len = ops->ooblen; 955 mtd_oob_mode_t mode = ops->mode; ··· 984 return -EINVAL; 985 } 986 987 while (read < len) { 988 cond_resched(); 989 ··· 997 onenand_update_bufferram(mtd, from, 0); 998 999 ret = this->wait(mtd, FL_READING); 1000 - /* First copy data and check return value for ECC handling */ 1001 1002 if (mode == MTD_OOB_AUTO) 1003 onenand_transfer_auto_oob(mtd, buf, column, thislen); 1004 else 1005 this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen); 1006 - 1007 - if (ret) { 1008 - printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret); 1009 - break; 1010 - } 1011 1012 read += thislen; 1013 ··· 1023 } 1024 1025 ops->oobretlen = read; 1026 - return ret; 1027 } 1028 1029 /** ··· 1120 interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT); 1121 ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS); 1122 1123 if (ctrl & ONENAND_CTRL_ERROR) { 1124 printk(KERN_DEBUG "onenand_bbt_wait: controller error = 0x%04x\n", ctrl); 1125 - /* Initial bad block case */ 1126 - if (ctrl & ONENAND_CTRL_LOAD) 1127 - return ONENAND_BBT_READ_ERROR; 1128 - return ONENAND_BBT_READ_FATAL_ERROR; 1129 } 1130 1131 if (interrupt & ONENAND_INT_READ) { ··· 1218 static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to) 1219 { 1220 struct onenand_chip *this = mtd->priv; 1221 - char oobbuf[64]; 1222 int status, i; 1223 1224 this->command(mtd, ONENAND_CMD_READOOB, to, mtd->oobsize); ··· 1227 if (status) 1228 return status; 1229 1230 - this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize); 1231 for (i = 0; i < mtd->oobsize; i++) 1232 - if (buf[i] != 0xFF && buf[i] != oobbuf[i]) 1233 return -EBADMSG; 1234 1235 return 0; ··· 1284 #endif 1285 1286 #define NOTALIGNED(x) ((x & (this->subpagesize - 1)) != 0) 1287 1288 /** 1289 * onenand_fill_auto_oob - [Internal] oob auto-placement transfer ··· 1537 } 1538 1539 /* Only check verify write turn on */ 1540 - ret = onenand_verify(mtd, (u_char *) wbuf, to, thislen); 1541 if (ret) { 1542 printk(KERN_ERR "onenand_write_ops_nolock: verify failed %d\n", ret); 1543 break; ··· 1552 to += thislen; 1553 buf += thislen; 1554 } 1555 - 1556 - /* Deselect and wake up anyone waiting on the device */ 1557 - onenand_release_device(mtd); 1558 1559 ops->retlen = written; 1560 ··· 2263 2264 *retlen = 0; 2265 2266 - density = this->device_id >> ONENAND_DEVICE_DENSITY_SHIFT; 2267 if (density < ONENAND_DEVICE_DENSITY_512Mb) 2268 otp_pages = 20; 2269 else ··· 2414 static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, 2415 size_t len) 2416 { 2417 - unsigned char oob_buf[64]; 2418 size_t retlen; 2419 int ret; 2420 ··· 2455 unsigned int density, process; 2456 2457 /* Lock scheme depends on density and process */ 2458 - density = this->device_id >> ONENAND_DEVICE_DENSITY_SHIFT; 2459 process = this->version_id >> ONENAND_VERSION_PROCESS_SHIFT; 2460 2461 /* Lock scheme */ ··· 2504 vcc = device & ONENAND_DEVICE_VCC_MASK; 2505 demuxed = device & ONENAND_DEVICE_IS_DEMUX; 2506 ddp = device & ONENAND_DEVICE_IS_DDP; 2507 - density = device >> ONENAND_DEVICE_DENSITY_SHIFT; 2508 printk(KERN_INFO "%sOneNAND%s %dMB %sV 16-bit (0x%02x)\n", 2509 demuxed ? "" : "Muxed ", 2510 ddp ? 
"(DDP)" : "", ··· 2596 this->device_id = dev_id; 2597 this->version_id = ver_id; 2598 2599 - density = dev_id >> ONENAND_DEVICE_DENSITY_SHIFT; 2600 this->chipsize = (16 << density) << 20; 2601 /* Set density mask. it is used for DDP */ 2602 if (ONENAND_IS_DDP(this)) ··· 2780 mtd->write = onenand_write; 2781 mtd->read_oob = onenand_read_oob; 2782 mtd->write_oob = onenand_write_oob; 2783 #ifdef CONFIG_MTD_ONENAND_OTP 2784 mtd->get_fact_prot_info = onenand_get_fact_prot_info; 2785 mtd->read_fact_prot_reg = onenand_read_fact_prot_reg;
··· 18 #include <linux/module.h> 19 #include <linux/init.h> 20 #include <linux/sched.h> 21 + #include <linux/delay.h> 22 #include <linux/interrupt.h> 23 #include <linux/jiffies.h> 24 #include <linux/mtd/mtd.h> ··· 170 } 171 172 /** 173 + * onenand_get_density - [DEFAULT] Get OneNAND density 174 + * @param dev_id OneNAND device ID 175 + * 176 + * Get OneNAND density from device ID 177 + */ 178 + static inline int onenand_get_density(int dev_id) 179 + { 180 + int density = dev_id >> ONENAND_DEVICE_DENSITY_SHIFT; 181 + return (density & ONENAND_DEVICE_DENSITY_MASK); 182 + } 183 + 184 + /** 185 * onenand_command - [DEFAULT] Send command to OneNAND device 186 * @param mtd MTD device structure 187 * @param cmd the command to be sent ··· 182 static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t len) 183 { 184 struct onenand_chip *this = mtd->priv; 185 + int value, block, page; 186 187 /* Address translation */ 188 switch (cmd) { ··· 198 case ONENAND_CMD_ERASE: 199 case ONENAND_CMD_BUFFERRAM: 200 case ONENAND_CMD_OTP_ACCESS: 201 block = (int) (addr >> this->erase_shift); 202 page = -1; 203 break; ··· 240 value = onenand_block_address(this, block); 241 this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1); 242 243 + /* Select DataRAM for DDP */ 244 + value = onenand_bufferram_address(this, block); 245 + this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2); 246 } 247 248 if (page != -1) { ··· 256 case ONENAND_CMD_READ: 257 case ONENAND_CMD_READOOB: 258 dataram = ONENAND_SET_NEXT_BUFFERRAM(this); 259 break; 260 261 default: ··· 273 /* Write 'BSA, BSC' of DataRAM */ 274 value = onenand_buffer_address(dataram, sectors, count); 275 this->write_word(value, this->base + ONENAND_REG_START_BUFFER); 276 } 277 278 /* Interrupt clear */ ··· 855 this->command(mtd, ONENAND_CMD_READ, from, writesize); 856 ret = this->wait(mtd, FL_READING); 857 onenand_update_bufferram(mtd, from, !ret); 858 + if (ret == -EBADMSG) 859 + ret = 0; 860 } 861 } 862 ··· 913 /* Now wait for load */ 914 ret = this->wait(mtd, FL_READING); 915 onenand_update_bufferram(mtd, from, !ret); 916 + if (ret == -EBADMSG) 917 + ret = 0; 918 } 919 920 /* ··· 923 ops->retlen = read; 924 ops->oobretlen = oobread; 925 926 if (ret) 927 return ret; 928 + 929 + if (mtd->ecc_stats.failed - stats.failed) 930 + return -EBADMSG; 931 932 return mtd->ecc_stats.corrected - stats.corrected ? 
-EUCLEAN : 0; 933 } ··· 944 struct mtd_oob_ops *ops) 945 { 946 struct onenand_chip *this = mtd->priv; 947 + struct mtd_ecc_stats stats; 948 int read = 0, thislen, column, oobsize; 949 size_t len = ops->ooblen; 950 mtd_oob_mode_t mode = ops->mode; ··· 977 return -EINVAL; 978 } 979 980 + stats = mtd->ecc_stats; 981 + 982 while (read < len) { 983 cond_resched(); 984 ··· 988 onenand_update_bufferram(mtd, from, 0); 989 990 ret = this->wait(mtd, FL_READING); 991 + if (ret && ret != -EBADMSG) { 992 + printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret); 993 + break; 994 + } 995 996 if (mode == MTD_OOB_AUTO) 997 onenand_transfer_auto_oob(mtd, buf, column, thislen); 998 else 999 this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen); 1000 1001 read += thislen; 1002 ··· 1016 } 1017 1018 ops->oobretlen = read; 1019 + 1020 + if (ret) 1021 + return ret; 1022 + 1023 + if (mtd->ecc_stats.failed - stats.failed) 1024 + return -EBADMSG; 1025 + 1026 + return 0; 1027 } 1028 1029 /** ··· 1106 interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT); 1107 ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS); 1108 1109 + /* Initial bad block case: 0x2400 or 0x0400 */ 1110 if (ctrl & ONENAND_CTRL_ERROR) { 1111 printk(KERN_DEBUG "onenand_bbt_wait: controller error = 0x%04x\n", ctrl); 1112 + return ONENAND_BBT_READ_ERROR; 1113 } 1114 1115 if (interrupt & ONENAND_INT_READ) { ··· 1206 static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to) 1207 { 1208 struct onenand_chip *this = mtd->priv; 1209 + u_char *oob_buf = this->oob_buf; 1210 int status, i; 1211 1212 this->command(mtd, ONENAND_CMD_READOOB, to, mtd->oobsize); ··· 1215 if (status) 1216 return status; 1217 1218 + this->read_bufferram(mtd, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize); 1219 for (i = 0; i < mtd->oobsize; i++) 1220 + if (buf[i] != 0xFF && buf[i] != oob_buf[i]) 1221 return -EBADMSG; 1222 1223 return 0; ··· 1272 #endif 1273 1274 #define NOTALIGNED(x) ((x & (this->subpagesize - 1)) != 0) 1275 + 1276 + static void onenand_panic_wait(struct mtd_info *mtd) 1277 + { 1278 + struct onenand_chip *this = mtd->priv; 1279 + unsigned int interrupt; 1280 + int i; 1281 + 1282 + for (i = 0; i < 2000; i++) { 1283 + interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT); 1284 + if (interrupt & ONENAND_INT_MASTER) 1285 + break; 1286 + udelay(10); 1287 + } 1288 + } 1289 + 1290 + /** 1291 + * onenand_panic_write - [MTD Interface] write buffer to FLASH in a panic context 1292 + * @param mtd MTD device structure 1293 + * @param to offset to write to 1294 + * @param len number of bytes to write 1295 + * @param retlen pointer to variable to store the number of written bytes 1296 + * @param buf the data to write 1297 + * 1298 + * Write with ECC 1299 + */ 1300 + static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len, 1301 + size_t *retlen, const u_char *buf) 1302 + { 1303 + struct onenand_chip *this = mtd->priv; 1304 + int column, subpage; 1305 + int written = 0; 1306 + int ret = 0; 1307 + 1308 + if (this->state == FL_PM_SUSPENDED) 1309 + return -EBUSY; 1310 + 1311 + /* Wait for any existing operation to clear */ 1312 + onenand_panic_wait(mtd); 1313 + 1314 + DEBUG(MTD_DEBUG_LEVEL3, "onenand_panic_write: to = 0x%08x, len = %i\n", 1315 + (unsigned int) to, (int) len); 1316 + 1317 + /* Initialize retlen, in case of early exit */ 1318 + *retlen = 0; 1319 + 1320 + /* Do not allow writes past end of device */ 1321 + if (unlikely((to + len) > mtd->size)) { 1322 + printk(KERN_ERR 
"onenand_panic_write: Attempt write to past end of device\n"); 1323 + return -EINVAL; 1324 + } 1325 + 1326 + /* Reject writes, which are not page aligned */ 1327 + if (unlikely(NOTALIGNED(to)) || unlikely(NOTALIGNED(len))) { 1328 + printk(KERN_ERR "onenand_panic_write: Attempt to write not page aligned data\n"); 1329 + return -EINVAL; 1330 + } 1331 + 1332 + column = to & (mtd->writesize - 1); 1333 + 1334 + /* Loop until all data write */ 1335 + while (written < len) { 1336 + int thislen = min_t(int, mtd->writesize - column, len - written); 1337 + u_char *wbuf = (u_char *) buf; 1338 + 1339 + this->command(mtd, ONENAND_CMD_BUFFERRAM, to, thislen); 1340 + 1341 + /* Partial page write */ 1342 + subpage = thislen < mtd->writesize; 1343 + if (subpage) { 1344 + memset(this->page_buf, 0xff, mtd->writesize); 1345 + memcpy(this->page_buf + column, buf, thislen); 1346 + wbuf = this->page_buf; 1347 + } 1348 + 1349 + this->write_bufferram(mtd, ONENAND_DATARAM, wbuf, 0, mtd->writesize); 1350 + this->write_bufferram(mtd, ONENAND_SPARERAM, ffchars, 0, mtd->oobsize); 1351 + 1352 + this->command(mtd, ONENAND_CMD_PROG, to, mtd->writesize); 1353 + 1354 + onenand_panic_wait(mtd); 1355 + 1356 + /* In partial page write we don't update bufferram */ 1357 + onenand_update_bufferram(mtd, to, !ret && !subpage); 1358 + if (ONENAND_IS_2PLANE(this)) { 1359 + ONENAND_SET_BUFFERRAM1(this); 1360 + onenand_update_bufferram(mtd, to + this->writesize, !ret && !subpage); 1361 + } 1362 + 1363 + if (ret) { 1364 + printk(KERN_ERR "onenand_panic_write: write failed %d\n", ret); 1365 + break; 1366 + } 1367 + 1368 + written += thislen; 1369 + 1370 + if (written == len) 1371 + break; 1372 + 1373 + column = 0; 1374 + to += thislen; 1375 + buf += thislen; 1376 + } 1377 + 1378 + *retlen = written; 1379 + return ret; 1380 + } 1381 1382 /** 1383 * onenand_fill_auto_oob - [Internal] oob auto-placement transfer ··· 1419 } 1420 1421 /* Only check verify write turn on */ 1422 + ret = onenand_verify(mtd, buf, to, thislen); 1423 if (ret) { 1424 printk(KERN_ERR "onenand_write_ops_nolock: verify failed %d\n", ret); 1425 break; ··· 1434 to += thislen; 1435 buf += thislen; 1436 } 1437 1438 ops->retlen = written; 1439 ··· 2148 2149 *retlen = 0; 2150 2151 + density = onenand_get_density(this->device_id); 2152 if (density < ONENAND_DEVICE_DENSITY_512Mb) 2153 otp_pages = 20; 2154 else ··· 2299 static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, 2300 size_t len) 2301 { 2302 + struct onenand_chip *this = mtd->priv; 2303 + u_char *oob_buf = this->oob_buf; 2304 size_t retlen; 2305 int ret; 2306 ··· 2339 unsigned int density, process; 2340 2341 /* Lock scheme depends on density and process */ 2342 + density = onenand_get_density(this->device_id); 2343 process = this->version_id >> ONENAND_VERSION_PROCESS_SHIFT; 2344 2345 /* Lock scheme */ ··· 2388 vcc = device & ONENAND_DEVICE_VCC_MASK; 2389 demuxed = device & ONENAND_DEVICE_IS_DEMUX; 2390 ddp = device & ONENAND_DEVICE_IS_DDP; 2391 + density = onenand_get_density(device); 2392 printk(KERN_INFO "%sOneNAND%s %dMB %sV 16-bit (0x%02x)\n", 2393 demuxed ? "" : "Muxed ", 2394 ddp ? "(DDP)" : "", ··· 2480 this->device_id = dev_id; 2481 this->version_id = ver_id; 2482 2483 + density = onenand_get_density(dev_id); 2484 this->chipsize = (16 << density) << 20; 2485 /* Set density mask. 
it is used for DDP */ 2486 if (ONENAND_IS_DDP(this)) ··· 2664 mtd->write = onenand_write; 2665 mtd->read_oob = onenand_read_oob; 2666 mtd->write_oob = onenand_write_oob; 2667 + mtd->panic_write = onenand_panic_write; 2668 #ifdef CONFIG_MTD_ONENAND_OTP 2669 mtd->get_fact_prot_info = onenand_get_fact_prot_info; 2670 mtd->read_fact_prot_reg = onenand_read_fact_prot_reg;
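onenand_get_density() folds the shift-and-mask into one place, and the chip size follows directly from it. A standalone sketch of the derivation (the shift/mask values and the sample device ID are assumptions for illustration, not taken from the header):

#include <stdio.h>

/* Assumed stand-ins for ONENAND_DEVICE_DENSITY_SHIFT/_MASK. */
#define DENSITY_SHIFT 4
#define DENSITY_MASK 0xf

static int onenand_density(int dev_id)
{
	return (dev_id >> DENSITY_SHIFT) & DENSITY_MASK;
}

int main(void)
{
	int dev_id = 0x40; /* hypothetical device ID */

	/* chipsize = (16 << density) << 20 bytes, i.e. (16 << density) MiB */
	printf("density %d -> %d MiB\n", onenand_density(dev_id),
	       16 << onenand_density(dev_id));
	return 0;
}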
+20 -5
drivers/mtd/redboot.c
··· 59 static char nullstring[] = "unallocated"; 60 #endif 61 62 buf = vmalloc(master->erasesize); 63 64 if (!buf) 65 return -ENOMEM; 66 - 67 - if ( directory < 0 ) 68 - offset = master->size + directory*master->erasesize; 69 - else 70 - offset = directory*master->erasesize; 71 72 printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n", 73 master->name, offset);
··· 59 static char nullstring[] = "unallocated"; 60 #endif 61 62 + if ( directory < 0 ) { 63 + offset = master->size + directory * master->erasesize; 64 + while (master->block_isbad && 65 + master->block_isbad(master, offset)) { 66 + if (!offset) { 67 + nogood: 68 + printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n"); 69 + return -EIO; 70 + } 71 + offset -= master->erasesize; 72 + } 73 + } else { 74 + offset = directory * master->erasesize; 75 + while (master->block_isbad && 76 + master->block_isbad(master, offset)) { 77 + offset += master->erasesize; 78 + if (offset == master->size) 79 + goto nogood; 80 + } 81 + } 82 buf = vmalloc(master->erasesize); 83 84 if (!buf) 85 return -ENOMEM; 86 87 printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n", 88 master->name, offset);
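The reworked FIS-directory search is easy to model: a negative directory counts erase blocks back from the end of the device and scans downward past bad blocks, while a non-negative one scans upward. A userspace sketch of the same walk (the real code additionally skips the scan entirely when the device has no block_isbad method):

#include <stdio.h>

/* Returns the first good erase-block offset for the given directory,
 * or -1 when the scan runs off the device. */
static long find_fis_offset(long size, long erasesize, int directory,
			    int (*is_bad)(long offset))
{
	long offset;

	if (directory < 0) {
		offset = size + (long)directory * erasesize;
		while (is_bad(offset)) {
			if (offset == 0)
				return -1;
			offset -= erasesize;
		}
	} else {
		offset = (long)directory * erasesize;
		while (is_bad(offset)) {
			offset += erasesize;
			if (offset == size)
				return -1;
		}
	}
	return offset;
}

static int no_bad_blocks(long offset)
{
	(void)offset;
	return 0;
}

int main(void)
{
	/* 16 MiB device, 64 KiB erase blocks, directory -1 -> last block */
	printf("%ld\n", find_fis_offset(16L << 20, 64L << 10, -1,
					no_bad_blocks));
	return 0;
}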
+504 -198
drivers/mtd/ubi/build.c
··· 21 */ 22 23 /* 24 - * This file includes UBI initialization and building of UBI devices. At the 25 - * moment UBI devices may only be added while UBI is initialized, but dynamic 26 - * device add/remove functionality is planned. Also, at the moment we only 27 - * attach UBI devices by scanning, which will become a bottleneck when flashes 28 - * reach certain large size. Then one may improve UBI and add other methods. 29 */ 30 31 #include <linux/err.h> ··· 38 #include <linux/moduleparam.h> 39 #include <linux/stringify.h> 40 #include <linux/stat.h> 41 #include <linux/log2.h> 42 #include "ubi.h" 43 44 /* Maximum length of the 'mtd=' parameter */ ··· 50 * struct mtd_dev_param - MTD device parameter description data structure. 51 * @name: MTD device name or number string 52 * @vid_hdr_offs: VID header offset 53 - * @data_offs: data offset 54 */ 55 struct mtd_dev_param 56 { 57 char name[MTD_PARAM_LEN_MAX]; 58 int vid_hdr_offs; 59 - int data_offs; 60 }; 61 62 /* Numbers of elements set in the @mtd_dev_param array */ ··· 63 /* MTD devices specification parameters */ 64 static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES]; 65 66 - /* Number of UBI devices in system */ 67 - int ubi_devices_cnt; 68 - 69 - /* All UBI devices in system */ 70 - struct ubi_device *ubi_devices[UBI_MAX_DEVICES]; 71 - 72 /* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */ 73 struct class *ubi_class; 74 75 /* "Show" method for files in '/<sysfs>/class/ubi/' */ 76 static ssize_t ubi_version_show(struct class *class, char *buf) ··· 119 __ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL); 120 static struct device_attribute dev_bgt_enabled = 121 __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL); 122 123 /* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */ 124 static ssize_t dev_attribute_show(struct device *dev, 125 struct device_attribute *attr, char *buf) 126 { 127 - const struct ubi_device *ubi; 128 129 ubi = container_of(dev, struct ubi_device, dev); 130 - if (attr == &dev_eraseblock_size) 131 - return sprintf(buf, "%d\n", ubi->leb_size); 132 - else if (attr == &dev_avail_eraseblocks) 133 - return sprintf(buf, "%d\n", ubi->avail_pebs); 134 - else if (attr == &dev_total_eraseblocks) 135 - return sprintf(buf, "%d\n", ubi->good_peb_count); 136 - else if (attr == &dev_volumes_count) 137 - return sprintf(buf, "%d\n", ubi->vol_count); 138 - else if (attr == &dev_max_ec) 139 - return sprintf(buf, "%d\n", ubi->max_ec); 140 - else if (attr == &dev_reserved_for_bad) 141 - return sprintf(buf, "%d\n", ubi->beb_rsvd_pebs); 142 - else if (attr == &dev_bad_peb_count) 143 - return sprintf(buf, "%d\n", ubi->bad_peb_count); 144 - else if (attr == &dev_max_vol_count) 145 - return sprintf(buf, "%d\n", ubi->vtbl_slots); 146 - else if (attr == &dev_min_io_size) 147 - return sprintf(buf, "%d\n", ubi->min_io_size); 148 - else if (attr == &dev_bgt_enabled) 149 - return sprintf(buf, "%d\n", ubi->thread_enabled); 150 - else 151 - BUG(); 152 153 - return 0; 154 } 155 156 /* Fake "release" method for UBI devices */ ··· 280 int err; 281 282 ubi->dev.release = dev_release; 283 - ubi->dev.devt = MKDEV(ubi->major, 0); 284 ubi->dev.class = ubi_class; 285 sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num); 286 err = device_register(&ubi->dev); 287 if (err) 288 - goto out; 289 290 err = device_create_file(&ubi->dev, &dev_eraseblock_size); 291 if (err) 292 - goto out_unregister; 293 err = device_create_file(&ubi->dev, &dev_avail_eraseblocks); 294 if (err) 295 - goto out_eraseblock_size; 296 err = 
device_create_file(&ubi->dev, &dev_total_eraseblocks); 297 if (err) 298 - goto out_avail_eraseblocks; 299 err = device_create_file(&ubi->dev, &dev_volumes_count); 300 if (err) 301 - goto out_total_eraseblocks; 302 err = device_create_file(&ubi->dev, &dev_max_ec); 303 if (err) 304 - goto out_volumes_count; 305 err = device_create_file(&ubi->dev, &dev_reserved_for_bad); 306 if (err) 307 - goto out_volumes_max_ec; 308 err = device_create_file(&ubi->dev, &dev_bad_peb_count); 309 if (err) 310 - goto out_reserved_for_bad; 311 err = device_create_file(&ubi->dev, &dev_max_vol_count); 312 if (err) 313 - goto out_bad_peb_count; 314 err = device_create_file(&ubi->dev, &dev_min_io_size); 315 if (err) 316 - goto out_max_vol_count; 317 err = device_create_file(&ubi->dev, &dev_bgt_enabled); 318 if (err) 319 - goto out_min_io_size; 320 - 321 - return 0; 322 - 323 - out_min_io_size: 324 - device_remove_file(&ubi->dev, &dev_min_io_size); 325 - out_max_vol_count: 326 - device_remove_file(&ubi->dev, &dev_max_vol_count); 327 - out_bad_peb_count: 328 - device_remove_file(&ubi->dev, &dev_bad_peb_count); 329 - out_reserved_for_bad: 330 - device_remove_file(&ubi->dev, &dev_reserved_for_bad); 331 - out_volumes_max_ec: 332 - device_remove_file(&ubi->dev, &dev_max_ec); 333 - out_volumes_count: 334 - device_remove_file(&ubi->dev, &dev_volumes_count); 335 - out_total_eraseblocks: 336 - device_remove_file(&ubi->dev, &dev_total_eraseblocks); 337 - out_avail_eraseblocks: 338 - device_remove_file(&ubi->dev, &dev_avail_eraseblocks); 339 - out_eraseblock_size: 340 - device_remove_file(&ubi->dev, &dev_eraseblock_size); 341 - out_unregister: 342 - device_unregister(&ubi->dev); 343 - out: 344 - ubi_err("failed to initialize sysfs for %s", ubi->ubi_name); 345 return err; 346 } 347 ··· 327 */ 328 static void ubi_sysfs_close(struct ubi_device *ubi) 329 { 330 device_remove_file(&ubi->dev, &dev_bgt_enabled); 331 device_remove_file(&ubi->dev, &dev_min_io_size); 332 device_remove_file(&ubi->dev, &dev_max_vol_count); ··· 351 352 for (i = 0; i < ubi->vtbl_slots; i++) 353 if (ubi->volumes[i]) 354 - ubi_free_volume(ubi, i); 355 } 356 357 /** ··· 365 { 366 int i, err; 367 dev_t dev; 368 - 369 - mutex_init(&ubi->vtbl_mutex); 370 - spin_lock_init(&ubi->volumes_lock); 371 372 sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num); 373 ··· 382 return err; 383 } 384 385 cdev_init(&ubi->cdev, &ubi_cdev_operations); 386 - ubi->major = MAJOR(dev); 387 - dbg_msg("%s major is %u", ubi->ubi_name, ubi->major); 388 ubi->cdev.owner = THIS_MODULE; 389 390 - dev = MKDEV(ubi->major, 0); 391 err = cdev_add(&ubi->cdev, dev, 1); 392 if (err) { 393 - ubi_err("cannot add character device %s", ubi->ubi_name); 394 goto out_unreg; 395 } 396 397 err = ubi_sysfs_init(ubi); 398 if (err) 399 - goto out_cdev; 400 401 for (i = 0; i < ubi->vtbl_slots; i++) 402 if (ubi->volumes[i]) { 403 - err = ubi_add_volume(ubi, i); 404 - if (err) 405 goto out_volumes; 406 } 407 408 return 0; 409 410 out_volumes: 411 kill_volumes(ubi); 412 ubi_sysfs_close(ubi); 413 - out_cdev: 414 cdev_del(&ubi->cdev); 415 out_unreg: 416 - unregister_chrdev_region(MKDEV(ubi->major, 0), 417 - ubi->vtbl_slots + 1); 418 return err; 419 } 420 ··· 428 kill_volumes(ubi); 429 ubi_sysfs_close(ubi); 430 cdev_del(&ubi->cdev); 431 - unregister_chrdev_region(MKDEV(ubi->major, 0), ubi->vtbl_slots + 1); 432 } 433 434 /** ··· 489 * assumed: 490 * o EC header is always at offset zero - this cannot be changed; 491 * o VID header starts just after the EC header at the closest address 492 - * aligned to 
@io->@hdrs_min_io_size; 493 * o data starts just after the VID header at the closest address aligned to 494 - * @io->@min_io_size 495 * 496 * This function returns zero in case of success and a negative error code in 497 * case of failure. ··· 512 return -EINVAL; 513 } 514 515 /* 516 * Note, in this implementation we support MTD devices with 0x7FFFFFFF 517 * physical eraseblocks maximum. ··· 532 533 /* Make sure minimal I/O unit is power of 2 */ 534 if (!is_power_of_2(ubi->min_io_size)) { 535 - ubi_err("bad min. I/O unit"); 536 return -EINVAL; 537 } 538 ··· 562 } 563 564 /* Similar for the data offset */ 565 - if (ubi->leb_start == 0) { 566 - ubi->leb_start = ubi->vid_hdr_offset + ubi->vid_hdr_alsize; 567 - ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size); 568 - } 569 570 dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset); 571 dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset); ··· 621 } 622 623 /** 624 - * attach_mtd_dev - attach an MTD device. 625 - * @mtd_dev: MTD device name or number string 626 - * @vid_hdr_offset: VID header offset 627 - * @data_offset: data offset 628 * 629 - * This function attaches an MTD device to UBI. It first treats @mtd_dev as the 630 - * MTD device name, and tries to open it by this name. If it is unable to open, 631 - * it tries to convert @mtd_dev to an integer and open the MTD device by its 632 - * number. Returns zero in case of success and a negative error code in case of 633 - * failure. 634 */ 635 - static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset, 636 - int data_offset) 637 { 638 - struct ubi_device *ubi; 639 - struct mtd_info *mtd; 640 - int i, err; 641 642 - mtd = get_mtd_device_nm(mtd_dev); 643 - if (IS_ERR(mtd)) { 644 - int mtd_num; 645 - char *endp; 646 647 - if (PTR_ERR(mtd) != -ENODEV) 648 - return PTR_ERR(mtd); 649 650 /* 651 - * Probably this is not MTD device name but MTD device number - 652 - * check this out. 
653 */ 654 - mtd_num = simple_strtoul(mtd_dev, &endp, 0); 655 - if (*endp != '\0' || mtd_dev == endp) { 656 - ubi_err("incorrect MTD device: \"%s\"", mtd_dev); 657 - return -ENODEV; 658 - } 659 - 660 - mtd = get_mtd_device(NULL, mtd_num); 661 - if (IS_ERR(mtd)) 662 - return PTR_ERR(mtd); 663 } 664 665 - /* Check if we already have the same MTD device attached */ 666 - for (i = 0; i < ubi_devices_cnt; i++) 667 - if (ubi_devices[i]->mtd->index == mtd->index) { 668 - ubi_err("mtd%d is already attached to ubi%d", 669 mtd->index, i); 670 - err = -EINVAL; 671 - goto out_mtd; 672 } 673 - 674 - ubi = ubi_devices[ubi_devices_cnt] = kzalloc(sizeof(struct ubi_device), 675 - GFP_KERNEL); 676 - if (!ubi) { 677 - err = -ENOMEM; 678 - goto out_mtd; 679 } 680 681 - ubi->ubi_num = ubi_devices_cnt; 682 ubi->mtd = mtd; 683 - 684 - dbg_msg("attaching mtd%d to ubi%d: VID header offset %d data offset %d", 685 - ubi->mtd->index, ubi_devices_cnt, vid_hdr_offset, data_offset); 686 - 687 ubi->vid_hdr_offset = vid_hdr_offset; 688 - ubi->leb_start = data_offset; 689 err = io_init(ubi); 690 if (err) 691 goto out_free; 692 693 - mutex_init(&ubi->buf_mutex); 694 ubi->peb_buf1 = vmalloc(ubi->peb_size); 695 if (!ubi->peb_buf1) 696 goto out_free; ··· 783 goto out_free; 784 } 785 786 err = uif_init(ubi); 787 if (err) 788 goto out_detach; 789 790 - ubi_msg("attached mtd%d to ubi%d", ubi->mtd->index, ubi_devices_cnt); 791 - ubi_msg("MTD device name: \"%s\"", ubi->mtd->name); 792 ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20); 793 ubi_msg("physical eraseblock size: %d bytes (%d KiB)", 794 ubi->peb_size, ubi->peb_size >> 10); ··· 830 wake_up_process(ubi->bgt_thread); 831 } 832 833 - ubi_devices_cnt += 1; 834 - return 0; 835 836 out_detach: 837 ubi_eba_close(ubi); 838 ubi_wl_close(ubi); ··· 846 vfree(ubi->dbg_peb_buf); 847 #endif 848 kfree(ubi); 849 - out_mtd: 850 - put_mtd_device(mtd); 851 - ubi_devices[ubi_devices_cnt] = NULL; 852 return err; 853 } 854 855 /** 856 - * detach_mtd_dev - detach an MTD device. 
857 - * @ubi: UBI device description object 858 */ 859 - static void detach_mtd_dev(struct ubi_device *ubi) 860 { 861 - int ubi_num = ubi->ubi_num, mtd_num = ubi->mtd->index; 862 863 dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num); 864 uif_close(ubi); 865 ubi_eba_close(ubi); 866 ubi_wl_close(ubi); ··· 908 #ifdef CONFIG_MTD_UBI_DEBUG 909 vfree(ubi->dbg_peb_buf); 910 #endif 911 - kfree(ubi_devices[ubi_num]); 912 - ubi_devices[ubi_num] = NULL; 913 - ubi_devices_cnt -= 1; 914 - ubi_assert(ubi_devices_cnt >= 0); 915 - ubi_msg("mtd%d is detached from ubi%d", mtd_num, ubi_num); 916 } 917 918 static int __init ubi_init(void) ··· 950 BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64); 951 952 if (mtd_devs > UBI_MAX_DEVICES) { 953 - printk("UBI error: too many MTD devices, maximum is %d\n", 954 - UBI_MAX_DEVICES); 955 return -EINVAL; 956 } 957 958 ubi_class = class_create(THIS_MODULE, UBI_NAME_STR); 959 - if (IS_ERR(ubi_class)) 960 - return PTR_ERR(ubi_class); 961 962 err = class_create_file(ubi_class, &ubi_version); 963 - if (err) 964 goto out_class; 965 966 /* Attach MTD devices */ 967 for (i = 0; i < mtd_devs; i++) { 968 struct mtd_dev_param *p = &mtd_dev_param[i]; 969 970 cond_resched(); 971 - err = attach_mtd_dev(p->name, p->vid_hdr_offs, p->data_offs); 972 - if (err) 973 goto out_detach; 974 } 975 976 return 0; 977 978 out_detach: 979 for (k = 0; k < i; k++) 980 - detach_mtd_dev(ubi_devices[k]); 981 class_remove_file(ubi_class, &ubi_version); 982 out_class: 983 class_destroy(ubi_class); 984 return err; 985 } 986 module_init(ubi_init); 987 988 static void __exit ubi_exit(void) 989 { 990 - int i, n = ubi_devices_cnt; 991 992 - for (i = 0; i < n; i++) 993 - detach_mtd_dev(ubi_devices[i]); 994 class_remove_file(ubi_class, &ubi_version); 995 class_destroy(ubi_class); 996 } ··· 1060 1061 result = simple_strtoul(str, &endp, 0); 1062 if (str == endp || result < 0) { 1063 - printk("UBI error: incorrect bytes count: \"%s\"\n", str); 1064 return -EINVAL; 1065 } 1066 ··· 1071 case 'M': 1072 result *= 1024; 1073 case 'K': 1074 - case 'k': 1075 result *= 1024; 1076 - if (endp[1] == 'i' && (endp[2] == '\0' || 1077 - endp[2] == 'B' || endp[2] == 'b')) 1078 endp += 2; 1079 case '\0': 1080 break; 1081 default: 1082 - printk("UBI error: incorrect bytes count: \"%s\"\n", str); 1083 return -EINVAL; 1084 } 1085 ··· 1099 struct mtd_dev_param *p; 1100 char buf[MTD_PARAM_LEN_MAX]; 1101 char *pbuf = &buf[0]; 1102 - char *tokens[3] = {NULL, NULL, NULL}; 1103 1104 if (mtd_devs == UBI_MAX_DEVICES) { 1105 - printk("UBI error: too many parameters, max. is %d\n", 1106 UBI_MAX_DEVICES); 1107 return -EINVAL; 1108 } 1109 1110 len = strnlen(val, MTD_PARAM_LEN_MAX); 1111 if (len == MTD_PARAM_LEN_MAX) { 1112 - printk("UBI error: parameter \"%s\" is too long, max. 
is %d\n", 1113 - val, MTD_PARAM_LEN_MAX); 1114 return -EINVAL; 1115 } 1116 1117 if (len == 0) { 1118 - printk("UBI warning: empty 'mtd=' parameter - ignored\n"); 1119 return 0; 1120 } 1121 ··· 1129 if (buf[len - 1] == '\n') 1130 buf[len - 1] = '\0'; 1131 1132 - for (i = 0; i < 3; i++) 1133 tokens[i] = strsep(&pbuf, ","); 1134 1135 if (pbuf) { 1136 - printk("UBI error: too many arguments at \"%s\"\n", val); 1137 return -EINVAL; 1138 } 1139 ··· 1143 1144 if (tokens[1]) 1145 p->vid_hdr_offs = bytes_str_to_int(tokens[1]); 1146 - if (tokens[2]) 1147 - p->data_offs = bytes_str_to_int(tokens[2]); 1148 1149 if (p->vid_hdr_offs < 0) 1150 return p->vid_hdr_offs; 1151 - if (p->data_offs < 0) 1152 - return p->data_offs; 1153 1154 mtd_devs += 1; 1155 return 0; ··· 1153 1154 module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000); 1155 MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: " 1156 - "mtd=<name|num>[,<vid_hdr_offs>,<data_offs>]. " 1157 "Multiple \"mtd\" parameters may be specified.\n" 1158 - "MTD devices may be specified by their number or name. " 1159 - "Optional \"vid_hdr_offs\" and \"data_offs\" parameters " 1160 - "specify UBI VID header position and data starting " 1161 - "position to be used by UBI.\n" 1162 - "Example: mtd=content,1984,2048 mtd=4 - attach MTD device" 1163 - "with name content using VID header offset 1984 and data " 1164 - "start 2048, and MTD device number 4 using default " 1165 - "offsets"); 1166 1167 MODULE_VERSION(__stringify(UBI_VERSION)); 1168 MODULE_DESCRIPTION("UBI - Unsorted Block Images");
··· 21 */
22
23 /*
24 + * This file includes UBI initialization and building of UBI devices.
25 + *
26 + * When UBI is initialized, it attaches all the MTD devices specified as the
27 + * module load parameters or the kernel boot parameters. If MTD devices were
28 + * not specified, UBI does not attach any MTD device, but it is possible to do
29 + * so later using the "UBI control device".
30 + *
31 + * At the moment we only attach UBI devices by scanning, which will become a
32 + * bottleneck when flashes reach a certain large size. Then one may improve UBI
33 + * and add other methods, although it does not seem to be easy to do.
34 */
35
36 #include <linux/err.h>
··· 33 #include <linux/moduleparam.h>
34 #include <linux/stringify.h>
35 #include <linux/stat.h>
36 + #include <linux/miscdevice.h>
37 #include <linux/log2.h>
38 + #include <linux/kthread.h>
39 #include "ubi.h"
40
41 /* Maximum length of the 'mtd=' parameter */
··· 43 * struct mtd_dev_param - MTD device parameter description data structure.
44 * @name: MTD device name or number string
45 * @vid_hdr_offs: VID header offset
46 */
47 struct mtd_dev_param
48 {
49 char name[MTD_PARAM_LEN_MAX];
50 int vid_hdr_offs;
51 };
52
53 /* Numbers of elements set in the @mtd_dev_param array */
··· 58 /* MTD devices specification parameters */
59 static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES];
60
61 /* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
62 struct class *ubi_class;
63 +
64 + /* Slab cache for wear-leveling entries */
65 + struct kmem_cache *ubi_wl_entry_slab;
66 +
67 + /* UBI control character device */
68 + static struct miscdevice ubi_ctrl_cdev = {
69 + .minor = MISC_DYNAMIC_MINOR,
70 + .name = "ubi_ctrl",
71 + .fops = &ubi_ctrl_cdev_operations,
72 + };
73 +
74 + /* All UBI devices in system */
75 + static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
76 +
77 + /* Serializes UBI devices creations and removals */
78 + DEFINE_MUTEX(ubi_devices_mutex);
79 +
80 + /* Protects @ubi_devices and @ubi->ref_count */
81 + static DEFINE_SPINLOCK(ubi_devices_lock);
82
83 /* "Show" method for files in '/<sysfs>/class/ubi/' */
84 static ssize_t ubi_version_show(struct class *class, char *buf)
··· 101 __ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
102 static struct device_attribute dev_bgt_enabled =
103 __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
104 + static struct device_attribute dev_mtd_num =
105 + __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
106 +
107 + /**
108 + * ubi_get_device - get UBI device.
109 + * @ubi_num: UBI device number
110 + *
111 + * This function returns UBI device description object for UBI device number
112 + * @ubi_num, or %NULL if the device does not exist. This function increases the
113 + * device reference count to prevent removal of the device. In other words, the
114 + * device cannot be removed if its reference count is not zero.
115 + */
116 + struct ubi_device *ubi_get_device(int ubi_num)
117 + {
118 + struct ubi_device *ubi;
119 +
120 + spin_lock(&ubi_devices_lock);
121 + ubi = ubi_devices[ubi_num];
122 + if (ubi) {
123 + ubi_assert(ubi->ref_count >= 0);
124 + ubi->ref_count += 1;
125 + get_device(&ubi->dev);
126 + }
127 + spin_unlock(&ubi_devices_lock);
128 +
129 + return ubi;
130 + }
131 +
132 + /**
133 + * ubi_put_device - drop an UBI device reference.
134 + * @ubi: UBI device description object
135 + */
136 + void ubi_put_device(struct ubi_device *ubi)
137 + {
138 + spin_lock(&ubi_devices_lock);
139 + ubi->ref_count -= 1;
140 + put_device(&ubi->dev);
141 + spin_unlock(&ubi_devices_lock);
142 + }
143 +
144 + /**
145 + * ubi_get_by_major - get UBI device description object by character device
146 + * major number.
147 + * @major: major number
148 + *
149 + * This function is similar to 'ubi_get_device()', but it searches the device
150 + * by its major number.
151 + */
152 + struct ubi_device *ubi_get_by_major(int major)
153 + {
154 + int i;
155 + struct ubi_device *ubi;
156 +
157 + spin_lock(&ubi_devices_lock);
158 + for (i = 0; i < UBI_MAX_DEVICES; i++) {
159 + ubi = ubi_devices[i];
160 + if (ubi && MAJOR(ubi->cdev.dev) == major) {
161 + ubi_assert(ubi->ref_count >= 0);
162 + ubi->ref_count += 1;
163 + get_device(&ubi->dev);
164 + spin_unlock(&ubi_devices_lock);
165 + return ubi;
166 + }
167 + }
168 + spin_unlock(&ubi_devices_lock);
169 +
170 + return NULL;
171 + }
172 +
173 + /**
174 + * ubi_major2num - get UBI device number by character device major number.
175 + * @major: major number
176 + *
177 + * This function looks up the UBI device number by the character device major
178 + * number. If the UBI device was not found, this function returns -ENODEV,
179 + * otherwise the UBI device number is returned.
180 + */
181 + int ubi_major2num(int major)
182 + {
183 + int i, ubi_num = -ENODEV;
184 +
185 + spin_lock(&ubi_devices_lock);
186 + for (i = 0; i < UBI_MAX_DEVICES; i++) {
187 + struct ubi_device *ubi = ubi_devices[i];
188 +
189 + if (ubi && MAJOR(ubi->cdev.dev) == major) {
190 + ubi_num = ubi->ubi_num;
191 + break;
192 + }
193 + }
194 + spin_unlock(&ubi_devices_lock);
195 +
196 + return ubi_num;
197 + }
198
199 /* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
200 static ssize_t dev_attribute_show(struct device *dev,
201 struct device_attribute *attr, char *buf)
202 {
203 + ssize_t ret;
204 + struct ubi_device *ubi;
205
206 + /*
207 + * The below code looks weird, but it actually makes sense. We get the
208 + * UBI device reference from the contained 'struct ubi_device'. But it
209 + * is unclear if the device was removed or not yet. Indeed, if the
210 + * device was removed before we increased its reference count,
211 + * 'ubi_get_device()' will return %NULL and we fail with -ENODEV.
212 + *
213 + * Remember, 'struct ubi_device' is freed in the release function, so
214 + * we still can use 'ubi->ubi_num'.
215 + */ 216 ubi = container_of(dev, struct ubi_device, dev); 217 + ubi = ubi_get_device(ubi->ubi_num); 218 + if (!ubi) 219 + return -ENODEV; 220 221 + if (attr == &dev_eraseblock_size) 222 + ret = sprintf(buf, "%d\n", ubi->leb_size); 223 + else if (attr == &dev_avail_eraseblocks) 224 + ret = sprintf(buf, "%d\n", ubi->avail_pebs); 225 + else if (attr == &dev_total_eraseblocks) 226 + ret = sprintf(buf, "%d\n", ubi->good_peb_count); 227 + else if (attr == &dev_volumes_count) 228 + ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT); 229 + else if (attr == &dev_max_ec) 230 + ret = sprintf(buf, "%d\n", ubi->max_ec); 231 + else if (attr == &dev_reserved_for_bad) 232 + ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs); 233 + else if (attr == &dev_bad_peb_count) 234 + ret = sprintf(buf, "%d\n", ubi->bad_peb_count); 235 + else if (attr == &dev_max_vol_count) 236 + ret = sprintf(buf, "%d\n", ubi->vtbl_slots); 237 + else if (attr == &dev_min_io_size) 238 + ret = sprintf(buf, "%d\n", ubi->min_io_size); 239 + else if (attr == &dev_bgt_enabled) 240 + ret = sprintf(buf, "%d\n", ubi->thread_enabled); 241 + else if (attr == &dev_mtd_num) 242 + ret = sprintf(buf, "%d\n", ubi->mtd->index); 243 + else 244 + ret = -EINVAL; 245 + 246 + ubi_put_device(ubi); 247 + return ret; 248 } 249 250 /* Fake "release" method for UBI devices */ ··· 150 int err; 151 152 ubi->dev.release = dev_release; 153 + ubi->dev.devt = ubi->cdev.dev; 154 ubi->dev.class = ubi_class; 155 sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num); 156 err = device_register(&ubi->dev); 157 if (err) 158 + return err; 159 160 err = device_create_file(&ubi->dev, &dev_eraseblock_size); 161 if (err) 162 + return err; 163 err = device_create_file(&ubi->dev, &dev_avail_eraseblocks); 164 if (err) 165 + return err; 166 err = device_create_file(&ubi->dev, &dev_total_eraseblocks); 167 if (err) 168 + return err; 169 err = device_create_file(&ubi->dev, &dev_volumes_count); 170 if (err) 171 + return err; 172 err = device_create_file(&ubi->dev, &dev_max_ec); 173 if (err) 174 + return err; 175 err = device_create_file(&ubi->dev, &dev_reserved_for_bad); 176 if (err) 177 + return err; 178 err = device_create_file(&ubi->dev, &dev_bad_peb_count); 179 if (err) 180 + return err; 181 err = device_create_file(&ubi->dev, &dev_max_vol_count); 182 if (err) 183 + return err; 184 err = device_create_file(&ubi->dev, &dev_min_io_size); 185 if (err) 186 + return err; 187 err = device_create_file(&ubi->dev, &dev_bgt_enabled); 188 if (err) 189 + return err; 190 + err = device_create_file(&ubi->dev, &dev_mtd_num); 191 return err; 192 } 193 ··· 221 */ 222 static void ubi_sysfs_close(struct ubi_device *ubi) 223 { 224 + device_remove_file(&ubi->dev, &dev_mtd_num); 225 device_remove_file(&ubi->dev, &dev_bgt_enabled); 226 device_remove_file(&ubi->dev, &dev_min_io_size); 227 device_remove_file(&ubi->dev, &dev_max_vol_count); ··· 244 245 for (i = 0; i < ubi->vtbl_slots; i++) 246 if (ubi->volumes[i]) 247 + ubi_free_volume(ubi, ubi->volumes[i]); 248 } 249 250 /** ··· 258 { 259 int i, err; 260 dev_t dev; 261 262 sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num); 263 ··· 278 return err; 279 } 280 281 + ubi_assert(MINOR(dev) == 0); 282 cdev_init(&ubi->cdev, &ubi_cdev_operations); 283 + dbg_msg("%s major is %u", ubi->ubi_name, MAJOR(dev)); 284 ubi->cdev.owner = THIS_MODULE; 285 286 err = cdev_add(&ubi->cdev, dev, 1); 287 if (err) { 288 + ubi_err("cannot add character device"); 289 goto out_unreg; 290 } 291 292 err = ubi_sysfs_init(ubi); 293 if (err) 294 + goto out_sysfs; 
295
296 for (i = 0; i < ubi->vtbl_slots; i++)
297 if (ubi->volumes[i]) {
298 + err = ubi_add_volume(ubi, ubi->volumes[i]);
299 + if (err) {
300 + ubi_err("cannot add volume %d", i);
301 goto out_volumes;
302 + }
303 }
304
305 return 0;
306
307 out_volumes:
308 kill_volumes(ubi);
309 + out_sysfs:
310 ubi_sysfs_close(ubi);
311 cdev_del(&ubi->cdev);
312 out_unreg:
313 + unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
314 + ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
315 return err;
316 }
317
··· 323 kill_volumes(ubi);
324 ubi_sysfs_close(ubi);
325 cdev_del(&ubi->cdev);
326 + unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
327 }
328
329 /**
··· 384 * assumed:
385 * o EC header is always at offset zero - this cannot be changed;
386 * o VID header starts just after the EC header at the closest address
387 + * aligned to @io->hdrs_min_io_size;
388 * o data starts just after the VID header at the closest address aligned to
389 + * @io->min_io_size
390 *
391 * This function returns zero in case of success and a negative error code in
392 * case of failure.
··· 407 return -EINVAL;
408 }
409
410 + if (ubi->vid_hdr_offset < 0)
411 + return -EINVAL;
412 +
413 /*
414 * Note, in this implementation we support MTD devices with 0x7FFFFFFF
415 * physical eraseblocks maximum.
··· 424
425 /* Make sure minimal I/O unit is power of 2 */
426 if (!is_power_of_2(ubi->min_io_size)) {
427 + ubi_err("min. I/O unit (%d) is not power of 2",
428 + ubi->min_io_size);
429 return -EINVAL;
430 }
431
··· 453 }
454
455 /* Similar for the data offset */
456 + ubi->leb_start = ubi->vid_hdr_offset + UBI_EC_HDR_SIZE;
457 + ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
458
459 dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset);
460 dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
··· 514 }
515
516 /**
517 + * autoresize - re-size the volume which has the "auto-resize" flag set.
518 + * @ubi: UBI device description object
519 + * @vol_id: ID of the volume to re-size
520 + *
521 + * This function re-sizes the volume marked by the @UBI_VTBL_AUTORESIZE_FLG in
522 + * the volume table to the largest possible size. See comments in ubi-header.h
523 + * for more description of the flag. Returns zero in case of success and a
524 + * negative error code in case of failure.
525 */
526 + static int autoresize(struct ubi_device *ubi, int vol_id)
527 {
528 + struct ubi_volume_desc desc;
529 + struct ubi_volume *vol = ubi->volumes[vol_id];
530 + int err, old_reserved_pebs = vol->reserved_pebs;
531
532 + /*
533 + * Clear the auto-resize flag in the volume in-memory copy of the
534 + * volume table, and 'ubi_resize_volume()' will propagate this change
535 + * to the flash.
536 + */
537 + ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;
538
539 + if (ubi->avail_pebs == 0) {
540 + struct ubi_vtbl_record vtbl_rec;
541
542 /*
543 + * No available PEBs to re-size the volume, clear the flag on
544 + * flash and exit.
545 */
546 + memcpy(&vtbl_rec, &ubi->vtbl[vol_id],
547 + sizeof(struct ubi_vtbl_record));
548 + err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
549 + if (err)
550 + ubi_err("cannot clean auto-resize flag for volume %d",
551 + vol_id);
552 + } else {
553 + desc.vol = vol;
554 + err = ubi_resize_volume(&desc,
555 + old_reserved_pebs + ubi->avail_pebs);
556 + if (err)
557 + ubi_err("cannot auto-resize volume %d", vol_id);
558 }
559
560 + if (err)
561 + return err;
562 +
563 + ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id,
564 + vol->name, old_reserved_pebs, vol->reserved_pebs);
565 + return 0;
566 + }
567 +
568 + /**
569 + * ubi_attach_mtd_dev - attach an MTD device.
570 + * @mtd: MTD device description object
571 + * @ubi_num: number to assign to the new UBI device
572 + * @vid_hdr_offset: VID header offset
573 + *
574 + * This function attaches MTD device @mtd to UBI and assigns number @ubi_num
575 + * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
576 + * which case this function finds a vacant device number and assigns it
577 + * automatically. Returns the new UBI device number in case of success and a
578 + * negative error code in case of failure.
579 + *
580 + * Note, the invocations of this function have to be serialized by the
581 + * @ubi_devices_mutex.
582 + */
583 + int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
584 + {
585 + struct ubi_device *ubi;
586 + int i, err;
587 +
588 + /*
589 + * Check if we already have the same MTD device attached.
590 + *
591 + * Note, this function assumes that UBI device creations and deletions
592 + * are serialized, so it does not take the &ubi_devices_lock.
593 + */
594 + for (i = 0; i < UBI_MAX_DEVICES; i++) {
595 + ubi = ubi_devices[i];
596 + if (ubi && mtd->index == ubi->mtd->index) {
597 + dbg_err("mtd%d is already attached to ubi%d",
598 mtd->index, i);
599 + return -EEXIST;
600 }
601 }
602
603 + /*
604 + * Make sure this MTD device is not emulated on top of an UBI volume
605 + * already. Well, generally this recursion works fine, but it causes
606 + * problems: for example, the UBI module takes a reference to itself
607 + * by attaching (and thus, opening) the emulated MTD device. This
608 + * results in an inability to unload the module. And in general it makes
609 + * no sense to attach emulated MTD devices, so we prohibit this.
610 + */
611 + if (mtd->type == MTD_UBIVOLUME) {
612 + ubi_err("refuse attaching mtd%d - it is already emulated on "
613 + "top of UBI", mtd->index);
614 + return -EINVAL;
615 + }
616 +
617 + if (ubi_num == UBI_DEV_NUM_AUTO) {
618 + /* Search for an empty slot in the @ubi_devices array */
619 + for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
620 + if (!ubi_devices[ubi_num])
621 + break;
622 + if (ubi_num == UBI_MAX_DEVICES) {
623 + dbg_err("only %d UBI devices may be created", UBI_MAX_DEVICES);
624 + return -ENFILE;
625 + }
626 + } else {
627 + if (ubi_num >= UBI_MAX_DEVICES)
628 + return -EINVAL;
629 +
630 + /* Make sure ubi_num is not busy */
631 + if (ubi_devices[ubi_num]) {
632 + dbg_err("ubi%d already exists", ubi_num);
633 + return -EEXIST;
634 + }
635 + }
636 +
637 + ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
638 + if (!ubi)
639 + return -ENOMEM;
640 +
641 ubi->mtd = mtd;
642 + ubi->ubi_num = ubi_num;
643 ubi->vid_hdr_offset = vid_hdr_offset;
644 + ubi->autoresize_vol_id = -1;
645 +
646 + mutex_init(&ubi->buf_mutex);
647 + mutex_init(&ubi->ckvol_mutex);
648 + mutex_init(&ubi->volumes_mutex);
649 + spin_lock_init(&ubi->volumes_lock);
650 +
651 + dbg_msg("attaching mtd%d to ubi%d: VID header offset %d",
652 + mtd->index, ubi_num, vid_hdr_offset);
653 +
654 err = io_init(ubi);
655 if (err)
656 goto out_free;
657
658 ubi->peb_buf1 = vmalloc(ubi->peb_size);
659 if (!ubi->peb_buf1)
660 goto out_free;
··· 605 goto out_free;
606 }
607
608 + if (ubi->autoresize_vol_id != -1) {
609 + err = autoresize(ubi, ubi->autoresize_vol_id);
610 + if (err)
611 + goto out_detach;
612 + }
613 +
614 err = uif_init(ubi);
615 if (err)
616 goto out_detach;
617
618 + ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
619 + if (IS_ERR(ubi->bgt_thread)) {
620 + err = PTR_ERR(ubi->bgt_thread);
621 + ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
622 + err);
623 + goto out_uif;
624 + }
625 +
626 + ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
627 + ubi_msg("MTD device name: \"%s\"", mtd->name);
628 ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20);
629 ubi_msg("physical eraseblock size: %d bytes (%d KiB)",
630 ubi->peb_size, ubi->peb_size >> 10);
··· 638 wake_up_process(ubi->bgt_thread);
639 }
640
641 + ubi_devices[ubi_num] = ubi;
642 + return ubi_num;
643
644 + out_uif:
645 + uif_close(ubi);
646 out_detach:
647 ubi_eba_close(ubi);
648 ubi_wl_close(ubi);
··· 652 vfree(ubi->dbg_peb_buf);
653 #endif
654 kfree(ubi);
655 return err;
656 }
657
658 /**
659 + * ubi_detach_mtd_dev - detach an MTD device.
660 + * @ubi_num: UBI device number to detach from
661 + * @anyway: detach MTD even if device reference count is not zero
662 + *
663 + * This function destroys UBI device number @ubi_num and detaches the
664 + * underlying MTD device. Returns zero in case of success and %-EBUSY if the
665 + * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
666 + * exist.
667 + *
668 + * Note, the invocations of this function have to be serialized by the
669 + * @ubi_devices_mutex.
670 */
671 + int ubi_detach_mtd_dev(int ubi_num, int anyway)
672 {
673 + struct ubi_device *ubi;
674
675 + if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
676 + return -EINVAL;
677 +
678 + spin_lock(&ubi_devices_lock);
679 + ubi = ubi_devices[ubi_num];
680 + if (!ubi) {
681 + spin_unlock(&ubi_devices_lock);
682 + return -EINVAL;
683 + }
684 +
685 + if (ubi->ref_count) {
686 + if (!anyway) {
687 + spin_unlock(&ubi_devices_lock);
688 + return -EBUSY;
689 + }
690 + /* This may only happen if there is a bug */
691 + ubi_err("%s reference count %d, destroy anyway",
692 + ubi->ubi_name, ubi->ref_count);
693 + }
694 + ubi_devices[ubi_num] = NULL;
695 + spin_unlock(&ubi_devices_lock);
696 +
697 + ubi_assert(ubi_num == ubi->ubi_num);
698 dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
699 +
700 + /*
701 + * Before freeing anything, we have to stop the background thread to
702 + * prevent it from doing anything on this device while we are freeing.
703 + */
704 + if (ubi->bgt_thread)
705 + kthread_stop(ubi->bgt_thread);
706 +
707 uif_close(ubi);
708 ubi_eba_close(ubi);
709 ubi_wl_close(ubi);
··· 677 #ifdef CONFIG_MTD_UBI_DEBUG
678 vfree(ubi->dbg_peb_buf);
679 #endif
680 + ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
681 + kfree(ubi);
682 + return 0;
683 + }
684 +
685 + /**
686 + * open_mtd_device - open an MTD device by its name or number.
687 + * @mtd_dev: name or number of the device
688 + *
689 + * This function tries to open an MTD device described by the @mtd_dev string,
690 + * which is first treated as an ASCII device number; if that fails, it is
691 + * treated as an MTD device name. Returns the MTD device description object in
692 + * case of success and a negative error code in case of failure.
693 + */
694 + static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
695 + {
696 + struct mtd_info *mtd;
697 + int mtd_num;
698 + char *endp;
699 +
700 + mtd_num = simple_strtoul(mtd_dev, &endp, 0);
701 + if (*endp != '\0' || mtd_dev == endp) {
702 + /*
703 + * This does not look like an ASCII integer, probably it is an
704 + * MTD device name.
705 + */ 706 + mtd = get_mtd_device_nm(mtd_dev); 707 + } else 708 + mtd = get_mtd_device(NULL, mtd_num); 709 + 710 + return mtd; 711 } 712 713 static int __init ubi_init(void) ··· 693 BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64); 694 695 if (mtd_devs > UBI_MAX_DEVICES) { 696 + printk(KERN_ERR "UBI error: too many MTD devices, " 697 + "maximum is %d\n", UBI_MAX_DEVICES); 698 return -EINVAL; 699 } 700 701 + /* Create base sysfs directory and sysfs files */ 702 ubi_class = class_create(THIS_MODULE, UBI_NAME_STR); 703 + if (IS_ERR(ubi_class)) { 704 + err = PTR_ERR(ubi_class); 705 + printk(KERN_ERR "UBI error: cannot create UBI class\n"); 706 + goto out; 707 + } 708 709 err = class_create_file(ubi_class, &ubi_version); 710 + if (err) { 711 + printk(KERN_ERR "UBI error: cannot create sysfs file\n"); 712 goto out_class; 713 + } 714 + 715 + err = misc_register(&ubi_ctrl_cdev); 716 + if (err) { 717 + printk(KERN_ERR "UBI error: cannot register device\n"); 718 + goto out_version; 719 + } 720 + 721 + ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab", 722 + sizeof(struct ubi_wl_entry), 723 + 0, 0, NULL); 724 + if (!ubi_wl_entry_slab) 725 + goto out_dev_unreg; 726 727 /* Attach MTD devices */ 728 for (i = 0; i < mtd_devs; i++) { 729 struct mtd_dev_param *p = &mtd_dev_param[i]; 730 + struct mtd_info *mtd; 731 732 cond_resched(); 733 + 734 + mtd = open_mtd_device(p->name); 735 + if (IS_ERR(mtd)) { 736 + err = PTR_ERR(mtd); 737 goto out_detach; 738 + } 739 + 740 + mutex_lock(&ubi_devices_mutex); 741 + err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 742 + p->vid_hdr_offs); 743 + mutex_unlock(&ubi_devices_mutex); 744 + if (err < 0) { 745 + put_mtd_device(mtd); 746 + printk(KERN_ERR "UBI error: cannot attach %s\n", 747 + p->name); 748 + goto out_detach; 749 + } 750 } 751 752 return 0; 753 754 out_detach: 755 for (k = 0; k < i; k++) 756 + if (ubi_devices[k]) { 757 + mutex_lock(&ubi_devices_mutex); 758 + ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1); 759 + mutex_unlock(&ubi_devices_mutex); 760 + } 761 + kmem_cache_destroy(ubi_wl_entry_slab); 762 + out_dev_unreg: 763 + misc_deregister(&ubi_ctrl_cdev); 764 + out_version: 765 class_remove_file(ubi_class, &ubi_version); 766 out_class: 767 class_destroy(ubi_class); 768 + out: 769 + printk(KERN_ERR "UBI error: cannot initialize UBI, error %d\n", err); 770 return err; 771 } 772 module_init(ubi_init); 773 774 static void __exit ubi_exit(void) 775 { 776 + int i; 777 778 + for (i = 0; i < UBI_MAX_DEVICES; i++) 779 + if (ubi_devices[i]) { 780 + mutex_lock(&ubi_devices_mutex); 781 + ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1); 782 + mutex_unlock(&ubi_devices_mutex); 783 + } 784 + kmem_cache_destroy(ubi_wl_entry_slab); 785 + misc_deregister(&ubi_ctrl_cdev); 786 class_remove_file(ubi_class, &ubi_version); 787 class_destroy(ubi_class); 788 } ··· 754 755 result = simple_strtoul(str, &endp, 0); 756 if (str == endp || result < 0) { 757 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n", 758 + str); 759 return -EINVAL; 760 } 761 ··· 764 case 'M': 765 result *= 1024; 766 case 'K': 767 result *= 1024; 768 + if (endp[1] == 'i' && endp[2] == 'B') 769 endp += 2; 770 case '\0': 771 break; 772 default: 773 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n", 774 + str); 775 return -EINVAL; 776 } 777 ··· 793 struct mtd_dev_param *p; 794 char buf[MTD_PARAM_LEN_MAX]; 795 char *pbuf = &buf[0]; 796 + char *tokens[2] = {NULL, NULL}; 797 + 798 + if (!val) 799 + return -EINVAL; 800 801 if (mtd_devs == UBI_MAX_DEVICES) { 802 + printk(KERN_ERR "UBI error: too 
many parameters, max. is %d\n",
803 UBI_MAX_DEVICES);
804 return -EINVAL;
805 }
806
807 len = strnlen(val, MTD_PARAM_LEN_MAX);
808 if (len == MTD_PARAM_LEN_MAX) {
809 + printk(KERN_ERR "UBI error: parameter \"%s\" is too long, "
810 + "max. is %d\n", val, MTD_PARAM_LEN_MAX);
811 return -EINVAL;
812 }
813
814 if (len == 0) {
815 + printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - "
816 + "ignored\n");
817 return 0;
818 }
819
··· 819 if (buf[len - 1] == '\n')
820 buf[len - 1] = '\0';
821
822 + for (i = 0; i < 2; i++)
823 tokens[i] = strsep(&pbuf, ",");
824
825 if (pbuf) {
826 + printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n",
827 + val);
828 return -EINVAL;
829 }
830
··· 832
833 if (tokens[1])
834 p->vid_hdr_offs = bytes_str_to_int(tokens[1]);
835
836 if (p->vid_hdr_offs < 0)
837 return p->vid_hdr_offs;
838
839 mtd_devs += 1;
840 return 0;
··· 846
847 module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
848 MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: "
849 + "mtd=<name|num>[,<vid_hdr_offs>].\n"
850 "Multiple \"mtd\" parameters may be specified.\n"
851 + "MTD devices may be specified by their number or name.\n"
852 + "Optional \"vid_hdr_offs\" parameter specifies UBI VID "
853 + "header position and data starting position to be used "
854 + "by UBI.\n"
855 + "Example: mtd=content,1984 mtd=4 - attach MTD device "
856 + "with name \"content\" using VID header offset 1984, and "
857 + "MTD device number 4 with default VID header offset.");
858
859 MODULE_VERSION(__stringify(UBI_VERSION));
860 MODULE_DESCRIPTION("UBI - Unsorted Block Images");
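The switch in bytes_str_to_int() above depends on deliberate fall-through: "1M" multiplies by 1024 twice before hitting the '\0' break, and the rewritten parser accepts only the exact "KiB" spelling, where the old one also took a lowercase 'k' and a trailing 'b'. Below is a minimal userspace model of the shown cases; bytes_str_to_int_model() and its main() harness are invented names for illustration, not kernel code.

#include <stdio.h>
#include <stdlib.h>

/* Userspace model of the suffix parsing in bytes_str_to_int() above. */
static long bytes_str_to_int_model(const char *str)
{
	char *endp;
	long result = strtol(str, &endp, 0);

	if (endp == str || result < 0)
		return -1;	/* the kernel function returns -EINVAL */

	switch (*endp) {
	case 'M':
		result *= 1024;
		/* deliberate fall-through: 'M' is 1024 * 1024 */
	case 'K':
		result *= 1024;
		if (endp[1] == 'i' && endp[2] == 'B')
			endp += 2;	/* consume the "iB" tail */
		/* fall-through into the terminator case */
	case '\0':
		break;
	default:
		return -1;	/* unknown suffix */
	}
	return result;
}

int main(void)
{
	/* prints "1984 2048 1048576" */
	printf("%ld %ld %ld\n", bytes_str_to_int_model("1984"),
	       bytes_str_to_int_model("2KiB"), bytes_str_to_int_model("1M"));
	return 0;
}

A bare "2K" still parses because the fall-through ends in the '\0' case's break; only an unrecognized first suffix character reaches the default arm.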
+181 -63
drivers/mtd/ubi/cdev.c
··· 28 * 29 * Major and minor numbers are assigned dynamically to both UBI and volume 30 * character devices. 31 */ 32 33 #include <linux/module.h> ··· 43 #include <asm/uaccess.h> 44 #include <asm/div64.h> 45 #include "ubi.h" 46 - 47 - /* 48 - * Maximum sequence numbers of UBI and volume character device IOCTLs (direct 49 - * logical eraseblock erase is a debug-only feature). 50 - */ 51 - #define UBI_CDEV_IOC_MAX_SEQ 2 52 - #ifndef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO 53 - #define VOL_CDEV_IOC_MAX_SEQ 1 54 - #else 55 - #define VOL_CDEV_IOC_MAX_SEQ 2 56 - #endif 57 - 58 - /** 59 - * major_to_device - get UBI device object by character device major number. 60 - * @major: major number 61 - * 62 - * This function returns a pointer to the UBI device object. 63 - */ 64 - static struct ubi_device *major_to_device(int major) 65 - { 66 - int i; 67 - 68 - for (i = 0; i < ubi_devices_cnt; i++) 69 - if (ubi_devices[i] && ubi_devices[i]->major == major) 70 - return ubi_devices[i]; 71 - BUG(); 72 - return NULL; 73 - } 74 75 /** 76 * get_exclusive - get exclusive access to an UBI volume. ··· 101 static int vol_cdev_open(struct inode *inode, struct file *file) 102 { 103 struct ubi_volume_desc *desc; 104 - const struct ubi_device *ubi = major_to_device(imajor(inode)); 105 - int vol_id = iminor(inode) - 1; 106 - int mode; 107 108 if (file->f_mode & FMODE_WRITE) 109 mode = UBI_READWRITE; ··· 114 115 dbg_msg("open volume %d, mode %d", vol_id, mode); 116 117 - desc = ubi_open_volume(ubi->ubi_num, vol_id, mode); 118 if (IS_ERR(desc)) 119 return PTR_ERR(desc); 120 ··· 132 if (vol->updating) { 133 ubi_warn("update of volume %d not finished, volume is damaged", 134 vol->vol_id); 135 vol->updating = 0; 136 vfree(vol->upd_buf); 137 } 138 ··· 191 struct ubi_volume_desc *desc = file->private_data; 192 struct ubi_volume *vol = desc->vol; 193 struct ubi_device *ubi = vol->ubi; 194 - int err, lnum, off, len, vol_id = desc->vol->vol_id, tbuf_size; 195 size_t count_save = count; 196 void *tbuf; 197 uint64_t tmp; 198 199 dbg_msg("read %zd bytes from offset %lld of volume %d", 200 - count, *offp, vol_id); 201 202 if (vol->updating) { 203 dbg_err("updating"); ··· 211 return 0; 212 213 if (vol->corrupted) 214 - dbg_msg("read from corrupted volume %d", vol_id); 215 216 if (*offp + count > vol->used_bytes) 217 count_save = count = vol->used_bytes - *offp; ··· 235 if (off + len >= vol->usable_leb_size) 236 len = vol->usable_leb_size - off; 237 238 - err = ubi_eba_read_leb(ubi, vol_id, lnum, tbuf, off, len, 0); 239 if (err) 240 break; 241 ··· 275 struct ubi_volume_desc *desc = file->private_data; 276 struct ubi_volume *vol = desc->vol; 277 struct ubi_device *ubi = vol->ubi; 278 - int lnum, off, len, tbuf_size, vol_id = vol->vol_id, err = 0; 279 size_t count_save = count; 280 char *tbuf; 281 uint64_t tmp; 282 283 dbg_msg("requested: write %zd bytes to offset %lld of volume %u", 284 - count, *offp, desc->vol->vol_id); 285 286 if (vol->vol_type == UBI_STATIC_VOLUME) 287 return -EROFS; ··· 325 break; 326 } 327 328 - err = ubi_eba_write_leb(ubi, vol_id, lnum, tbuf, off, len, 329 UBI_UNKNOWN); 330 if (err) 331 break; ··· 358 struct ubi_volume *vol = desc->vol; 359 struct ubi_device *ubi = vol->ubi; 360 361 - if (!vol->updating) 362 return vol_cdev_direct_write(file, buf, count, offp); 363 364 - err = ubi_more_update_data(ubi, vol->vol_id, buf, count); 365 if (err < 0) { 366 - ubi_err("cannot write %zd bytes of update data", count); 367 return err; 368 } 369 370 if (err) { 371 /* 372 - * Update is finished, @err contains number of actually 
written 373 - * bytes now. 374 */ 375 count = err; 376 377 err = ubi_check_volume(ubi, vol->vol_id); 378 if (err < 0) ··· 398 revoke_exclusive(desc, UBI_READWRITE); 399 } 400 401 - *offp += count; 402 return count; 403 } 404 ··· 442 if (err < 0) 443 break; 444 445 - err = ubi_start_update(ubi, vol->vol_id, bytes); 446 if (bytes == 0) 447 revoke_exclusive(desc, UBI_READWRITE); 448 449 - file->f_pos = 0; 450 break; 451 } 452 ··· 497 break; 498 } 499 500 - if (desc->mode == UBI_READONLY) { 501 err = -EROFS; 502 break; 503 } ··· 508 break; 509 } 510 511 - if (vol->vol_type != UBI_DYNAMIC_VOLUME) { 512 - err = -EROFS; 513 - break; 514 - } 515 - 516 dbg_msg("erase LEB %d:%d", vol->vol_id, lnum); 517 - err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum); 518 if (err) 519 break; 520 ··· 606 if (!capable(CAP_SYS_RESOURCE)) 607 return -EPERM; 608 609 - ubi = major_to_device(imajor(inode)); 610 - if (IS_ERR(ubi)) 611 - return PTR_ERR(ubi); 612 613 switch (cmd) { 614 /* Create volume command */ ··· 617 struct ubi_mkvol_req req; 618 619 dbg_msg("create volume"); 620 - err = copy_from_user(&req, argp, 621 - sizeof(struct ubi_mkvol_req)); 622 if (err) { 623 err = -EFAULT; 624 break; ··· 629 630 req.name[req.name_len] = '\0'; 631 632 err = ubi_create_volume(ubi, &req); 633 if (err) 634 break; 635 ··· 660 break; 661 } 662 663 err = ubi_remove_volume(desc); 664 - if (err) 665 - ubi_close_volume(desc); 666 667 break; 668 } 669 ··· 681 struct ubi_rsvol_req req; 682 683 dbg_msg("re-size volume"); 684 - err = copy_from_user(&req, argp, 685 - sizeof(struct ubi_rsvol_req)); 686 if (err) { 687 err = -EFAULT; 688 break; ··· 701 pebs = !!do_div(tmp, desc->vol->usable_leb_size); 702 pebs += tmp; 703 704 err = ubi_resize_volume(desc, pebs); 705 ubi_close_volume(desc); 706 break; 707 } 708 ··· 793 794 return err; 795 } 796 797 /* UBI character device operations */ 798 struct file_operations ubi_cdev_operations = {
··· 28 *
29 * Major and minor numbers are assigned dynamically to both UBI and volume
30 * character devices.
31 + *
32 + * Well, there is also a third kind of character device - the UBI control
33 + * character device, which is used to manipulate UBI devices - create and
34 + * delete them. In other words, it is used for attaching and detaching MTD
35 + * devices.
36 */
37
38 #include <linux/module.h>
··· 38 #include <asm/uaccess.h>
39 #include <asm/div64.h>
40 #include "ubi.h"
41
42 /**
43 * get_exclusive - get exclusive access to an UBI volume.
··· 124 static int vol_cdev_open(struct inode *inode, struct file *file)
125 {
126 struct ubi_volume_desc *desc;
127 + int vol_id = iminor(inode) - 1, mode, ubi_num;
128 +
129 + ubi_num = ubi_major2num(imajor(inode));
130 + if (ubi_num < 0)
131 + return ubi_num;
132
133 if (file->f_mode & FMODE_WRITE)
134 mode = UBI_READWRITE;
··· 135
136 dbg_msg("open volume %d, mode %d", vol_id, mode);
137
138 + desc = ubi_open_volume(ubi_num, vol_id, mode);
139 if (IS_ERR(desc))
140 return PTR_ERR(desc);
141
··· 153 if (vol->updating) {
154 ubi_warn("update of volume %d not finished, volume is damaged",
155 vol->vol_id);
156 + ubi_assert(!vol->changing_leb);
157 vol->updating = 0;
158 + vfree(vol->upd_buf);
159 + } else if (vol->changing_leb) {
160 + dbg_msg("only %lld of %lld bytes received for atomic LEB change"
161 + " for volume %d:%d, cancel", vol->upd_received,
162 + vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id);
163 + vol->changing_leb = 0;
164 vfree(vol->upd_buf);
165 }
166
··· 205 struct ubi_volume_desc *desc = file->private_data;
206 struct ubi_volume *vol = desc->vol;
207 struct ubi_device *ubi = vol->ubi;
208 + int err, lnum, off, len, tbuf_size;
209 size_t count_save = count;
210 void *tbuf;
211 uint64_t tmp;
212
213 dbg_msg("read %zd bytes from offset %lld of volume %d",
214 + count, *offp, vol->vol_id);
215
216 if (vol->updating) {
217 dbg_err("updating");
··· 225 return 0;
226
227 if (vol->corrupted)
228 + dbg_msg("read from corrupted volume %d", vol->vol_id);
229
230 if (*offp + count > vol->used_bytes)
231 count_save = count = vol->used_bytes - *offp;
··· 249 if (off + len >= vol->usable_leb_size)
250 len = vol->usable_leb_size - off;
251
252 + err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0);
253 if (err)
254 break;
255
··· 289 struct ubi_volume_desc *desc = file->private_data;
290 struct ubi_volume *vol = desc->vol;
291 struct ubi_device *ubi = vol->ubi;
292 + int lnum, off, len, tbuf_size, err = 0;
293 size_t count_save = count;
294 char *tbuf;
295 uint64_t tmp;
296
297 dbg_msg("requested: write %zd bytes to offset %lld of volume %u",
298 + count, *offp, vol->vol_id);
299
300 if (vol->vol_type == UBI_STATIC_VOLUME)
301 return -EROFS;
··· 339 break;
340 }
341
342 + err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len,
343 UBI_UNKNOWN);
344 if (err)
345 break;
··· 372 struct ubi_volume *vol = desc->vol;
373 struct ubi_device *ubi = vol->ubi;
374
375 + if (!vol->updating && !vol->changing_leb)
376 return vol_cdev_direct_write(file, buf, count, offp);
377
378 + if (vol->updating)
379 + err = ubi_more_update_data(ubi, vol, buf, count);
380 + else
381 + err = ubi_more_leb_change_data(ubi, vol, buf, count);
382 +
383 + if (err < 0) {
384 + ubi_err("cannot accept more %zd bytes of data, error %d",
385 + count, err);
386 return err;
387 }
388
389 if (err) {
390 /*
391 + * The operation is finished, @err contains the number of
392 + * actually written bytes.
393 */
394 count = err;
395 +
396 + if (vol->changing_leb) {
397 + revoke_exclusive(desc, UBI_READWRITE);
398 + return count;
399 + }
400
401 err = ubi_check_volume(ubi, vol->vol_id);
402 if (err < 0)
··· 402 revoke_exclusive(desc, UBI_READWRITE);
403 }
404
405 return count;
406 }
407
··· 447 if (err < 0)
448 break;
449
450 + err = ubi_start_update(ubi, vol, bytes);
451 if (bytes == 0)
452 revoke_exclusive(desc, UBI_READWRITE);
453 + break;
454 + }
455
456 + /* Atomic logical eraseblock change command */
457 + case UBI_IOCEBCH:
458 + {
459 + struct ubi_leb_change_req req;
460 +
461 + err = copy_from_user(&req, argp,
462 + sizeof(struct ubi_leb_change_req));
463 + if (err) {
464 + err = -EFAULT;
465 + break;
466 + }
467 +
468 + if (desc->mode == UBI_READONLY ||
469 + vol->vol_type == UBI_STATIC_VOLUME) {
470 + err = -EROFS;
471 + break;
472 + }
473 +
474 + /* Validate the request */
475 + err = -EINVAL;
476 + if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
477 + req.bytes < 0 || req.bytes > vol->usable_leb_size)
478 + break;
479 + if (req.dtype != UBI_LONGTERM && req.dtype != UBI_SHORTTERM &&
480 + req.dtype != UBI_UNKNOWN)
481 + break;
482 +
483 + err = get_exclusive(desc);
484 + if (err < 0)
485 + break;
486 +
487 + err = ubi_start_leb_change(ubi, vol, &req);
488 + if (req.bytes == 0)
489 + revoke_exclusive(desc, UBI_READWRITE);
490 break;
491 }
492
··· 467 break;
468 }
469
470 + if (desc->mode == UBI_READONLY ||
471 + vol->vol_type == UBI_STATIC_VOLUME) {
472 err = -EROFS;
473 break;
474 }
··· 477 break;
478 }
479
480 dbg_msg("erase LEB %d:%d", vol->vol_id, lnum);
481 + err = ubi_eba_unmap_leb(ubi, vol, lnum);
482 if (err)
483 break;
484
··· 580 if (!capable(CAP_SYS_RESOURCE))
581 return -EPERM;
582
583 + ubi = ubi_get_by_major(imajor(inode));
584 + if (!ubi)
585 + return -ENODEV;
586
587 switch (cmd) {
/* Create volume command */
··· 591 struct ubi_mkvol_req req;
592
593 dbg_msg("create volume");
594 + err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req));
595 if (err) {
596 err = -EFAULT;
597 break;
··· 604
605 req.name[req.name_len] = '\0';
606
607 + mutex_lock(&ubi->volumes_mutex);
608 err = ubi_create_volume(ubi, &req);
609 + mutex_unlock(&ubi->volumes_mutex);
610 if (err)
611 break;
612
··· 633 break;
634 }
635
636 + mutex_lock(&ubi->volumes_mutex);
637 err = ubi_remove_volume(desc);
638 + mutex_unlock(&ubi->volumes_mutex);
639
640 + /*
641 + * The volume is deleted (unless an error occurred), and the
642 + * 'struct ubi_volume' object will be freed when
643 + * 'ubi_close_volume()' calls 'put_device()'.
644 + */
645 + ubi_close_volume(desc);
646 break;
647 }
648
··· 648 struct ubi_rsvol_req req;
649
650 dbg_msg("re-size volume");
651 + err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req));
652 if (err) {
653 err = -EFAULT;
654 break;
··· 669 pebs = !!do_div(tmp, desc->vol->usable_leb_size);
670 pebs += tmp;
671
672 + mutex_lock(&ubi->volumes_mutex);
673 err = ubi_resize_volume(desc, pebs);
674 + mutex_unlock(&ubi->volumes_mutex);
675 ubi_close_volume(desc);
676 + break;
677 + }
678 +
679 + default:
680 + err = -ENOTTY;
681 + break;
682 + }
683 +
684 + ubi_put_device(ubi);
685 + return err;
686 + }
687 +
688 + static int ctrl_cdev_ioctl(struct inode *inode, struct file *file,
689 + unsigned int cmd, unsigned long arg)
690 + {
691 + int err = 0;
692 + void __user *argp = (void __user *)arg;
693 +
694 + if (!capable(CAP_SYS_RESOURCE))
695 + return -EPERM;
696 +
697 + switch (cmd) {
698 + /* Attach an MTD device command */
699 + case UBI_IOCATT:
700 + {
701 + struct ubi_attach_req req;
702 + struct mtd_info *mtd;
703 +
704 + dbg_msg("attach MTD device");
705 + err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req));
706 + if (err) {
707 + err = -EFAULT;
708 + break;
709 + }
710 +
711 + if (req.mtd_num < 0 ||
712 + (req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) {
713 + err = -EINVAL;
714 + break;
715 + }
716 +
717 + mtd = get_mtd_device(NULL, req.mtd_num);
718 + if (IS_ERR(mtd)) {
719 + err = PTR_ERR(mtd);
720 + break;
721 + }
722 +
723 + /*
724 + * Note, further request verification is done by
725 + * 'ubi_attach_mtd_dev()'.
726 + */
727 + mutex_lock(&ubi_devices_mutex);
728 + err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset);
729 + mutex_unlock(&ubi_devices_mutex);
730 + if (err < 0)
731 + put_mtd_device(mtd);
732 + else
733 + /* @err contains UBI device number */
734 + err = put_user(err, (__user int32_t *)argp);
735 +
736 + break;
737 + }
738 +
739 + /* Detach an MTD device command */
740 + case UBI_IOCDET:
741 + {
742 + int ubi_num;
743 +
744 + dbg_msg("detach MTD device");
745 + err = get_user(ubi_num, (__user int32_t *)argp);
746 + if (err) {
747 + err = -EFAULT;
748 + break;
749 + }
750 +
751 + mutex_lock(&ubi_devices_mutex);
752 + err = ubi_detach_mtd_dev(ubi_num, 0);
753 + mutex_unlock(&ubi_devices_mutex);
754 break;
755 }
··· 681
682 return err;
683 }
684 +
685 + /* UBI control character device operations */
686 + struct file_operations ubi_ctrl_cdev_operations = {
687 + .ioctl = ctrl_cdev_ioctl,
688 + .owner = THIS_MODULE,
689 + };
690
691 /* UBI character device operations */
692 struct file_operations ubi_cdev_operations = {
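The new /dev/ubi_ctrl misc device turns attach and detach into run-time operations. The sketch below shows how userspace might drive UBI_IOCATT; it assumes the struct ubi_attach_req and ioctl definitions come from a <mtd/ubi-user.h> header that is not part of this diff, and it needs CAP_SYS_RESOURCE to pass the capability check in ctrl_cdev_ioctl().

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>	/* struct ubi_attach_req, UBI_IOCATT (assumed location) */

int main(void)
{
	struct ubi_attach_req req = {
		.ubi_num = UBI_DEV_NUM_AUTO,	/* let UBI pick a free number */
		.mtd_num = 0,			/* attach mtd0 */
		.vid_hdr_offset = 0,		/* 0 asks for the default offset */
	};
	int fd = open("/dev/ubi_ctrl", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/ubi_ctrl");
		return 1;
	}
	if (ioctl(fd, UBI_IOCATT, &req) < 0) {
		perror("UBI_IOCATT");
		close(fd);
		return 1;
	}
	/* ctrl_cdev_ioctl() writes the assigned device number back through
	 * argp, i.e. into the leading ubi_num field (see put_user() above) */
	printf("attached as ubi%d\n", req.ubi_num);
	close(fd);
	return 0;
}

Detach is symmetric: UBI_IOCDET takes a plain int32_t device number, and ubi_detach_mtd_dev() refuses with -EBUSY while the device reference count is non-zero (unless called with @anyway set, which only the module init error path and exit path do).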
+7 -14
drivers/mtd/ubi/debug.h
··· 39 40 #ifdef CONFIG_MTD_UBI_DEBUG_MSG 41 /* Generic debugging message */ 42 - #define dbg_msg(fmt, ...) \ 43 - printk(KERN_DEBUG "UBI DBG: %s: " fmt "\n", __FUNCTION__, ##__VA_ARGS__) 44 45 #define ubi_dbg_dump_stack() dump_stack() 46 ··· 77 78 #ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA 79 /* Messages from the eraseblock association unit */ 80 - #define dbg_eba(fmt, ...) \ 81 - printk(KERN_DEBUG "UBI DBG eba: %s: " fmt "\n", __FUNCTION__, \ 82 - ##__VA_ARGS__) 83 #else 84 #define dbg_eba(fmt, ...) ({}) 85 #endif 86 87 #ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL 88 /* Messages from the wear-leveling unit */ 89 - #define dbg_wl(fmt, ...) \ 90 - printk(KERN_DEBUG "UBI DBG wl: %s: " fmt "\n", __FUNCTION__, \ 91 - ##__VA_ARGS__) 92 #else 93 #define dbg_wl(fmt, ...) ({}) 94 #endif 95 96 #ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO 97 /* Messages from the input/output unit */ 98 - #define dbg_io(fmt, ...) \ 99 - printk(KERN_DEBUG "UBI DBG io: %s: " fmt "\n", __FUNCTION__, \ 100 - ##__VA_ARGS__) 101 #else 102 #define dbg_io(fmt, ...) ({}) 103 #endif 104 105 #ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD 106 /* Initialization and build messages */ 107 - #define dbg_bld(fmt, ...) \ 108 - printk(KERN_DEBUG "UBI DBG bld: %s: " fmt "\n", __FUNCTION__, \ 109 - ##__VA_ARGS__) 110 #else 111 #define dbg_bld(fmt, ...) ({}) 112 #endif
··· 39 40 #ifdef CONFIG_MTD_UBI_DEBUG_MSG 41 /* Generic debugging message */ 42 + #define dbg_msg(fmt, ...) \ 43 + printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \ 44 + current->pid, __FUNCTION__, ##__VA_ARGS__) 45 46 #define ubi_dbg_dump_stack() dump_stack() 47 ··· 76 77 #ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA 78 /* Messages from the eraseblock association unit */ 79 + #define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 80 #else 81 #define dbg_eba(fmt, ...) ({}) 82 #endif 83 84 #ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL 85 /* Messages from the wear-leveling unit */ 86 + #define dbg_wl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 87 #else 88 #define dbg_wl(fmt, ...) ({}) 89 #endif 90 91 #ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO 92 /* Messages from the input/output unit */ 93 + #define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 94 #else 95 #define dbg_io(fmt, ...) ({}) 96 #endif 97 98 #ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD 99 /* Initialization and build messages */ 100 + #define dbg_bld(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 101 #else 102 #define dbg_bld(fmt, ...) ({}) 103 #endif
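Since dbg_eba(), dbg_wl(), dbg_io() and dbg_bld() now simply delegate to dbg_msg(), all sub-units inherit the new "(pid %d)" prefix from a single definition. A small userspace model of the pattern, with printf() and getpid() standing in for printk() and current->pid:

#include <stdio.h>
#include <unistd.h>

/* ##__VA_ARGS__ is the same gcc extension the kernel macros rely on;
 * __func__ is the C99 spelling of gcc's __FUNCTION__ */
#define dbg_msg(fmt, ...) \
	printf("UBI DBG (pid %d): %s: " fmt "\n", \
	       (int)getpid(), __func__, ##__VA_ARGS__)
#define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
#define dbg_wl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)

int main(void)
{
	dbg_eba("read %d bytes of data", 4096);
	dbg_wl("schedule erasure of PEB %d", 17);
	return 0;
}

Because these are macros rather than functions, __FUNCTION__ still expands at the call site, so each message names the real caller.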
+175 -150
drivers/mtd/ubi/eba.c
··· 31 * logical eraseblock it is locked for reading or writing. The per-logical 32 * eraseblock locking is implemented by means of the lock tree. The lock tree 33 * is an RB-tree which refers all the currently locked logical eraseblocks. The 34 - * lock tree elements are &struct ltree_entry objects. They are indexed by 35 * (@vol_id, @lnum) pairs. 36 * 37 * EBA also maintains the global sequence counter which is incremented each ··· 48 49 /* Number of physical eraseblocks reserved for atomic LEB change operation */ 50 #define EBA_RESERVED_PEBS 1 51 - 52 - /** 53 - * struct ltree_entry - an entry in the lock tree. 54 - * @rb: links RB-tree nodes 55 - * @vol_id: volume ID of the locked logical eraseblock 56 - * @lnum: locked logical eraseblock number 57 - * @users: how many tasks are using this logical eraseblock or wait for it 58 - * @mutex: read/write mutex to implement read/write access serialization to 59 - * the (@vol_id, @lnum) logical eraseblock 60 - * 61 - * When a logical eraseblock is being locked - corresponding &struct ltree_entry 62 - * object is inserted to the lock tree (@ubi->ltree). 63 - */ 64 - struct ltree_entry { 65 - struct rb_node rb; 66 - int vol_id; 67 - int lnum; 68 - int users; 69 - struct rw_semaphore mutex; 70 - }; 71 - 72 - /* Slab cache for lock-tree entries */ 73 - static struct kmem_cache *ltree_slab; 74 75 /** 76 * next_sqnum - get next sequence number. ··· 78 */ 79 static int ubi_get_compat(const struct ubi_device *ubi, int vol_id) 80 { 81 - if (vol_id == UBI_LAYOUT_VOL_ID) 82 return UBI_LAYOUT_VOLUME_COMPAT; 83 return 0; 84 } ··· 89 * @vol_id: volume ID 90 * @lnum: logical eraseblock number 91 * 92 - * This function returns a pointer to the corresponding &struct ltree_entry 93 * object if the logical eraseblock is locked and %NULL if it is not. 94 * @ubi->ltree_lock has to be locked. 95 */ 96 - static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id, 97 - int lnum) 98 { 99 struct rb_node *p; 100 101 p = ubi->ltree.rb_node; 102 while (p) { 103 - struct ltree_entry *le; 104 105 - le = rb_entry(p, struct ltree_entry, rb); 106 107 if (vol_id < le->vol_id) 108 p = p->rb_left; ··· 132 * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation 133 * failed. 
134 */ 135 - static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id, 136 - int lnum) 137 { 138 - struct ltree_entry *le, *le1, *le_free; 139 140 - le = kmem_cache_alloc(ltree_slab, GFP_NOFS); 141 if (!le) 142 return ERR_PTR(-ENOMEM); 143 144 le->vol_id = vol_id; 145 le->lnum = lnum; 146 ··· 168 p = &ubi->ltree.rb_node; 169 while (*p) { 170 parent = *p; 171 - le1 = rb_entry(parent, struct ltree_entry, rb); 172 173 if (vol_id < le1->vol_id) 174 p = &(*p)->rb_left; ··· 190 spin_unlock(&ubi->ltree_lock); 191 192 if (le_free) 193 - kmem_cache_free(ltree_slab, le_free); 194 195 return le; 196 } ··· 206 */ 207 static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum) 208 { 209 - struct ltree_entry *le; 210 211 le = ltree_add_entry(ubi, vol_id, lnum); 212 if (IS_ERR(le)) ··· 224 static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum) 225 { 226 int free = 0; 227 - struct ltree_entry *le; 228 229 spin_lock(&ubi->ltree_lock); 230 le = ltree_lookup(ubi, vol_id, lnum); ··· 238 239 up_read(&le->mutex); 240 if (free) 241 - kmem_cache_free(ltree_slab, le); 242 } 243 244 /** ··· 252 */ 253 static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum) 254 { 255 - struct ltree_entry *le; 256 257 le = ltree_add_entry(ubi, vol_id, lnum); 258 if (IS_ERR(le)) 259 return PTR_ERR(le); 260 down_write(&le->mutex); 261 return 0; 262 } 263 264 /** ··· 308 static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum) 309 { 310 int free; 311 - struct ltree_entry *le; 312 313 spin_lock(&ubi->ltree_lock); 314 le = ltree_lookup(ubi, vol_id, lnum); ··· 323 324 up_write(&le->mutex); 325 if (free) 326 - kmem_cache_free(ltree_slab, le); 327 } 328 329 /** 330 * ubi_eba_unmap_leb - un-map logical eraseblock. 331 * @ubi: UBI device description object 332 - * @vol_id: volume ID 333 * @lnum: logical eraseblock number 334 * 335 * This function un-maps logical eraseblock @lnum and schedules corresponding 336 * physical eraseblock for erasure. Returns zero in case of success and a 337 * negative error code in case of failure. 338 */ 339 - int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum) 340 { 341 - int idx = vol_id2idx(ubi, vol_id), err, pnum; 342 - struct ubi_volume *vol = ubi->volumes[idx]; 343 344 if (ubi->ro_mode) 345 return -EROFS; ··· 366 /** 367 * ubi_eba_read_leb - read data. 368 * @ubi: UBI device description object 369 - * @vol_id: volume ID 370 * @lnum: logical eraseblock number 371 * @buf: buffer to store the read data 372 * @offset: offset from where to read ··· 382 * returned for any volume type if an ECC error was detected by the MTD device 383 * driver. Other negative error cored may be returned in case of other errors. 384 */ 385 - int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf, 386 - int offset, int len, int check) 387 { 388 - int err, pnum, scrub = 0, idx = vol_id2idx(ubi, vol_id); 389 struct ubi_vid_hdr *vid_hdr; 390 - struct ubi_volume *vol = ubi->volumes[idx]; 391 uint32_t uninitialized_var(crc); 392 393 err = leb_read_lock(ubi, vol_id, lnum); ··· 594 /** 595 * ubi_eba_write_leb - write data to dynamic volume. 596 * @ubi: UBI device description object 597 - * @vol_id: volume ID 598 * @lnum: logical eraseblock number 599 * @buf: the data to write 600 * @offset: offset within the logical eraseblock where to write ··· 602 * @dtype: data type 603 * 604 * This function writes data to logical eraseblock @lnum of a dynamic volume 605 - * @vol_id. 
Returns zero in case of success and a negative error code in case 606 * of failure. In case of error, it is possible that something was still 607 * written to the flash media, but may be some garbage. 608 */ 609 - int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum, 610 const void *buf, int offset, int len, int dtype) 611 { 612 - int idx = vol_id2idx(ubi, vol_id), err, pnum, tries = 0; 613 - struct ubi_volume *vol = ubi->volumes[idx]; 614 struct ubi_vid_hdr *vid_hdr; 615 616 if (ubi->ro_mode) ··· 628 if (err) { 629 ubi_warn("failed to write data to PEB %d", pnum); 630 if (err == -EIO && ubi->bad_allowed) 631 - err = recover_peb(ubi, pnum, vol_id, lnum, buf, offset, len); 632 if (err) 633 ubi_ro_mode(ubi); 634 } ··· 672 goto write_error; 673 } 674 675 - err = ubi_io_write_data(ubi, buf, pnum, offset, len); 676 - if (err) { 677 - ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, " 678 - "PEB %d", len, offset, vol_id, lnum, pnum); 679 - goto write_error; 680 } 681 682 vol->eba_tbl[lnum] = pnum; ··· 717 /** 718 * ubi_eba_write_leb_st - write data to static volume. 719 * @ubi: UBI device description object 720 - * @vol_id: volume ID 721 * @lnum: logical eraseblock number 722 * @buf: data to write 723 * @len: how many bytes to write ··· 725 * @used_ebs: how many logical eraseblocks will this volume contain 726 * 727 * This function writes data to logical eraseblock @lnum of static volume 728 - * @vol_id. The @used_ebs argument should contain total number of logical 729 * eraseblock in this static volume. 730 * 731 * When writing to the last logical eraseblock, the @len argument doesn't have ··· 737 * volumes. This function returns zero in case of success and a negative error 738 * code in case of failure. 739 */ 740 - int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum, 741 - const void *buf, int len, int dtype, int used_ebs) 742 { 743 - int err, pnum, tries = 0, data_size = len; 744 - int idx = vol_id2idx(ubi, vol_id); 745 - struct ubi_volume *vol = ubi->volumes[idx]; 746 struct ubi_vid_hdr *vid_hdr; 747 uint32_t crc; 748 ··· 837 /* 838 * ubi_eba_atomic_leb_change - change logical eraseblock atomically. 839 * @ubi: UBI device description object 840 - * @vol_id: volume ID 841 * @lnum: logical eraseblock number 842 * @buf: data to write 843 * @len: how many bytes to write ··· 852 * UBI reserves one LEB for the "atomic LEB change" operation, so only one 853 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex. 854 */ 855 - int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum, 856 - const void *buf, int len, int dtype) 857 { 858 - int err, pnum, tries = 0, idx = vol_id2idx(ubi, vol_id); 859 - struct ubi_volume *vol = ubi->volumes[idx]; 860 struct ubi_vid_hdr *vid_hdr; 861 uint32_t crc; 862 863 if (ubi->ro_mode) 864 return -EROFS; 865 866 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 867 if (!vid_hdr) ··· 956 } 957 958 /** 959 - * ltree_entry_ctor - lock tree entries slab cache constructor. 960 - * @obj: the lock-tree entry to construct 961 - * @cache: the lock tree entry slab cache 962 - * @flags: constructor flags 963 - */ 964 - static void ltree_entry_ctor(struct kmem_cache *cache, void *obj) 965 - { 966 - struct ltree_entry *le = obj; 967 - 968 - le->users = 0; 969 - init_rwsem(&le->mutex); 970 - } 971 - 972 - /** 973 * ubi_eba_copy_leb - copy logical eraseblock. 
974 * @ubi: UBI device description object 975 * @from: physical eraseblock number from where to copy ··· 964 * 965 * This function copies logical eraseblock from physical eraseblock @from to 966 * physical eraseblock @to. The @vid_hdr buffer may be changed by this 967 - * function. Returns zero in case of success, %UBI_IO_BITFLIPS if the operation 968 - * was canceled because bit-flips were detected at the target PEB, and a 969 - * negative error code in case of failure. 970 */ 971 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 972 struct ubi_vid_hdr *vid_hdr) 973 { 974 - int err, vol_id, lnum, data_size, aldata_size, pnum, idx; 975 struct ubi_volume *vol; 976 uint32_t crc; 977 ··· 989 data_size = aldata_size = 990 ubi->leb_size - be32_to_cpu(vid_hdr->data_pad); 991 992 - /* 993 - * We do not want anybody to write to this logical eraseblock while we 994 - * are moving it, so we lock it. 995 - */ 996 - err = leb_write_lock(ubi, vol_id, lnum); 997 - if (err) 998 - return err; 999 - 1000 - mutex_lock(&ubi->buf_mutex); 1001 - 1002 - /* 1003 - * But the logical eraseblock might have been put by this time. 1004 - * Cancel if it is true. 1005 - */ 1006 idx = vol_id2idx(ubi, vol_id); 1007 - 1008 - /* 1009 - * We may race with volume deletion/re-size, so we have to hold 1010 - * @ubi->volumes_lock. 1011 - */ 1012 spin_lock(&ubi->volumes_lock); 1013 vol = ubi->volumes[idx]; 1014 if (!vol) { 1015 - dbg_eba("volume %d was removed meanwhile", vol_id); 1016 spin_unlock(&ubi->volumes_lock); 1017 - goto out_unlock; 1018 - } 1019 - 1020 - pnum = vol->eba_tbl[lnum]; 1021 - if (pnum != from) { 1022 - dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to " 1023 - "PEB %d, cancel", vol_id, lnum, from, pnum); 1024 - spin_unlock(&ubi->volumes_lock); 1025 - goto out_unlock; 1026 } 1027 spin_unlock(&ubi->volumes_lock); 1028 1029 - /* OK, now the LEB is locked and we can safely start moving it */ 1030 1031 dbg_eba("read %d bytes of data", aldata_size); 1032 err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size); 1033 if (err && err != UBI_IO_BITFLIPS) { 1034 ubi_warn("error %d while reading data from PEB %d", 1035 err, from); 1036 - goto out_unlock; 1037 } 1038 1039 /* ··· 1085 1086 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); 1087 if (err) 1088 - goto out_unlock; 1089 1090 cond_resched(); 1091 ··· 1094 if (err) { 1095 if (err != UBI_IO_BITFLIPS) 1096 ubi_warn("cannot read VID header back from PEB %d", to); 1097 - goto out_unlock; 1098 } 1099 1100 if (data_size > 0) { 1101 err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size); 1102 if (err) 1103 - goto out_unlock; 1104 1105 cond_resched(); 1106 ··· 1116 if (err != UBI_IO_BITFLIPS) 1117 ubi_warn("cannot read data back from PEB %d", 1118 to); 1119 - goto out_unlock; 1120 } 1121 1122 cond_resched(); ··· 1126 if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) { 1127 ubi_warn("read data back from PEB %d - it is different", 1128 to); 1129 - goto out_unlock; 1130 } 1131 } 1132 1133 ubi_assert(vol->eba_tbl[lnum] == from); 1134 vol->eba_tbl[lnum] = to; 1135 1136 - out_unlock: 1137 mutex_unlock(&ubi->buf_mutex); 1138 leb_write_unlock(ubi, vol_id, lnum); 1139 return err; 1140 } ··· 1161 spin_lock_init(&ubi->ltree_lock); 1162 mutex_init(&ubi->alc_mutex); 1163 ubi->ltree = RB_ROOT; 1164 - 1165 - if (ubi_devices_cnt == 0) { 1166 - ltree_slab = kmem_cache_create("ubi_ltree_slab", 1167 - sizeof(struct ltree_entry), 0, 1168 - 0, &ltree_entry_ctor); 1169 - if (!ltree_slab) 1170 - return -ENOMEM; 1171 - } 1172 1173 ubi->global_sqnum = 
si->max_sqnum + 1; 1174 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT; ··· 1197 } 1198 } 1199 1200 if (ubi->bad_allowed) { 1201 ubi_calculate_reserved(ubi); 1202 ··· 1222 ubi->rsvd_pebs += ubi->beb_rsvd_pebs; 1223 } 1224 1225 - if (ubi->avail_pebs < EBA_RESERVED_PEBS) { 1226 - ubi_err("no enough physical eraseblocks (%d, need %d)", 1227 - ubi->avail_pebs, EBA_RESERVED_PEBS); 1228 - err = -ENOSPC; 1229 - goto out_free; 1230 - } 1231 - ubi->avail_pebs -= EBA_RESERVED_PEBS; 1232 - ubi->rsvd_pebs += EBA_RESERVED_PEBS; 1233 - 1234 dbg_eba("EBA unit is initialized"); 1235 return 0; 1236 ··· 1231 continue; 1232 kfree(ubi->volumes[i]->eba_tbl); 1233 } 1234 - if (ubi_devices_cnt == 0) 1235 - kmem_cache_destroy(ltree_slab); 1236 return err; 1237 } 1238 ··· 1249 continue; 1250 kfree(ubi->volumes[i]->eba_tbl); 1251 } 1252 - if (ubi_devices_cnt == 1) 1253 - kmem_cache_destroy(ltree_slab); 1254 }
··· 31 * logical eraseblock it is locked for reading or writing. The per-logical 32 * eraseblock locking is implemented by means of the lock tree. The lock tree 33 * is an RB-tree which refers all the currently locked logical eraseblocks. The 34 + * lock tree elements are &struct ubi_ltree_entry objects. They are indexed by 35 * (@vol_id, @lnum) pairs. 36 * 37 * EBA also maintains the global sequence counter which is incremented each ··· 48 49 /* Number of physical eraseblocks reserved for atomic LEB change operation */ 50 #define EBA_RESERVED_PEBS 1 51 52 /** 53 * next_sqnum - get next sequence number. ··· 101 */ 102 static int ubi_get_compat(const struct ubi_device *ubi, int vol_id) 103 { 104 + if (vol_id == UBI_LAYOUT_VOLUME_ID) 105 return UBI_LAYOUT_VOLUME_COMPAT; 106 return 0; 107 } ··· 112 * @vol_id: volume ID 113 * @lnum: logical eraseblock number 114 * 115 + * This function returns a pointer to the corresponding &struct ubi_ltree_entry 116 * object if the logical eraseblock is locked and %NULL if it is not. 117 * @ubi->ltree_lock has to be locked. 118 */ 119 + static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id, 120 + int lnum) 121 { 122 struct rb_node *p; 123 124 p = ubi->ltree.rb_node; 125 while (p) { 126 + struct ubi_ltree_entry *le; 127 128 + le = rb_entry(p, struct ubi_ltree_entry, rb); 129 130 if (vol_id < le->vol_id) 131 p = p->rb_left; ··· 155 * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation 156 * failed. 157 */ 158 + static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi, 159 + int vol_id, int lnum) 160 { 161 + struct ubi_ltree_entry *le, *le1, *le_free; 162 163 + le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS); 164 if (!le) 165 return ERR_PTR(-ENOMEM); 166 167 + le->users = 0; 168 + init_rwsem(&le->mutex); 169 le->vol_id = vol_id; 170 le->lnum = lnum; 171 ··· 189 p = &ubi->ltree.rb_node; 190 while (*p) { 191 parent = *p; 192 + le1 = rb_entry(parent, struct ubi_ltree_entry, rb); 193 194 if (vol_id < le1->vol_id) 195 p = &(*p)->rb_left; ··· 211 spin_unlock(&ubi->ltree_lock); 212 213 if (le_free) 214 + kfree(le_free); 215 216 return le; 217 } ··· 227 */ 228 static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum) 229 { 230 + struct ubi_ltree_entry *le; 231 232 le = ltree_add_entry(ubi, vol_id, lnum); 233 if (IS_ERR(le)) ··· 245 static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum) 246 { 247 int free = 0; 248 + struct ubi_ltree_entry *le; 249 250 spin_lock(&ubi->ltree_lock); 251 le = ltree_lookup(ubi, vol_id, lnum); ··· 259 260 up_read(&le->mutex); 261 if (free) 262 + kfree(le); 263 } 264 265 /** ··· 273 */ 274 static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum) 275 { 276 + struct ubi_ltree_entry *le; 277 278 le = ltree_add_entry(ubi, vol_id, lnum); 279 if (IS_ERR(le)) 280 return PTR_ERR(le); 281 down_write(&le->mutex); 282 return 0; 283 + } 284 + 285 + /** 286 + * leb_write_trylock - try to lock logical eraseblock for writing. 287 + * @ubi: UBI device description object 288 + * @vol_id: volume ID 289 + * @lnum: logical eraseblock number 290 + * 291 + * This function locks a logical eraseblock for writing if there is no 292 + * contention and does nothing if there is contention. Returns %0 in case of 293 + * success, %1 in case of contention, and a negative error code in case of 294 + * failure. 
295 + */ 296 + static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum) 297 + { 298 + int free; 299 + struct ubi_ltree_entry *le; 300 + 301 + le = ltree_add_entry(ubi, vol_id, lnum); 302 + if (IS_ERR(le)) 303 + return PTR_ERR(le); 304 + if (down_write_trylock(&le->mutex)) 305 + return 0; 306 + 307 + /* Contention, cancel */ 308 + spin_lock(&ubi->ltree_lock); 309 + le->users -= 1; 310 + ubi_assert(le->users >= 0); 311 + if (le->users == 0) { 312 + rb_erase(&le->rb, &ubi->ltree); 313 + free = 1; 314 + } else 315 + free = 0; 316 + spin_unlock(&ubi->ltree_lock); 317 + if (free) 318 + kfree(le); 319 + 320 + return 1; 321 } 322 323 /** ··· 291 static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum) 292 { 293 int free; 294 + struct ubi_ltree_entry *le; 295 296 spin_lock(&ubi->ltree_lock); 297 le = ltree_lookup(ubi, vol_id, lnum); ··· 306 307 up_write(&le->mutex); 308 if (free) 309 + kfree(le); 310 } 311 312 /** 313 * ubi_eba_unmap_leb - un-map logical eraseblock. 314 * @ubi: UBI device description object 315 + * @vol: volume description object 316 * @lnum: logical eraseblock number 317 * 318 * This function un-maps logical eraseblock @lnum and schedules corresponding 319 * physical eraseblock for erasure. Returns zero in case of success and a 320 * negative error code in case of failure. 321 */ 322 + int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, 323 + int lnum) 324 { 325 + int err, pnum, vol_id = vol->vol_id; 326 327 if (ubi->ro_mode) 328 return -EROFS; ··· 349 /** 350 * ubi_eba_read_leb - read data. 351 * @ubi: UBI device description object 352 + * @vol: volume description object 353 * @lnum: logical eraseblock number 354 * @buf: buffer to store the read data 355 * @offset: offset from where to read ··· 365 * returned for any volume type if an ECC error was detected by the MTD device 366 * driver. Other negative error codes may be returned in case of other errors. 367 */ 368 + int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, 369 + void *buf, int offset, int len, int check) 370 { 371 + int err, pnum, scrub = 0, vol_id = vol->vol_id; 372 struct ubi_vid_hdr *vid_hdr; 373 uint32_t uninitialized_var(crc); 374 375 err = leb_read_lock(ubi, vol_id, lnum); ··· 578 /** 579 * ubi_eba_write_leb - write data to dynamic volume. 580 * @ubi: UBI device description object 581 + * @vol: volume description object 582 * @lnum: logical eraseblock number 583 * @buf: the data to write 584 * @offset: offset within the logical eraseblock where to write ··· 586 * @dtype: data type 587 * 588 * This function writes data to logical eraseblock @lnum of a dynamic volume 589 + * @vol. Returns zero in case of success and a negative error code in case 590 * of failure. In case of error, it is possible that something was still 591 * written to the flash media, but it may be garbage. 
592 */ 593 + int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, 594 const void *buf, int offset, int len, int dtype) 595 { 596 + int err, pnum, tries = 0, vol_id = vol->vol_id; 597 struct ubi_vid_hdr *vid_hdr; 598 599 if (ubi->ro_mode) ··· 613 if (err) { 614 ubi_warn("failed to write data to PEB %d", pnum); 615 if (err == -EIO && ubi->bad_allowed) 616 + err = recover_peb(ubi, pnum, vol_id, lnum, buf, 617 + offset, len); 618 if (err) 619 ubi_ro_mode(ubi); 620 } ··· 656 goto write_error; 657 } 658 659 + if (len) { 660 + err = ubi_io_write_data(ubi, buf, pnum, offset, len); 661 + if (err) { 662 + ubi_warn("failed to write %d bytes at offset %d of " 663 + "LEB %d:%d, PEB %d", len, offset, vol_id, 664 + lnum, pnum); 665 + goto write_error; 666 + } 667 } 668 669 vol->eba_tbl[lnum] = pnum; ··· 698 /** 699 * ubi_eba_write_leb_st - write data to static volume. 700 * @ubi: UBI device description object 701 + * @vol: volume description object 702 * @lnum: logical eraseblock number 703 * @buf: data to write 704 * @len: how many bytes to write ··· 706 * @used_ebs: how many logical eraseblocks will this volume contain 707 * 708 * This function writes data to logical eraseblock @lnum of static volume 709 + * @vol. The @used_ebs argument should contain the total number of logical 710 * eraseblocks in this static volume. 711 * 712 * When writing to the last logical eraseblock, the @len argument doesn't have ··· 718 * volumes. This function returns zero in case of success and a negative error 719 * code in case of failure. 720 */ 721 + int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol, 722 + int lnum, const void *buf, int len, int dtype, 723 + int used_ebs) 724 { 725 + int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id; 726 struct ubi_vid_hdr *vid_hdr; 727 uint32_t crc; 728 ··· 819 /* 820 * ubi_eba_atomic_leb_change - change logical eraseblock atomically. 821 * @ubi: UBI device description object 822 + * @vol: volume description object 823 * @lnum: logical eraseblock number 824 * @buf: data to write 825 * @len: how many bytes to write ··· 834 * UBI reserves one LEB for the "atomic LEB change" operation, so only one 835 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex. 836 */ 837 + int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, 838 + int lnum, const void *buf, int len, int dtype) 839 { 840 + int err, pnum, tries = 0, vol_id = vol->vol_id; 841 struct ubi_vid_hdr *vid_hdr; 842 uint32_t crc; 843 844 if (ubi->ro_mode) 845 return -EROFS; 846 + 847 + if (len == 0) { 848 + /* 849 + * Special case when data length is zero. In this case the LEB 850 + * has to be unmapped and mapped somewhere else. 851 + */ 852 + err = ubi_eba_unmap_leb(ubi, vol, lnum); 853 + if (err) 854 + return err; 855 + return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype); 856 + } 857 858 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 859 if (!vid_hdr) ··· 928 } 929 930 /** 931 * ubi_eba_copy_leb - copy logical eraseblock. 932 * @ubi: UBI device description object 933 * @from: physical eraseblock number from where to copy ··· 950 * 951 * This function copies logical eraseblock from physical eraseblock @from to 952 * physical eraseblock @to. The @vid_hdr buffer may be changed by this 953 + * function. 
Returns: 954 + * o %0 in case of success; 955 + * o %1 if the operation was canceled and should be tried later (e.g., 956 + * because a bit-flip was detected at the target PEB); 957 + * o %2 if the volume is being deleted and this LEB should not be moved. 958 */ 959 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 960 struct ubi_vid_hdr *vid_hdr) 961 { 962 + int err, vol_id, lnum, data_size, aldata_size, idx; 963 struct ubi_volume *vol; 964 uint32_t crc; 965 ··· 973 data_size = aldata_size = 974 ubi->leb_size - be32_to_cpu(vid_hdr->data_pad); 975 976 idx = vol_id2idx(ubi, vol_id); 977 spin_lock(&ubi->volumes_lock); 978 + /* 979 + * Note, we may race with volume deletion, which means that the volume 980 + * this logical eraseblock belongs to might be being deleted. Since the 981 + * volume deletion unmaps all the volume's logical eraseblocks, it will 982 + * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish. 983 + */ 984 vol = ubi->volumes[idx]; 985 if (!vol) { 986 + /* No need to do further work, cancel */ 987 + dbg_eba("volume %d is being removed, cancel", vol_id); 988 spin_unlock(&ubi->volumes_lock); 989 + return 2; 990 } 991 spin_unlock(&ubi->volumes_lock); 992 993 + /* 994 + * We do not want anybody to write to this logical eraseblock while we 995 + * are moving it, so lock it. 996 + * 997 + * Note, we are using non-waiting locking here, because we cannot sleep 998 + * on the LEB, since it may cause deadlocks. Indeed, imagine a task is 999 + * unmapping the LEB which is mapped to the PEB we are going to move 1000 + * (@from). This task locks the LEB and goes to sleep in the 1001 + * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are 1002 + * holding @ubi->move_mutex and go to sleep on the LEB lock. So, if the 1003 + * LEB is already locked, we just do not move it and return %1. 1004 + */ 1005 + err = leb_write_trylock(ubi, vol_id, lnum); 1006 + if (err) { 1007 + dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum); 1008 + return err; 1009 + } 1010 1011 + /* 1012 + * The LEB might have been put meanwhile, and the task which put it is 1013 + * probably waiting on @ubi->move_mutex. No need to continue the work, 1014 + * cancel it. 1015 + */ 1016 + if (vol->eba_tbl[lnum] != from) { 1017 + dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to " 1018 + "PEB %d, cancel", vol_id, lnum, from, 1019 + vol->eba_tbl[lnum]); 1020 + err = 1; 1021 + goto out_unlock_leb; 1022 + } 1023 + 1024 + /* 1025 + * OK, now the LEB is locked and we can safely start moving it. Since 1026 + * this function utilizes the @ubi->peb_buf1 buffer, which is shared 1027 + * with some other functions, lock the buffer by taking the 1028 + * @ubi->buf_mutex. 
1029 + */ 1030 + mutex_lock(&ubi->buf_mutex); 1031 dbg_eba("read %d bytes of data", aldata_size); 1032 err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size); 1033 if (err && err != UBI_IO_BITFLIPS) { 1034 ubi_warn("error %d while reading data from PEB %d", 1035 err, from); 1036 + goto out_unlock_buf; 1037 } 1038 1039 /* ··· 1053 1054 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); 1055 if (err) 1056 + goto out_unlock_buf; 1057 1058 cond_resched(); 1059 ··· 1062 if (err) { 1063 if (err != UBI_IO_BITFLIPS) 1064 ubi_warn("cannot read VID header back from PEB %d", to); 1065 + else 1066 + err = 1; 1067 + goto out_unlock_buf; 1068 } 1069 1070 if (data_size > 0) { 1071 err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size); 1072 if (err) 1073 + goto out_unlock_buf; 1074 1075 cond_resched(); 1076 ··· 1082 if (err != UBI_IO_BITFLIPS) 1083 ubi_warn("cannot read data back from PEB %d", 1084 to); 1085 + else 1086 + err = 1; 1087 + goto out_unlock_buf; 1088 } 1089 1090 cond_resched(); ··· 1090 if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) { 1091 ubi_warn("read data back from PEB %d - it is different", 1092 to); 1093 + goto out_unlock_buf; 1094 } 1095 } 1096 1097 ubi_assert(vol->eba_tbl[lnum] == from); 1098 vol->eba_tbl[lnum] = to; 1099 1100 + out_unlock_buf: 1101 mutex_unlock(&ubi->buf_mutex); 1102 + out_unlock_leb: 1103 leb_write_unlock(ubi, vol_id, lnum); 1104 return err; 1105 } ··· 1124 spin_lock_init(&ubi->ltree_lock); 1125 mutex_init(&ubi->alc_mutex); 1126 ubi->ltree = RB_ROOT; 1127 1128 ubi->global_sqnum = si->max_sqnum + 1; 1129 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT; ··· 1168 } 1169 } 1170 1171 + if (ubi->avail_pebs < EBA_RESERVED_PEBS) { 1172 + ubi_err("not enough physical eraseblocks (%d, need %d)", 1173 + ubi->avail_pebs, EBA_RESERVED_PEBS); 1174 + err = -ENOSPC; 1175 + goto out_free; 1176 + } 1177 + ubi->avail_pebs -= EBA_RESERVED_PEBS; 1178 + ubi->rsvd_pebs += EBA_RESERVED_PEBS; 1179 + 1180 if (ubi->bad_allowed) { 1181 ubi_calculate_reserved(ubi); 1182 ··· 1184 ubi->rsvd_pebs += ubi->beb_rsvd_pebs; 1185 } 1186 1187 dbg_eba("EBA unit is initialized"); 1188 return 0; 1189 ··· 1202 continue; 1203 kfree(ubi->volumes[i]->eba_tbl); 1204 } 1205 return err; 1206 } ··· 1222 continue; 1223 kfree(ubi->volumes[i]->eba_tbl); 1224 } 1225 }
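The "non-waiting locking" comment above is the heart of this eba.c change: the wear-leveling worker already holds @ubi->move_mutex, so sleeping on the LEB lock could deadlock against a task that holds the LEB lock and sleeps on @ubi->move_mutex in 'ubi_wl_put_peb()'. A minimal userspace sketch of the same trylock-and-cancel pattern (pthread names and layout are illustrative only, not UBI code):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t leb_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Mirrors leb_write_trylock(): 0 on success, 1 on contention, so the
	 * caller cancels the move and retries later instead of sleeping. */
	static int move_leb(void)
	{
		if (pthread_mutex_trylock(&leb_lock))
			return 1; /* contention, cancel */
		/* ... copy the eraseblock contents here ... */
		pthread_mutex_unlock(&leb_lock);
		return 0;
	}

	int main(void)
	{
		puts(move_leb() ? "contention, canceled" : "moved");
		return 0;
	}

Build with 'cc -pthread'; the point is only that the mover never waits on this lock while another ordering-sensitive lock is held.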
+4 -5
drivers/mtd/ubi/gluebi.c
··· 129 if (to_read > total_read) 130 to_read = total_read; 131 132 - err = ubi_eba_read_leb(ubi, vol->vol_id, lnum, buf, offs, 133 - to_read, 0); 134 if (err) 135 break; 136 ··· 186 if (to_write > total_written) 187 to_write = total_written; 188 189 - err = ubi_eba_write_leb(ubi, vol->vol_id, lnum, buf, offs, 190 - to_write, UBI_UNKNOWN); 191 if (err) 192 break; 193 ··· 236 return -EROFS; 237 238 for (i = 0; i < count; i++) { 239 - err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum + i); 240 if (err) 241 goto out_err; 242 }
··· 129 if (to_read > total_read) 130 to_read = total_read; 131 132 + err = ubi_eba_read_leb(ubi, vol, lnum, buf, offs, to_read, 0); 133 if (err) 134 break; 135 ··· 187 if (to_write > total_written) 188 to_write = total_written; 189 190 + err = ubi_eba_write_leb(ubi, vol, lnum, buf, offs, to_write, 191 + UBI_UNKNOWN); 192 if (err) 193 break; 194 ··· 237 return -EROFS; 238 239 for (i = 0; i < count; i++) { 240 + err = ubi_eba_unmap_leb(ubi, vol, lnum + i); 241 if (err) 242 goto out_err; 243 }
+10
drivers/mtd/ubi/io.c
··· 173 ubi_err("error %d while reading %d bytes from PEB %d:%d, " 174 "read %zd bytes", err, len, pnum, offset, read); 175 ubi_dbg_dump_stack(); 176 } else { 177 ubi_assert(len == read); 178
··· 173 ubi_err("error %d while reading %d bytes from PEB %d:%d, " 174 "read %zd bytes", err, len, pnum, offset, read); 175 ubi_dbg_dump_stack(); 176 + 177 + /* 178 + * The driver should never return -EBADMSG if it failed to read 179 + * all the requested data. But some buggy drivers might do 180 + * this, so we change it to -EIO. 181 + */ 182 + if (read != len && err == -EBADMSG) { 183 + ubi_assert(0); 184 + err = -EIO; 185 + } 186 } else { 187 ubi_assert(len == read); 188
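The new branch in ubi_io_read() encodes a defensive rule: -EBADMSG means "all data was delivered, but with uncorrectable ECC errors", so -EBADMSG together with a short read is self-contradictory and is downgraded to plain -EIO. A standalone sketch of that normalization rule (hypothetical helper, not the kernel function):

	#include <errno.h>
	#include <stdio.h>

	/* Normalize a driver's read result: err is 0 or a negative errno,
	 * requested and done are byte counts. */
	static int normalize_read_err(int err, long requested, long done)
	{
		/* Buggy driver: claims an ECC error yet returned fewer bytes
		 * than requested -- treat it as a plain I/O error. */
		if (err == -EBADMSG && done != requested)
			return -EIO;
		return err;
	}

	int main(void)
	{
		printf("%d\n", normalize_read_err(-EBADMSG, 4096, 1024)); /* -EIO */
		printf("%d\n", normalize_read_err(-EBADMSG, 4096, 4096)); /* -EBADMSG */
		return 0;
	}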
+116 -61
drivers/mtd/ubi/kapi.c
··· 30 * @ubi_num: UBI device number 31 * @di: the information is stored here 32 * 33 - * This function returns %0 in case of success and a %-ENODEV if there is no 34 - * such UBI device. 35 */ 36 int ubi_get_device_info(int ubi_num, struct ubi_device_info *di) 37 { 38 - const struct ubi_device *ubi; 39 40 - if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || 41 - !ubi_devices[ubi_num]) 42 return -ENODEV; 43 44 - ubi = ubi_devices[ubi_num]; 45 di->ubi_num = ubi->ubi_num; 46 di->leb_size = ubi->leb_size; 47 di->min_io_size = ubi->min_io_size; 48 di->ro_mode = ubi->ro_mode; 49 - di->cdev = MKDEV(ubi->major, 0); 50 return 0; 51 } 52 EXPORT_SYMBOL_GPL(ubi_get_device_info); ··· 77 vi->usable_leb_size = vol->usable_leb_size; 78 vi->name_len = vol->name_len; 79 vi->name = vol->name; 80 - vi->cdev = MKDEV(ubi->major, vi->vol_id + 1); 81 } 82 EXPORT_SYMBOL_GPL(ubi_get_volume_info); 83 ··· 108 109 dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode); 110 111 - err = -ENODEV; 112 - if (ubi_num < 0) 113 - return ERR_PTR(err); 114 115 - ubi = ubi_devices[ubi_num]; 116 - 117 - if (!try_module_get(THIS_MODULE)) 118 - return ERR_PTR(err); 119 - 120 - if (ubi_num >= UBI_MAX_DEVICES || !ubi) 121 - goto out_put; 122 - 123 - err = -EINVAL; 124 - if (vol_id < 0 || vol_id >= ubi->vtbl_slots) 125 - goto out_put; 126 if (mode != UBI_READONLY && mode != UBI_READWRITE && 127 mode != UBI_EXCLUSIVE) 128 - goto out_put; 129 130 desc = kmalloc(sizeof(struct ubi_volume_desc), GFP_KERNEL); 131 if (!desc) { 132 err = -ENOMEM; 133 - goto out_put; 134 } 135 136 spin_lock(&ubi->volumes_lock); 137 vol = ubi->volumes[vol_id]; 138 - if (!vol) { 139 - err = -ENODEV; 140 goto out_unlock; 141 - } 142 143 err = -EBUSY; 144 switch (mode) { ··· 162 vol->exclusive = 1; 163 break; 164 } 165 spin_unlock(&ubi->volumes_lock); 166 167 desc->vol = vol; 168 desc->mode = mode; 169 170 - /* 171 - * To prevent simultaneous checks of the same volume we use @vtbl_mutex, 172 - * although it is not the purpose it was introduced for. 
173 - */ 174 - mutex_lock(&ubi->vtbl_mutex); 175 if (!vol->checked) { 176 /* This is the first open - check the volume */ 177 err = ubi_check_volume(ubi, vol_id); 178 if (err < 0) { 179 - mutex_unlock(&ubi->vtbl_mutex); 180 ubi_close_volume(desc); 181 return ERR_PTR(err); 182 } ··· 185 } 186 vol->checked = 1; 187 } 188 - mutex_unlock(&ubi->vtbl_mutex); 189 return desc; 190 191 out_unlock: 192 spin_unlock(&ubi->volumes_lock); 193 - kfree(desc); 194 - out_put: 195 module_put(THIS_MODULE); 196 return ERR_PTR(err); 197 } 198 EXPORT_SYMBOL_GPL(ubi_open_volume); ··· 212 int mode) 213 { 214 int i, vol_id = -1, len; 215 - struct ubi_volume_desc *ret; 216 struct ubi_device *ubi; 217 218 dbg_msg("open volume %s, mode %d", name, mode); 219 ··· 224 if (len > UBI_VOL_NAME_MAX) 225 return ERR_PTR(-EINVAL); 226 227 - ret = ERR_PTR(-ENODEV); 228 - if (!try_module_get(THIS_MODULE)) 229 - return ret; 230 231 - if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || !ubi_devices[ubi_num]) 232 - goto out_put; 233 - 234 - ubi = ubi_devices[ubi_num]; 235 236 spin_lock(&ubi->volumes_lock); 237 /* Walk all volumes of this UBI device */ ··· 243 } 244 spin_unlock(&ubi->volumes_lock); 245 246 - if (vol_id < 0) 247 - goto out_put; 248 249 - ret = ubi_open_volume(ubi_num, vol_id, mode); 250 - 251 - out_put: 252 - module_put(THIS_MODULE); 253 return ret; 254 } 255 EXPORT_SYMBOL_GPL(ubi_open_volume_nm); ··· 264 void ubi_close_volume(struct ubi_volume_desc *desc) 265 { 266 struct ubi_volume *vol = desc->vol; 267 268 dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode); 269 270 - spin_lock(&vol->ubi->volumes_lock); 271 switch (desc->mode) { 272 case UBI_READONLY: 273 vol->readers -= 1; ··· 279 case UBI_EXCLUSIVE: 280 vol->exclusive = 0; 281 } 282 - spin_unlock(&vol->ubi->volumes_lock); 283 284 kfree(desc); 285 module_put(THIS_MODULE); 286 } 287 EXPORT_SYMBOL_GPL(ubi_close_volume); ··· 344 if (len == 0) 345 return 0; 346 347 - err = ubi_eba_read_leb(ubi, vol_id, lnum, buf, offset, len, check); 348 if (err && err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) { 349 ubi_warn("mark volume %d as corrupted", vol_id); 350 vol->corrupted = 1; ··· 411 if (len == 0) 412 return 0; 413 414 - return ubi_eba_write_leb(ubi, vol_id, lnum, buf, offset, len, dtype); 415 } 416 EXPORT_SYMBOL_GPL(ubi_leb_write); 417 ··· 460 if (len == 0) 461 return 0; 462 463 - return ubi_eba_atomic_leb_change(ubi, vol_id, lnum, buf, len, dtype); 464 } 465 EXPORT_SYMBOL_GPL(ubi_leb_change); 466 ··· 480 { 481 struct ubi_volume *vol = desc->vol; 482 struct ubi_device *ubi = vol->ubi; 483 - int err, vol_id = vol->vol_id; 484 485 - dbg_msg("erase LEB %d:%d", vol_id, lnum); 486 487 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) 488 return -EROFS; ··· 493 if (vol->upd_marker) 494 return -EBADF; 495 496 - err = ubi_eba_unmap_leb(ubi, vol_id, lnum); 497 if (err) 498 return err; 499 ··· 541 { 542 struct ubi_volume *vol = desc->vol; 543 struct ubi_device *ubi = vol->ubi; 544 - int vol_id = vol->vol_id; 545 546 - dbg_msg("unmap LEB %d:%d", vol_id, lnum); 547 548 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) 549 return -EROFS; ··· 553 if (vol->upd_marker) 554 return -EBADF; 555 556 - return ubi_eba_unmap_leb(ubi, vol_id, lnum); 557 } 558 EXPORT_SYMBOL_GPL(ubi_leb_unmap); 559 560 /** 561 * ubi_is_mapped - check if logical eraseblock is mapped.
··· 30 * @ubi_num: UBI device number 31 * @di: the information is stored here 32 * 33 + * This function returns %0 in case of success, %-EINVAL if the UBI device 34 + * number is invalid, and %-ENODEV if there is no such UBI device. 35 */ 36 int ubi_get_device_info(int ubi_num, struct ubi_device_info *di) 37 { 38 + struct ubi_device *ubi; 39 40 + if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) 41 + return -EINVAL; 42 + 43 + ubi = ubi_get_device(ubi_num); 44 + if (!ubi) 45 return -ENODEV; 46 47 di->ubi_num = ubi->ubi_num; 48 di->leb_size = ubi->leb_size; 49 di->min_io_size = ubi->min_io_size; 50 di->ro_mode = ubi->ro_mode; 51 + di->cdev = ubi->cdev.dev; 52 + 53 + ubi_put_device(ubi); 54 return 0; 55 } 56 EXPORT_SYMBOL_GPL(ubi_get_device_info); ··· 73 vi->usable_leb_size = vol->usable_leb_size; 74 vi->name_len = vol->name_len; 75 vi->name = vol->name; 76 + vi->cdev = vol->cdev.dev; 77 } 78 EXPORT_SYMBOL_GPL(ubi_get_volume_info); 79 ··· 104 105 dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode); 106 107 + if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) 108 + return ERR_PTR(-EINVAL); 109 110 if (mode != UBI_READONLY && mode != UBI_READWRITE && 111 mode != UBI_EXCLUSIVE) 112 + return ERR_PTR(-EINVAL); 113 + 114 + /* 115 + * First of all, we have to get the UBI device to prevent its removal. 116 + */ 117 + ubi = ubi_get_device(ubi_num); 118 + if (!ubi) 119 + return ERR_PTR(-ENODEV); 120 + 121 + if (vol_id < 0 || vol_id >= ubi->vtbl_slots) { 122 + err = -EINVAL; 123 + goto out_put_ubi; 124 + } 125 126 desc = kmalloc(sizeof(struct ubi_volume_desc), GFP_KERNEL); 127 if (!desc) { 128 err = -ENOMEM; 129 + goto out_put_ubi; 130 } 131 + 132 + err = -ENODEV; 133 + if (!try_module_get(THIS_MODULE)) 134 + goto out_free; 135 136 spin_lock(&ubi->volumes_lock); 137 vol = ubi->volumes[vol_id]; 138 + if (!vol) 139 goto out_unlock; 140 141 err = -EBUSY; 142 switch (mode) { ··· 156 vol->exclusive = 1; 157 break; 158 } 159 + get_device(&vol->dev); 160 + vol->ref_count += 1; 161 spin_unlock(&ubi->volumes_lock); 162 163 desc->vol = vol; 164 desc->mode = mode; 165 166 + mutex_lock(&ubi->ckvol_mutex); 167 if (!vol->checked) { 168 /* This is the first open - check the volume */ 169 err = ubi_check_volume(ubi, vol_id); 170 if (err < 0) { 171 + mutex_unlock(&ubi->ckvol_mutex); 172 ubi_close_volume(desc); 173 return ERR_PTR(err); 174 } ··· 181 } 182 vol->checked = 1; 183 } 184 + mutex_unlock(&ubi->ckvol_mutex); 185 + 186 return desc; 187 188 out_unlock: 189 spin_unlock(&ubi->volumes_lock); 190 module_put(THIS_MODULE); 191 + out_free: 192 + kfree(desc); 193 + out_put_ubi: 194 + ubi_put_device(ubi); 195 return ERR_PTR(err); 196 } 197 EXPORT_SYMBOL_GPL(ubi_open_volume); ··· 205 int mode) 206 { 207 int i, vol_id = -1, len; 208 struct ubi_device *ubi; 209 + struct ubi_volume_desc *ret; 210 211 dbg_msg("open volume %s, mode %d", name, mode); 212 ··· 217 if (len > UBI_VOL_NAME_MAX) 218 return ERR_PTR(-EINVAL); 219 220 + if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) 221 + return ERR_PTR(-EINVAL); 222 223 + ubi = ubi_get_device(ubi_num); 224 + if (!ubi) 225 + return ERR_PTR(-ENODEV); 226 227 spin_lock(&ubi->volumes_lock); 228 /* Walk all volumes of this UBI device */ ··· 238 } 239 spin_unlock(&ubi->volumes_lock); 240 241 + if (vol_id >= 0) 242 + ret = ubi_open_volume(ubi_num, vol_id, mode); 243 + else 244 + ret = ERR_PTR(-ENODEV); 245 246 + /* 247 + * We should put the UBI device even in case of success, because 248 + * 'ubi_open_volume()' took a reference as well. 
249 + */ 250 + ubi_put_device(ubi); 251 return ret; 252 } 253 EXPORT_SYMBOL_GPL(ubi_open_volume_nm); ··· 256 void ubi_close_volume(struct ubi_volume_desc *desc) 257 { 258 struct ubi_volume *vol = desc->vol; 259 + struct ubi_device *ubi = vol->ubi; 260 261 dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode); 262 263 + spin_lock(&ubi->volumes_lock); 264 switch (desc->mode) { 265 case UBI_READONLY: 266 vol->readers -= 1; ··· 270 case UBI_EXCLUSIVE: 271 vol->exclusive = 0; 272 } 273 + vol->ref_count -= 1; 274 + spin_unlock(&ubi->volumes_lock); 275 276 kfree(desc); 277 + put_device(&vol->dev); 278 + ubi_put_device(ubi); 279 module_put(THIS_MODULE); 280 } 281 EXPORT_SYMBOL_GPL(ubi_close_volume); ··· 332 if (len == 0) 333 return 0; 334 335 + err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check); 336 if (err && err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) { 337 ubi_warn("mark volume %d as corrupted", vol_id); 338 vol->corrupted = 1; ··· 399 if (len == 0) 400 return 0; 401 402 + return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len, dtype); 403 } 404 EXPORT_SYMBOL_GPL(ubi_leb_write); 405 ··· 448 if (len == 0) 449 return 0; 450 451 + return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len, dtype); 452 } 453 EXPORT_SYMBOL_GPL(ubi_leb_change); 454 ··· 468 { 469 struct ubi_volume *vol = desc->vol; 470 struct ubi_device *ubi = vol->ubi; 471 + int err; 472 473 + dbg_msg("erase LEB %d:%d", vol->vol_id, lnum); 474 475 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) 476 return -EROFS; ··· 481 if (vol->upd_marker) 482 return -EBADF; 483 484 + err = ubi_eba_unmap_leb(ubi, vol, lnum); 485 if (err) 486 return err; 487 ··· 529 { 530 struct ubi_volume *vol = desc->vol; 531 struct ubi_device *ubi = vol->ubi; 532 533 + dbg_msg("unmap LEB %d:%d", vol->vol_id, lnum); 534 535 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) 536 return -EROFS; ··· 542 if (vol->upd_marker) 543 return -EBADF; 544 545 + return ubi_eba_unmap_leb(ubi, vol, lnum); 546 } 547 EXPORT_SYMBOL_GPL(ubi_leb_unmap); 548 + 549 + /** 550 + * ubi_leb_map - map logical eraseblock to a physical eraseblock. 551 + * @desc: volume descriptor 552 + * @lnum: logical eraseblock number 553 + * @dtype: expected data type 554 + * 555 + * This function maps an un-mapped logical eraseblock @lnum to a physical 556 + * eraseblock. This means that after a successful invocation of this 557 + * function the logical eraseblock @lnum will be empty (contain only %0xFF 558 + * bytes) and be mapped to a physical eraseblock, even if an unclean reboot 559 + * happens. 560 + * 561 + * This function returns zero in case of success, %-EBADF if the volume is 562 + * damaged because of an interrupted update, %-EBADMSG if the logical 563 + * eraseblock is already mapped, and other negative error codes in case of 564 + * other failures. 
565 + */ 566 + int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype) 567 + { 568 + struct ubi_volume *vol = desc->vol; 569 + struct ubi_device *ubi = vol->ubi; 570 + 571 + dbg_msg("map LEB %d:%d", vol->vol_id, lnum); 572 + 573 + if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) 574 + return -EROFS; 575 + 576 + if (lnum < 0 || lnum >= vol->reserved_pebs) 577 + return -EINVAL; 578 + 579 + if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM && 580 + dtype != UBI_UNKNOWN) 581 + return -EINVAL; 582 + 583 + if (vol->upd_marker) 584 + return -EBADF; 585 + 586 + if (vol->eba_tbl[lnum] >= 0) 587 + return -EBADMSG; 588 + 589 + return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype); 590 + } 591 + EXPORT_SYMBOL_GPL(ubi_leb_map); 592 593 /** 594 * ubi_is_mapped - check if logical eraseblock is mapped.
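Taken together, ubi_open_volume() now pins the UBI device (ubi_get_device()), the volume (get_device() plus @ref_count), and the module, and ubi_close_volume() drops all three. A sketch of how an in-kernel client would use this, assuming the existing ubi_leb_read() prototype and with error handling trimmed:

	#include <linux/err.h>
	#include <linux/mtd/ubi.h>

	static int demo_read_first_bytes(int ubi_num, int vol_id)
	{
		struct ubi_volume_desc *desc;
		char buf[64];
		int err;

		desc = ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
		if (IS_ERR(desc))
			return PTR_ERR(desc); /* -EINVAL, -ENODEV, -EBUSY, ... */

		/* While desc is held, neither the UBI device nor the volume
		 * can disappear under us. */
		err = ubi_leb_read(desc, 0, buf, 0, sizeof(buf), 0);

		ubi_close_volume(desc); /* drops all references taken above */
		return err;
	}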
+1 -1
drivers/mtd/ubi/misc.c
··· 79 else 80 size = vol->usable_leb_size; 81 82 - err = ubi_eba_read_leb(ubi, vol_id, i, buf, 0, size, 1); 83 if (err) { 84 if (err == -EBADMSG) 85 err = 1;
··· 79 else 80 size = vol->usable_leb_size; 81 82 + err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1); 83 if (err) { 84 if (err == -EBADMSG) 85 err = 1;
+8 -4
drivers/mtd/ubi/scan.c
··· 286 * FIXME: but this is anyway obsolete and will be removed at 287 * some point. 288 */ 289 - 290 dbg_bld("using old crappy leb_ver stuff"); 291 292 abs = v1 - v2; 293 if (abs < 0) ··· 395 vfree(buf); 396 out_free_vidh: 397 ubi_free_vid_hdr(ubi, vh); 398 - ubi_assert(err < 0); 399 return err; 400 } 401 ··· 773 */ 774 static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum) 775 { 776 - long long ec; 777 int err, bitflips = 0, vol_id, ec_corr = 0; 778 779 dbg_bld("scan PEB %d", pnum); ··· 858 } 859 860 vol_id = be32_to_cpu(vidh->vol_id); 861 - if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOL_ID) { 862 int lnum = be32_to_cpu(vidh->lnum); 863 864 /* Unsupported internal volume */
··· 286 * FIXME: but this is anyway obsolete and will be removed at 287 * some point. 288 */ 289 dbg_bld("using old crappy leb_ver stuff"); 290 + 291 + if (v1 == v2) { 292 + ubi_err("PEB %d and PEB %d have the same version %lld", 293 + seb->pnum, pnum, v1); 294 + return -EINVAL; 295 + } 296 297 abs = v1 - v2; 298 if (abs < 0) ··· 390 vfree(buf); 391 out_free_vidh: 392 ubi_free_vid_hdr(ubi, vh); 393 return err; 394 } 395 ··· 769 */ 770 static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum) 771 { 772 + long long uninitialized_var(ec); 773 int err, bitflips = 0, vol_id, ec_corr = 0; 774 775 dbg_bld("scan PEB %d", pnum); ··· 854 } 855 856 vol_id = be32_to_cpu(vidh->vol_id); 857 + if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) { 858 int lnum = be32_to_cpu(vidh->lnum); 859 860 /* Unsupported internal volume */
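The added equal-version check rejects the impossible case of two copies of a LEB carrying the same leb_ver; the surrounding code still has to decide which copy is newer even when the 32-bit counter wraps. An illustrative, standalone take on wraparound-safe version comparison (serial-number arithmetic; the kernel's compare_lebs() logic differs in detail):

	#include <assert.h>
	#include <stdint.h>

	/* 1 if v1 is newer than v2, 0 otherwise; equal versions are a bug. */
	static int ver_is_newer(uint32_t v1, uint32_t v2)
	{
		return (int32_t)(v1 - v2) > 0;
	}

	int main(void)
	{
		assert(ver_is_newer(5, 4));
		assert(!ver_is_newer(4, 5));
		assert(ver_is_newer(0, UINT32_MAX)); /* 0 wrapped past 0xFFFFFFFF */
		return 0;
	}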
+124 -47
drivers/mtd/ubi/ubi.h
··· 94 UBI_IO_BITFLIPS 95 }; 96 97 - extern int ubi_devices_cnt; 98 - extern struct ubi_device *ubi_devices[]; 99 100 struct ubi_volume_desc; 101 ··· 140 * @cdev: character device object to create character device 141 * @ubi: reference to the UBI device description object 142 * @vol_id: volume ID 143 * @readers: number of users holding this volume in read-only mode 144 * @writers: number of users holding this volume in read-write mode 145 * @exclusive: whether somebody holds this volume in exclusive mode 146 - * @removed: if the volume was removed 147 - * @checked: if this static volume was checked 148 * 149 * @reserved_pebs: how many physical eraseblocks are reserved for this volume 150 * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) ··· 151 * @used_ebs: how many logical eraseblocks in this volume contain data 152 * @last_eb_bytes: how many bytes are stored in the last logical eraseblock 153 * @used_bytes: how many bytes of data this volume contains 154 - * @upd_marker: non-zero if the update marker is set for this volume 155 - * @corrupted: non-zero if the volume is corrupted (static volumes only) 156 * @alignment: volume alignment 157 * @data_pad: how many bytes are not used at the end of physical eraseblocks to 158 - * satisfy the requested alignment 159 * @name_len: volume name length 160 * @name: volume name 161 * 162 - * @updating: whether the volume is being updated 163 * @upd_ebs: how many eraseblocks are expected to be updated 164 - * @upd_bytes: how many bytes are expected to be received 165 - * @upd_received: how many update bytes were already received 166 - * @upd_buf: update buffer which is used to collect update data 167 * 168 * @eba_tbl: EBA table of this volume (LEB->PEB mapping) 169 * 170 * @gluebi_desc: gluebi UBI volume descriptor 171 * @gluebi_refcount: reference count of the gluebi MTD device ··· 193 struct cdev cdev; 194 struct ubi_device *ubi; 195 int vol_id; 196 int readers; 197 int writers; 198 int exclusive; 199 - int removed; 200 - int checked; 201 202 int reserved_pebs; 203 int vol_type; ··· 204 int used_ebs; 205 int last_eb_bytes; 206 long long used_bytes; 207 - int upd_marker; 208 - int corrupted; 209 int alignment; 210 int data_pad; 211 int name_len; 212 char name[UBI_VOL_NAME_MAX+1]; 213 214 - int updating; 215 int upd_ebs; 216 long long upd_bytes; 217 long long upd_received; 218 void *upd_buf; 219 220 int *eba_tbl; 221 222 #ifdef CONFIG_MTD_UBI_GLUEBI 223 - /* Gluebi-related stuff may be compiled out */ 224 struct ubi_volume_desc *gluebi_desc; 225 int gluebi_refcount; 226 struct mtd_info gluebi_mtd; ··· 250 251 /** 252 * struct ubi_device - UBI device description structure 253 - * @dev: class device object to use the the Linux device model 254 * @cdev: character device object to create character device 255 * @ubi_num: UBI device number 256 * @ubi_name: UBI device name 257 - * @major: character device major number 258 * @vol_count: number of volumes in this UBI device 259 * @volumes: volumes of this UBI device 260 * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, beb_rsvd_pebs, 261 - * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count, @vol->readers, 262 - * @vol->writers, @vol->exclusive, @vol->removed, @vol->mapping and 263 - * @vol->eba_tbl. 
264 * 265 * @rsvd_pebs: count of reserved physical eraseblocks 266 * @avail_pebs: count of available physical eraseblocks 267 * @beb_rsvd_pebs: how many physical eraseblocks are reserved for bad PEB 268 - * handling 269 * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling 270 * 271 * @vtbl_slots: how many slots are available in the volume table 272 * @vtbl_size: size of the volume table in bytes 273 * @vtbl: in-RAM volume table copy 274 - * @vtbl_mutex: protects on-flash volume table 275 * 276 * @max_ec: current highest erase counter value 277 * @mean_ec: current mean erase counter value ··· 291 * @prot.pnum: protection tree indexed by physical eraseblock numbers 292 * @prot.aec: protection tree indexed by absolute erase counter value 293 * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from, 294 - * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works 295 - * fields 296 * @wl_scheduled: non-zero if the wear-leveling was scheduled 297 * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any 298 - * physical eraseblock 299 * @abs_ec: absolute erase counter 300 * @move_from: physical eraseblock from where the data is being moved 301 * @move_to: physical eraseblock where the data is being moved to 302 - * @move_from_put: if the "from" PEB was put 303 * @move_to_put: if the "to" PEB was put 304 * @works: list of pending works 305 * @works_count: count of pending works ··· 326 * @hdrs_min_io_size 327 * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset 328 * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or 329 - * not 330 * @mtd: MTD device descriptor 331 * 332 * @peb_buf1: a buffer of PEB size used for different purposes 333 * @peb_buf2: another buffer of PEB size used for different purposes 334 * @buf_mutex: proptects @peb_buf1 and @peb_buf2 335 - * @dbg_peb_buf: buffer of PEB size used for debugging 336 * @dbg_buf_mutex: proptects @dbg_peb_buf 337 */ 338 struct ubi_device { ··· 340 struct device dev; 341 int ubi_num; 342 char ubi_name[sizeof(UBI_NAME_STR)+5]; 343 - int major; 344 int vol_count; 345 struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT]; 346 spinlock_t volumes_lock; 347 348 int rsvd_pebs; 349 int avail_pebs; 350 int beb_rsvd_pebs; 351 int beb_rsvd_level; 352 353 int vtbl_slots; 354 int vtbl_size; 355 struct ubi_vtbl_record *vtbl; 356 - struct mutex vtbl_mutex; 357 358 int max_ec; 359 int mean_ec; 360 361 /* EBA unit's stuff */ ··· 375 struct rb_root aec; 376 } prot; 377 spinlock_t wl_lock; 378 int wl_scheduled; 379 struct ubi_wl_entry **lookuptbl; 380 unsigned long long abs_ec; 381 struct ubi_wl_entry *move_from; 382 struct ubi_wl_entry *move_to; 383 - int move_from_put; 384 int move_to_put; 385 struct list_head works; 386 int works_count; ··· 411 void *peb_buf1; 412 void *peb_buf2; 413 struct mutex buf_mutex; 414 #ifdef CONFIG_MTD_UBI_DEBUG 415 void *dbg_peb_buf; 416 struct mutex dbg_buf_mutex; 417 #endif 418 }; 419 420 extern struct file_operations ubi_cdev_operations; 421 extern struct file_operations ubi_vol_cdev_operations; 422 extern struct class *ubi_class; 423 424 /* vtbl.c */ 425 int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, ··· 434 int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req); 435 int ubi_remove_volume(struct ubi_volume_desc *desc); 436 int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs); 437 - int ubi_add_volume(struct ubi_device *ubi, int vol_id); 438 - void ubi_free_volume(struct ubi_device 
*ubi, int vol_id); 439 440 /* upd.c */ 441 - int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes); 442 - int ubi_more_update_data(struct ubi_device *ubi, int vol_id, 443 const void __user *buf, int count); 444 445 /* misc.c */ 446 int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length); ··· 464 #endif 465 466 /* eba.c */ 467 - int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum); 468 - int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf, 469 - int offset, int len, int check); 470 - int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum, 471 const void *buf, int offset, int len, int dtype); 472 - int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum, 473 - const void *buf, int len, int dtype, 474 int used_ebs); 475 - int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum, 476 - const void *buf, int len, int dtype); 477 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 478 struct ubi_vid_hdr *vid_hdr); 479 int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si); ··· 487 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum); 488 int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si); 489 void ubi_wl_close(struct ubi_device *ubi); 490 491 /* io.c */ 492 int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, ··· 505 struct ubi_vid_hdr *vid_hdr, int verbose); 506 int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, 507 struct ubi_vid_hdr *vid_hdr); 508 509 /* 510 * ubi_rb_for_each_entry - walk an RB-tree. ··· 598 */ 599 static inline void ubi_ro_mode(struct ubi_device *ubi) 600 { 601 - ubi->ro_mode = 1; 602 - ubi_warn("switch to read-only mode"); 603 } 604 605 /**
··· 94 UBI_IO_BITFLIPS 95 }; 96 97 + /** 98 + * struct ubi_wl_entry - wear-leveling entry. 99 + * @rb: link in the corresponding RB-tree 100 + * @ec: erase counter 101 + * @pnum: physical eraseblock number 102 + * 103 + * This data structure is used in the WL unit. Each physical eraseblock has a 104 + * corresponding &struct ubi_wl_entry object which may be kept in different 105 + * RB-trees. See WL unit for details. 106 + */ 107 + struct ubi_wl_entry { 108 + struct rb_node rb; 109 + int ec; 110 + int pnum; 111 + }; 112 + 113 + /** 114 + * struct ubi_ltree_entry - an entry in the lock tree. 115 + * @rb: links RB-tree nodes 116 + * @vol_id: volume ID of the locked logical eraseblock 117 + * @lnum: locked logical eraseblock number 118 + * @users: how many tasks are using this logical eraseblock or waiting for it 119 + * @mutex: read/write mutex to implement read/write access serialization to 120 + * the (@vol_id, @lnum) logical eraseblock 121 + * 122 + * This data structure is used in the EBA unit to implement per-LEB locking. 123 + * When a logical eraseblock is locked, the corresponding 124 + * &struct ubi_ltree_entry object is inserted into the lock tree (@ubi->ltree). 125 + * See EBA unit for details. 126 + */ 127 + struct ubi_ltree_entry { 128 + struct rb_node rb; 129 + int vol_id; 130 + int lnum; 131 + int users; 132 + struct rw_semaphore mutex; 133 + }; 134 135 struct ubi_volume_desc; 136 ··· 105 * @cdev: character device object to create character device 106 * @ubi: reference to the UBI device description object 107 * @vol_id: volume ID 108 + * @ref_count: volume reference count 109 * @readers: number of users holding this volume in read-only mode 110 * @writers: number of users holding this volume in read-write mode 111 * @exclusive: whether somebody holds this volume in exclusive mode 112 * 113 * @reserved_pebs: how many physical eraseblocks are reserved for this volume 114 * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) ··· 117 * @used_ebs: how many logical eraseblocks in this volume contain data 118 * @last_eb_bytes: how many bytes are stored in the last logical eraseblock 119 * @used_bytes: how many bytes of data this volume contains 120 * @alignment: volume alignment 121 * @data_pad: how many bytes are not used at the end of physical eraseblocks to 122 + * satisfy the requested alignment 123 * @name_len: volume name length 124 * @name: volume name 125 * 126 * @upd_ebs: how many eraseblocks are expected to be updated 127 + * @ch_lnum: LEB number which is being changed by the atomic LEB change 128 + * operation 129 + * @ch_dtype: data persistency type which is being changed by the atomic LEB 130 + * change operation 131 + * @upd_bytes: how many bytes are expected to be received for volume update or 132 + * atomic LEB change 133 + * @upd_received: how many bytes were already received for volume update or 134 + * atomic LEB change 135 + * @upd_buf: update buffer which is used to collect update data or data for 136 + * atomic LEB change 137 * 138 * @eba_tbl: EBA table of this volume (LEB->PEB mapping) 139 + * @checked: %1 if this static volume was checked 140 + * @corrupted: %1 if the volume is corrupted (static volumes only) 141 + * @upd_marker: %1 if the update marker is set for this volume 142 + * @updating: %1 if the volume is being updated 143 + * @changing_leb: %1 if the atomic LEB change ioctl command is in progress 144 * 145 * @gluebi_desc: gluebi UBI volume descriptor 146 * @gluebi_refcount: reference count of the gluebi MTD device ··· 150 struct cdev cdev; 
151 struct ubi_device *ubi; 152 int vol_id; 153 + int ref_count; 154 int readers; 155 int writers; 156 int exclusive; 157 158 int reserved_pebs; 159 int vol_type; ··· 162 int used_ebs; 163 int last_eb_bytes; 164 long long used_bytes; 165 int alignment; 166 int data_pad; 167 int name_len; 168 char name[UBI_VOL_NAME_MAX+1]; 169 170 int upd_ebs; 171 + int ch_lnum; 172 + int ch_dtype; 173 long long upd_bytes; 174 long long upd_received; 175 void *upd_buf; 176 177 int *eba_tbl; 178 + int checked:1; 179 + int corrupted:1; 180 + int upd_marker:1; 181 + int updating:1; 182 + int changing_leb:1; 183 184 #ifdef CONFIG_MTD_UBI_GLUEBI 185 + /* 186 + * Gluebi-related stuff may be compiled out. 187 + * TODO: this should not be built into UBI but should be a separate 188 + * ubimtd driver which works on top of UBI and emulates MTD devices. 189 + */ 190 struct ubi_volume_desc *gluebi_desc; 191 int gluebi_refcount; 192 struct mtd_info gluebi_mtd; ··· 200 201 /** 202 * struct ubi_device - UBI device description structure 203 + * @dev: UBI device object to use the Linux device model 204 * @cdev: character device object to create character device 205 * @ubi_num: UBI device number 206 * @ubi_name: UBI device name 207 * @vol_count: number of volumes in this UBI device 208 * @volumes: volumes of this UBI device 209 * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, beb_rsvd_pebs, 210 + * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count, 211 + * @vol->readers, @vol->writers, @vol->exclusive, 212 + * @vol->ref_count, @vol->mapping and @vol->eba_tbl. 213 + * @ref_count: count of references on the UBI device 214 * 215 * @rsvd_pebs: count of reserved physical eraseblocks 216 * @avail_pebs: count of available physical eraseblocks 217 * @beb_rsvd_pebs: how many physical eraseblocks are reserved for bad PEB 218 + * handling 219 * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling 220 * 221 + * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end 222 + * of UBI initialization 223 * @vtbl_slots: how many slots are available in the volume table 224 * @vtbl_size: size of the volume table in bytes 225 * @vtbl: in-RAM volume table copy 226 + * @volumes_mutex: protects on-flash volume table and serializes volume 227 + * changes, like creation, deletion, update, resize 228 * 229 * @max_ec: current highest erase counter value 230 * @mean_ec: current mean erase counter value ··· 238 * @prot.pnum: protection tree indexed by physical eraseblock numbers 239 * @prot.aec: protection tree indexed by absolute erase counter value 240 * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from, 241 + * @move_to, @move_to_put, @erase_pending, @wl_scheduled, and @works 242 + * fields 243 + * @move_mutex: serializes eraseblock moves 244 * @wl_scheduled: non-zero if the wear-leveling was scheduled 245 * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any 246 + * physical eraseblock 247 * @abs_ec: absolute erase counter 248 * @move_from: physical eraseblock from where the data is being moved 249 * @move_to: physical eraseblock where the data is being moved to 250 * @move_to_put: if the "to" PEB was put 251 * @works: list of pending works 252 * @works_count: count of pending works ··· 273 * @hdrs_min_io_size 274 * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset 275 * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or 276 + * not 277 * @mtd: MTD device descriptor 278 * 279 * @peb_buf1: a buffer of PEB 
size used for different purposes 280 * @peb_buf2: another buffer of PEB size used for different purposes 281 * @buf_mutex: protects @peb_buf1 and @peb_buf2 282 + * @dbg_peb_buf: buffer of PEB size used for debugging 283 * @dbg_buf_mutex: protects @dbg_peb_buf 284 */ 285 struct ubi_device { ··· 287 struct device dev; 288 int ubi_num; 289 char ubi_name[sizeof(UBI_NAME_STR)+5]; 290 int vol_count; 291 struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT]; 292 spinlock_t volumes_lock; 293 + int ref_count; 294 295 int rsvd_pebs; 296 int avail_pebs; 297 int beb_rsvd_pebs; 298 int beb_rsvd_level; 299 300 + int autoresize_vol_id; 301 int vtbl_slots; 302 int vtbl_size; 303 struct ubi_vtbl_record *vtbl; 304 + struct mutex volumes_mutex; 305 306 int max_ec; 307 + /* TODO: mean_ec is not updated run-time, fix */ 308 int mean_ec; 309 310 /* EBA unit's stuff */ ··· 320 struct rb_root aec; 321 } prot; 322 spinlock_t wl_lock; 323 + struct mutex move_mutex; 324 + struct rw_semaphore work_sem; 325 int wl_scheduled; 326 struct ubi_wl_entry **lookuptbl; 327 unsigned long long abs_ec; 328 struct ubi_wl_entry *move_from; 329 struct ubi_wl_entry *move_to; 330 int move_to_put; 331 struct list_head works; 332 int works_count; ··· 355 void *peb_buf1; 356 void *peb_buf2; 357 struct mutex buf_mutex; 358 + struct mutex ckvol_mutex; 359 #ifdef CONFIG_MTD_UBI_DEBUG 360 void *dbg_peb_buf; 361 struct mutex dbg_buf_mutex; 362 #endif 363 }; 364 365 + extern struct kmem_cache *ubi_wl_entry_slab; 366 + extern struct file_operations ubi_ctrl_cdev_operations; 367 extern struct file_operations ubi_cdev_operations; 368 extern struct file_operations ubi_vol_cdev_operations; 369 extern struct class *ubi_class; 370 + extern struct mutex ubi_devices_mutex; 371 372 /* vtbl.c */ 373 int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, ··· 374 int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req); 375 int ubi_remove_volume(struct ubi_volume_desc *desc); 376 int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs); 377 + int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol); 378 + void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol); 379 380 /* upd.c */ 381 + int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, 382 + long long bytes); 383 + int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol, 384 const void __user *buf, int count); 385 + int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, 386 + const struct ubi_leb_change_req *req); 387 + int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol, 388 + const void __user *buf, int count); 389 390 /* misc.c */ 391 int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length); ··· 399 #endif 400 401 /* eba.c */ 402 + int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, 403 + int lnum); 404 + int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, 405 + void *buf, int offset, int len, int check); 406 + int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, 407 const void *buf, int offset, int len, int dtype); 408 + int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol, 409 + int lnum, const void *buf, int len, int dtype, 410 int used_ebs); 411 + int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, 412 + int lnum, const void *buf, int len, int dtype); 413 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 
414 struct ubi_vid_hdr *vid_hdr); 415 int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si); ··· 421 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum); 422 int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si); 423 void ubi_wl_close(struct ubi_device *ubi); 424 + int ubi_thread(void *u); 425 426 /* io.c */ 427 int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, ··· 438 struct ubi_vid_hdr *vid_hdr, int verbose); 439 int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, 440 struct ubi_vid_hdr *vid_hdr); 441 + 442 + /* build.c */ 443 + int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset); 444 + int ubi_detach_mtd_dev(int ubi_num, int anyway); 445 + struct ubi_device *ubi_get_device(int ubi_num); 446 + void ubi_put_device(struct ubi_device *ubi); 447 + struct ubi_device *ubi_get_by_major(int major); 448 + int ubi_major2num(int major); 449 450 /* 451 * ubi_rb_for_each_entry - walk an RB-tree. ··· 523 */ 524 static inline void ubi_ro_mode(struct ubi_device *ubi) 525 { 526 + if (!ubi->ro_mode) { 527 + ubi->ro_mode = 1; 528 + ubi_warn("switch to read-only mode"); 529 + } 530 } 531 532 /**
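The lock tree declared above is ordered by the (@vol_id, @lnum) pair, with @vol_id as the major key, which is exactly how ltree_lookup() in eba.c descends the RB-tree. A standalone sketch of that key ordering (plain C, outside the kernel rb-tree API):

	#include <stdio.h>

	struct ltree_key { int vol_id; int lnum; };

	/* memcmp-style result: vol_id is compared first, then lnum. */
	static int ltree_key_cmp(const struct ltree_key *a,
				 const struct ltree_key *b)
	{
		if (a->vol_id != b->vol_id)
			return a->vol_id < b->vol_id ? -1 : 1;
		if (a->lnum != b->lnum)
			return a->lnum < b->lnum ? -1 : 1;
		return 0;
	}

	int main(void)
	{
		struct ltree_key a = { 1, 10 }, b = { 2, 0 };
		printf("%d\n", ltree_key_cmp(&a, &b)); /* negative: a sorts first */
		return 0;
	}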
+137 -48
drivers/mtd/ubi/upd.c
··· 22 */ 23 24 /* 25 - * This file contains implementation of the volume update functionality. 26 * 27 * The update operation is based on the per-volume update marker which is 28 * stored in the volume table. The update marker is set before the update ··· 46 /** 47 * set_update_marker - set update marker. 48 * @ubi: UBI device description object 49 - * @vol_id: volume ID 50 * 51 - * This function sets the update marker flag for volume @vol_id. Returns zero 52 * in case of success and a negative error code in case of failure. 53 */ 54 - static int set_update_marker(struct ubi_device *ubi, int vol_id) 55 { 56 int err; 57 struct ubi_vtbl_record vtbl_rec; 58 - struct ubi_volume *vol = ubi->volumes[vol_id]; 59 60 - dbg_msg("set update marker for volume %d", vol_id); 61 62 if (vol->upd_marker) { 63 - ubi_assert(ubi->vtbl[vol_id].upd_marker); 64 dbg_msg("already set"); 65 return 0; 66 } 67 68 - memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record)); 69 vtbl_rec.upd_marker = 1; 70 71 - err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); 72 vol->upd_marker = 1; 73 return err; 74 } ··· 78 /** 79 * clear_update_marker - clear update marker. 80 * @ubi: UBI device description object 81 - * @vol_id: volume ID 82 * @bytes: new data size in bytes 83 * 84 - * This function clears the update marker for volume @vol_id, sets new volume 85 * data size and clears the "corrupted" flag (static volumes only). Returns 86 * zero in case of success and a negative error code in case of failure. 87 */ 88 - static int clear_update_marker(struct ubi_device *ubi, int vol_id, long long bytes) 89 { 90 int err; 91 uint64_t tmp; 92 struct ubi_vtbl_record vtbl_rec; 93 - struct ubi_volume *vol = ubi->volumes[vol_id]; 94 95 - dbg_msg("clear update marker for volume %d", vol_id); 96 97 - memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record)); 98 ubi_assert(vol->upd_marker && vtbl_rec.upd_marker); 99 vtbl_rec.upd_marker = 0; 100 ··· 110 vol->last_eb_bytes = vol->usable_leb_size; 111 } 112 113 - err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); 114 vol->upd_marker = 0; 115 return err; 116 } ··· 120 /** 121 * ubi_start_update - start volume update. 122 * @ubi: UBI device description object 123 - * @vol_id: volume ID 124 * @bytes: update bytes 125 * 126 * This function starts volume update operation. If @bytes is zero, the volume 127 * is just wiped out. Returns zero in case of success and a negative error code 128 * in case of failure. 129 */ 130 - int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes) 131 { 132 int i, err; 133 uint64_t tmp; 134 - struct ubi_volume *vol = ubi->volumes[vol_id]; 135 136 - dbg_msg("start update of volume %d, %llu bytes", vol_id, bytes); 137 vol->updating = 1; 138 139 - err = set_update_marker(ubi, vol_id); 140 if (err) 141 return err; 142 143 /* Before updating - wipe out the volume */ 144 for (i = 0; i < vol->reserved_pebs; i++) { 145 - err = ubi_eba_unmap_leb(ubi, vol_id, i); 146 if (err) 147 return err; 148 } 149 150 if (bytes == 0) { 151 - err = clear_update_marker(ubi, vol_id, 0); 152 if (err) 153 return err; 154 err = ubi_wl_flush(ubi); ··· 170 } 171 172 /** 173 * write_leb - write update data. 174 * @ubi: UBI device description object 175 - * @vol_id: volume ID 176 * @lnum: logical eraseblock number 177 * @buf: data to write 178 * @len: data size ··· 231 * This function returns zero in case of success and a negative error code in 232 * case of failure. 
233 */ 234 - static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf, 235 - int len, int used_ebs) 236 { 237 - int err, l; 238 - struct ubi_volume *vol = ubi->volumes[vol_id]; 239 240 if (vol->vol_type == UBI_DYNAMIC_VOLUME) { 241 - l = ALIGN(len, ubi->min_io_size); 242 - memset(buf + len, 0xFF, l - len); 243 244 - l = ubi_calc_data_len(ubi, buf, l); 245 - if (l == 0) { 246 dbg_msg("all %d bytes contain 0xFF - skip", len); 247 return 0; 248 } 249 - if (len != l) 250 - dbg_msg("skip last %d bytes (0xFF)", len - l); 251 252 - err = ubi_eba_write_leb(ubi, vol_id, lnum, buf, 0, l, 253 - UBI_UNKNOWN); 254 } else { 255 /* 256 * When writing static volume, and this is the last logical ··· 258 * contain zeros, not random trash. 259 */ 260 memset(buf + len, 0, vol->usable_leb_size - len); 261 - err = ubi_eba_write_leb_st(ubi, vol_id, lnum, buf, len, 262 UBI_UNKNOWN, used_ebs); 263 } 264 ··· 272 * @count: how much bytes to write 273 * 274 * This function writes more data to the volume which is being updated. It may 275 - * be called arbitrary number of times until all of the update data arrive. 276 - * This function returns %0 in case of success, number of bytes written during 277 - * the last call if the whole volume update was successfully finished, and a 278 * negative error code in case of failure. 279 */ 280 - int ubi_more_update_data(struct ubi_device *ubi, int vol_id, 281 const void __user *buf, int count) 282 { 283 uint64_t tmp; 284 - struct ubi_volume *vol = ubi->volumes[vol_id]; 285 int lnum, offs, err = 0, len, to_write = count; 286 287 dbg_msg("write %d of %lld bytes, %lld already passed", ··· 325 * is the last chunk, it's time to flush the buffer. 326 */ 327 ubi_assert(flush_len <= vol->usable_leb_size); 328 - err = write_leb(ubi, vol_id, lnum, vol->upd_buf, 329 - flush_len, vol->upd_ebs); 330 if (err) 331 return err; 332 } ··· 353 354 if (len == vol->usable_leb_size || 355 vol->upd_received + len == vol->upd_bytes) { 356 - err = write_leb(ubi, vol_id, lnum, vol->upd_buf, len, 357 - vol->upd_ebs); 358 if (err) 359 break; 360 } ··· 368 ubi_assert(vol->upd_received <= vol->upd_bytes); 369 if (vol->upd_received == vol->upd_bytes) { 370 /* The update is finished, clear the update marker */ 371 - err = clear_update_marker(ubi, vol_id, vol->upd_bytes); 372 if (err) 373 return err; 374 err = ubi_wl_flush(ubi); 375 if (err == 0) { 376 err = to_write; 377 vfree(vol->upd_buf); 378 - vol->updating = 0; 379 } 380 } 381 382 return err;
··· 22 */ 23 24 /* 25 + * This file contains implementation of the volume update and atomic LEB change 26 + * functionality. 27 * 28 * The update operation is based on the per-volume update marker which is 29 * stored in the volume table. The update marker is set before the update ··· 45 /** 46 * set_update_marker - set update marker. 47 * @ubi: UBI device description object 48 + * @vol: volume description object 49 * 50 + * This function sets the update marker flag for volume @vol. Returns zero 51 * in case of success and a negative error code in case of failure. 52 */ 53 + static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol) 54 { 55 int err; 56 struct ubi_vtbl_record vtbl_rec; 57 58 + dbg_msg("set update marker for volume %d", vol->vol_id); 59 60 if (vol->upd_marker) { 61 + ubi_assert(ubi->vtbl[vol->vol_id].upd_marker); 62 dbg_msg("already set"); 63 return 0; 64 } 65 66 + memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id], 67 + sizeof(struct ubi_vtbl_record)); 68 vtbl_rec.upd_marker = 1; 69 70 + mutex_lock(&ubi->volumes_mutex); 71 + err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec); 72 + mutex_unlock(&ubi->volumes_mutex); 73 vol->upd_marker = 1; 74 return err; 75 } ··· 75 /** 76 * clear_update_marker - clear update marker. 77 * @ubi: UBI device description object 78 + * @vol: volume description object 79 * @bytes: new data size in bytes 80 * 81 + * This function clears the update marker for volume @vol, sets new volume 82 * data size and clears the "corrupted" flag (static volumes only). Returns 83 * zero in case of success and a negative error code in case of failure. 84 */ 85 + static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol, 86 + long long bytes) 87 { 88 int err; 89 uint64_t tmp; 90 struct ubi_vtbl_record vtbl_rec; 91 92 + dbg_msg("clear update marker for volume %d", vol->vol_id); 93 94 + memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id], 95 + sizeof(struct ubi_vtbl_record)); 96 ubi_assert(vol->upd_marker && vtbl_rec.upd_marker); 97 vtbl_rec.upd_marker = 0; 98 ··· 106 vol->last_eb_bytes = vol->usable_leb_size; 107 } 108 109 + mutex_lock(&ubi->volumes_mutex); 110 + err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec); 111 + mutex_unlock(&ubi->volumes_mutex); 112 vol->upd_marker = 0; 113 return err; 114 } ··· 114 /** 115 * ubi_start_update - start volume update. 116 * @ubi: UBI device description object 117 + * @vol: volume description object 118 * @bytes: update bytes 119 * 120 * This function starts volume update operation. If @bytes is zero, the volume 121 * is just wiped out. Returns zero in case of success and a negative error code 122 * in case of failure. 123 */ 124 + int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, 125 + long long bytes) 126 { 127 int i, err; 128 uint64_t tmp; 129 130 + dbg_msg("start update of volume %d, %llu bytes", vol->vol_id, bytes); 131 + ubi_assert(!vol->updating && !vol->changing_leb); 132 vol->updating = 1; 133 134 + err = set_update_marker(ubi, vol); 135 if (err) 136 return err; 137 138 /* Before updating - wipe out the volume */ 139 for (i = 0; i < vol->reserved_pebs; i++) { 140 + err = ubi_eba_unmap_leb(ubi, vol, i); 141 if (err) 142 return err; 143 } 144 145 if (bytes == 0) { 146 + err = clear_update_marker(ubi, vol, 0); 147 if (err) 148 return err; 149 err = ubi_wl_flush(ubi); ··· 163 } 164 165 /** 166 + * ubi_start_leb_change - start atomic LEB change. 
167 + * @ubi: UBI device description object 168 + * @vol: volume description object 169 + * @req: operation request 170 + * 171 + * This function starts atomic LEB change operation. Returns zero in case of 172 + * success and a negative error code in case of failure. 173 + */ 174 + int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, 175 + const struct ubi_leb_change_req *req) 176 + { 177 + ubi_assert(!vol->updating && !vol->changing_leb); 178 + 179 + dbg_msg("start changing LEB %d:%d, %u bytes", 180 + vol->vol_id, req->lnum, req->bytes); 181 + if (req->bytes == 0) 182 + return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0, 183 + req->dtype); 184 + 185 + vol->upd_bytes = req->bytes; 186 + vol->upd_received = 0; 187 + vol->changing_leb = 1; 188 + vol->ch_lnum = req->lnum; 189 + vol->ch_dtype = req->dtype; 190 + 191 + vol->upd_buf = vmalloc(req->bytes); 192 + if (!vol->upd_buf) 193 + return -ENOMEM; 194 + 195 + return 0; 196 + } 197 + 198 + /** 199 * write_leb - write update data. 200 * @ubi: UBI device description object 201 + * @vol: volume description object 202 * @lnum: logical eraseblock number 203 * @buf: data to write 204 * @len: data size ··· 191 * This function returns zero in case of success and a negative error code in 192 * case of failure. 193 */ 194 + static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, 195 + void *buf, int len, int used_ebs) 196 { 197 + int err; 198 199 if (vol->vol_type == UBI_DYNAMIC_VOLUME) { 200 + int l = ALIGN(len, ubi->min_io_size); 201 + memset(buf + len, 0xFF, l - len); 202 203 + len = ubi_calc_data_len(ubi, buf, l); 204 + if (len == 0) { 205 dbg_msg("all %d bytes contain 0xFF - skip", len); 206 return 0; 207 } 208 209 + err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len, UBI_UNKNOWN); 210 } else { 211 /* 212 * When writing static volume, and this is the last logical ··· 222 * contain zeros, not random trash. 223 */ 224 memset(buf + len, 0, vol->usable_leb_size - len); 225 + err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len, 226 UBI_UNKNOWN, used_ebs); 227 } 228 ··· 236 * @count: how many bytes to write 237 * 238 * This function writes more data to the volume which is being updated. It may 239 + * be called an arbitrary number of times until all the update data arrives. 240 + * This function returns %0 in case of success, number of bytes written during 241 + * the last call if the whole volume update has been successfully finished, and 242 + * a negative error code in case of failure. 243 */ 244 + int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol, 245 const void __user *buf, int count) 246 { 247 uint64_t tmp; 248 int lnum, offs, err = 0, len, to_write = count; 249 250 dbg_msg("write %d of %lld bytes, %lld already passed", ··· 290 * is the last chunk, it's time to flush the buffer. 
291 */ 292 ubi_assert(flush_len <= vol->usable_leb_size); 293 + err = write_leb(ubi, vol, lnum, vol->upd_buf, flush_len, 294 + vol->upd_ebs); 295 if (err) 296 return err; 297 } ··· 318 319 if (len == vol->usable_leb_size || 320 vol->upd_received + len == vol->upd_bytes) { 321 + err = write_leb(ubi, vol, lnum, vol->upd_buf, 322 + len, vol->upd_ebs); 323 if (err) 324 break; 325 } ··· 333 ubi_assert(vol->upd_received <= vol->upd_bytes); 334 if (vol->upd_received == vol->upd_bytes) { 335 /* The update is finished, clear the update marker */ 336 + err = clear_update_marker(ubi, vol, vol->upd_bytes); 337 if (err) 338 return err; 339 err = ubi_wl_flush(ubi); 340 if (err == 0) { 341 + vol->updating = 0; 342 err = to_write; 343 vfree(vol->upd_buf); 344 } 345 + } 346 + 347 + return err; 348 + } 349 + 350 + /** 351 + * ubi_more_leb_change_data - accept more data for atomic LEB change. 352 + * @vol: volume description object 353 + * @buf: write data (user-space memory buffer) 354 + * @count: how many bytes to write 355 + * 356 + * This function accepts more data for the volume which is under the 357 + * "atomic LEB change" operation. It may be called an arbitrary number of times 358 + * until all data arrives. This function returns %0 in case of success, number 359 + * of bytes written during the last call if the whole "atomic LEB change" 360 + * operation has been successfully finished, and a negative error code in case 361 + * of failure. 362 + */ 363 + int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol, 364 + const void __user *buf, int count) 365 + { 366 + int err; 367 + 368 + dbg_msg("write %d of %lld bytes, %lld already passed", 369 + count, vol->upd_bytes, vol->upd_received); 370 + 371 + if (ubi->ro_mode) 372 + return -EROFS; 373 + 374 + if (vol->upd_received + count > vol->upd_bytes) 375 + count = vol->upd_bytes - vol->upd_received; 376 + 377 + err = copy_from_user(vol->upd_buf + vol->upd_received, buf, count); 378 + if (err) 379 + return -EFAULT; 380 + 381 + vol->upd_received += count; 382 + 383 + if (vol->upd_received == vol->upd_bytes) { 384 + int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size); 385 + 386 + memset(vol->upd_buf + vol->upd_bytes, 0xFF, len - vol->upd_bytes); 387 + len = ubi_calc_data_len(ubi, vol->upd_buf, len); 388 + err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum, 389 + vol->upd_buf, len, UBI_UNKNOWN); 390 + if (err) 391 + return err; 392 + } 393 + 394 + ubi_assert(vol->upd_received <= vol->upd_bytes); 395 + if (vol->upd_received == vol->upd_bytes) { 396 + vol->changing_leb = 0; 397 + err = count; 398 + vfree(vol->upd_buf); 399 } 400 401 return err;
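The update path above accumulates user data in vol->upd_buf and flushes it one LEB at a time; which LEB and offset the next chunk lands in follows from how many bytes have already been received. A minimal userspace sketch of that arithmetic (upd_position() is a hypothetical helper, not a kernel function; the kernel does the same split with 64-bit division on the uint64_t counter declared in ubi_more_update_data()):

#include <assert.h>
#include <stdio.h>

/*
 * Hypothetical helper: map the number of update bytes received so far
 * to the logical eraseblock the next byte falls into and the offset
 * inside that LEB. This mirrors what ubi_more_update_data() computes
 * from vol->upd_received and vol->usable_leb_size.
 */
static void upd_position(long long received, int usable_leb_size,
			 int *lnum, int *offs)
{
	*lnum = received / usable_leb_size;
	*offs = received - (long long)*lnum * usable_leb_size;
}

int main(void)
{
	int lnum, offs;

	/* e.g. 130048-byte usable LEBs, 200000 bytes already received */
	upd_position(200000, 130048, &lnum, &offs);
	printf("next chunk continues LEB %d at offset %d\n", lnum, offs);
	assert(lnum == 1 && offs == 69952);
	return 0;
}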
+116 -92
drivers/mtd/ubi/vmt.c
··· 63 * B. process 2 removes volume Y; 64 * C. process 1 starts reading the /<sysfs>/class/ubi/ubiX_Y/reserved_ebs file; 65 * 66 - * What we want to do in a situation like that is to return error when the file 67 - * is read. This is done by means of the 'removed' flag and the 'vol_lock' of 68 - * the UBI volume description object. 69 */ 70 static ssize_t vol_attribute_show(struct device *dev, 71 struct device_attribute *attr, char *buf) 72 { 73 int ret; 74 struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev); 75 76 - spin_lock(&vol->ubi->volumes_lock); 77 - if (vol->removed) { 78 - spin_unlock(&vol->ubi->volumes_lock); 79 return -ENODEV; 80 } 81 if (attr == &attr_vol_reserved_ebs) 82 ret = sprintf(buf, "%d\n", vol->reserved_pebs); 83 else if (attr == &attr_vol_type) { ··· 103 ret = sprintf(buf, "%d\n", vol->corrupted); 104 else if (attr == &attr_vol_alignment) 105 ret = sprintf(buf, "%d\n", vol->alignment); 106 - else if (attr == &attr_vol_usable_eb_size) { 107 ret = sprintf(buf, "%d\n", vol->usable_leb_size); 108 - } else if (attr == &attr_vol_data_bytes) 109 ret = sprintf(buf, "%lld\n", vol->used_bytes); 110 else if (attr == &attr_vol_upd_marker) 111 ret = sprintf(buf, "%d\n", vol->upd_marker); 112 else 113 - BUG(); 114 - spin_unlock(&vol->ubi->volumes_lock); 115 return ret; 116 } 117 ··· 126 static void vol_release(struct device *dev) 127 { 128 struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev); 129 - ubi_assert(vol->removed); 130 kfree(vol); 131 } 132 ··· 168 if (err) 169 return err; 170 err = device_create_file(&vol->dev, &attr_vol_upd_marker); 171 - if (err) 172 - return err; 173 - return 0; 174 } 175 176 /** ··· 194 * @req: volume creation request 195 * 196 * This function creates volume described by @req. If @req->vol_id id 197 - * %UBI_VOL_NUM_AUTO, this function automatically assigne ID to the new volume 198 * and saves it in @req->vol_id. Returns zero in case of success and a negative 199 - * error code in case of failure. 
200 */ 201 int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) 202 { 203 - int i, err, vol_id = req->vol_id; 204 struct ubi_volume *vol; 205 struct ubi_vtbl_record vtbl_rec; 206 uint64_t bytes; 207 208 if (ubi->ro_mode) 209 return -EROFS; ··· 215 return -ENOMEM; 216 217 spin_lock(&ubi->volumes_lock); 218 - 219 if (vol_id == UBI_VOL_NUM_AUTO) { 220 /* Find unused volume ID */ 221 dbg_msg("search for vacant volume ID"); ··· 267 } 268 ubi->avail_pebs -= vol->reserved_pebs; 269 ubi->rsvd_pebs += vol->reserved_pebs; 270 271 vol->vol_id = vol_id; 272 vol->alignment = req->alignment; ··· 275 vol->vol_type = req->vol_type; 276 vol->name_len = req->name_len; 277 memcpy(vol->name, req->name, vol->name_len + 1); 278 - vol->exclusive = 1; 279 vol->ubi = ubi; 280 - ubi->volumes[vol_id] = vol; 281 - spin_unlock(&ubi->volumes_lock); 282 283 /* 284 * Finish all pending erases because there may be some LEBs belonging ··· 312 /* Register character device for the volume */ 313 cdev_init(&vol->cdev, &ubi_vol_cdev_operations); 314 vol->cdev.owner = THIS_MODULE; 315 - err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol_id + 1), 1); 316 if (err) { 317 - ubi_err("cannot add character device for volume %d", vol_id); 318 goto out_mapping; 319 } 320 ··· 325 326 vol->dev.release = vol_release; 327 vol->dev.parent = &ubi->dev; 328 - vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1); 329 vol->dev.class = ubi_class; 330 sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id); 331 err = device_register(&vol->dev); 332 - if (err) 333 goto out_gluebi; 334 335 err = volume_sysfs_init(ubi, vol); 336 if (err) ··· 356 goto out_sysfs; 357 358 spin_lock(&ubi->volumes_lock); 359 ubi->vol_count += 1; 360 - vol->exclusive = 0; 361 spin_unlock(&ubi->volumes_lock); 362 363 paranoid_check_volumes(ubi); 364 return 0; 365 366 out_gluebi: 367 - err = ubi_destroy_gluebi(vol); 368 out_cdev: 369 cdev_del(&vol->cdev); 370 out_mapping: ··· 385 spin_lock(&ubi->volumes_lock); 386 ubi->rsvd_pebs -= vol->reserved_pebs; 387 ubi->avail_pebs += vol->reserved_pebs; 388 - ubi->volumes[vol_id] = NULL; 389 out_unlock: 390 spin_unlock(&ubi->volumes_lock); 391 - kfree(vol); 392 - return err; 393 - 394 - /* 395 - * We are registered, so @vol is destroyed in the release function and 396 - * we have to de-initialize differently. 397 - */ 398 - out_sysfs: 399 - err = ubi_destroy_gluebi(vol); 400 - cdev_del(&vol->cdev); 401 - kfree(vol->eba_tbl); 402 - spin_lock(&ubi->volumes_lock); 403 - ubi->rsvd_pebs -= vol->reserved_pebs; 404 - ubi->avail_pebs += vol->reserved_pebs; 405 - ubi->volumes[vol_id] = NULL; 406 - spin_unlock(&ubi->volumes_lock); 407 - volume_sysfs_close(vol); 408 return err; 409 } 410 ··· 401 * 402 * This function removes volume described by @desc. The volume has to be opened 403 * in "exclusive" mode. Returns zero in case of success and a negative error 404 - * code in case of failure. 
405 */ 406 int ubi_remove_volume(struct ubi_volume_desc *desc) 407 { ··· 417 if (ubi->ro_mode) 418 return -EROFS; 419 420 err = ubi_destroy_gluebi(vol); 421 if (err) 422 - return err; 423 424 err = ubi_change_vtbl_record(ubi, vol_id, NULL); 425 if (err) 426 - return err; 427 428 for (i = 0; i < vol->reserved_pebs; i++) { 429 - err = ubi_eba_unmap_leb(ubi, vol_id, i); 430 if (err) 431 - return err; 432 } 433 - 434 - spin_lock(&ubi->volumes_lock); 435 - vol->removed = 1; 436 - ubi->volumes[vol_id] = NULL; 437 - spin_unlock(&ubi->volumes_lock); 438 439 kfree(vol->eba_tbl); 440 vol->eba_tbl = NULL; 441 cdev_del(&vol->cdev); 442 volume_sysfs_close(vol); 443 - kfree(desc); 444 445 spin_lock(&ubi->volumes_lock); 446 ubi->rsvd_pebs -= reserved_pebs; ··· 464 spin_unlock(&ubi->volumes_lock); 465 466 paranoid_check_volumes(ubi); 467 - module_put(THIS_MODULE); 468 return 0; 469 } 470 471 /** ··· 480 * @desc: volume descriptor 481 * @reserved_pebs: new size in physical eraseblocks 482 * 483 - * This function returns zero in case of success, and a negative error code in 484 - * case of failure. 485 */ 486 int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) 487 { ··· 497 498 dbg_msg("re-size volume %d to from %d to %d PEBs", 499 vol_id, vol->reserved_pebs, reserved_pebs); 500 - ubi_assert(desc->mode == UBI_EXCLUSIVE); 501 - ubi_assert(vol == ubi->volumes[vol_id]); 502 503 if (vol->vol_type == UBI_STATIC_VOLUME && 504 reserved_pebs < vol->used_ebs) { ··· 515 516 for (i = 0; i < reserved_pebs; i++) 517 new_mapping[i] = UBI_LEB_UNMAPPED; 518 519 /* Reserve physical eraseblocks */ 520 pebs = reserved_pebs - vol->reserved_pebs; ··· 553 554 if (pebs < 0) { 555 for (i = 0; i < -pebs; i++) { 556 - err = ubi_eba_unmap_leb(ubi, vol_id, reserved_pebs + i); 557 if (err) 558 goto out_acc; 559 } ··· 602 /** 603 * ubi_add_volume - add volume. 604 * @ubi: UBI device description object 605 - * @vol_id: volume ID 606 * 607 - * This function adds an existin volume and initializes all its data 608 - * structures. Returnes zero in case of success and a negative error code in 609 * case of failure. 610 */ 611 - int ubi_add_volume(struct ubi_device *ubi, int vol_id) 612 { 613 - int err; 614 - struct ubi_volume *vol = ubi->volumes[vol_id]; 615 616 dbg_msg("add volume %d", vol_id); 617 ubi_dbg_dump_vol_info(vol); 618 - ubi_assert(vol); 619 620 /* Register character device for the volume */ 621 cdev_init(&vol->cdev, &ubi_vol_cdev_operations); 622 vol->cdev.owner = THIS_MODULE; 623 - err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol->vol_id + 1), 1); 624 if (err) { 625 - ubi_err("cannot add character device for volume %d", vol_id); 626 return err; 627 } 628 ··· 633 634 vol->dev.release = vol_release; 635 vol->dev.parent = &ubi->dev; 636 - vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1); 637 vol->dev.class = ubi_class; 638 sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id); 639 err = device_register(&vol->dev); ··· 661 /** 662 * ubi_free_volume - free volume. 663 * @ubi: UBI device description object 664 - * @vol_id: volume ID 665 * 666 - * This function frees all resources for volume @vol_id but does not remove it. 667 * Used only when the UBI device is detached. 
668 */ 669 - void ubi_free_volume(struct ubi_device *ubi, int vol_id) 670 { 671 int err; 672 - struct ubi_volume *vol = ubi->volumes[vol_id]; 673 674 - dbg_msg("free volume %d", vol_id); 675 - ubi_assert(vol); 676 677 - vol->removed = 1; 678 err = ubi_destroy_gluebi(vol); 679 - ubi->volumes[vol_id] = NULL; 680 cdev_del(&vol->cdev); 681 volume_sysfs_close(vol); 682 } ··· 743 goto fail; 744 } 745 746 - if (vol->upd_marker != 0 && vol->upd_marker != 1) { 747 - ubi_err("bad upd_marker"); 748 - goto fail; 749 - } 750 - 751 if (vol->upd_marker && vol->corrupted) { 752 dbg_err("update marker and corrupted simultaneously"); 753 goto fail; ··· 777 778 n = (long long)vol->used_ebs * vol->usable_leb_size; 779 if (vol->vol_type == UBI_DYNAMIC_VOLUME) { 780 - if (vol->corrupted != 0) { 781 ubi_err("corrupted dynamic volume"); 782 goto fail; 783 } ··· 794 goto fail; 795 } 796 } else { 797 - if (vol->corrupted != 0 && vol->corrupted != 1) { 798 - ubi_err("bad corrupted"); 799 - goto fail; 800 - } 801 if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) { 802 ubi_err("bad used_ebs"); 803 goto fail; ··· 846 { 847 int i; 848 849 - mutex_lock(&ubi->vtbl_mutex); 850 for (i = 0; i < ubi->vtbl_slots; i++) 851 paranoid_check_volume(ubi, i); 852 - mutex_unlock(&ubi->vtbl_mutex); 853 } 854 #endif
··· 63 * B. process 2 removes volume Y; 64 * C. process 1 starts reading the /<sysfs>/class/ubi/ubiX_Y/reserved_ebs file; 65 * 66 + * In this situation, this function will return %-ENODEV because it will find 67 + * out that the volume was removed from the @ubi->volumes array. 68 */ 69 static ssize_t vol_attribute_show(struct device *dev, 70 struct device_attribute *attr, char *buf) 71 { 72 int ret; 73 struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev); 74 + struct ubi_device *ubi; 75 76 + ubi = ubi_get_device(vol->ubi->ubi_num); 77 + if (!ubi) 78 + return -ENODEV; 79 + 80 + spin_lock(&ubi->volumes_lock); 81 + if (!ubi->volumes[vol->vol_id]) { 82 + spin_unlock(&ubi->volumes_lock); 83 + ubi_put_device(ubi); 84 return -ENODEV; 85 } 86 + /* Take a reference to prevent volume removal */ 87 + vol->ref_count += 1; 88 + spin_unlock(&ubi->volumes_lock); 89 + 90 if (attr == &attr_vol_reserved_ebs) 91 ret = sprintf(buf, "%d\n", vol->reserved_pebs); 92 else if (attr == &attr_vol_type) { ··· 94 ret = sprintf(buf, "%d\n", vol->corrupted); 95 else if (attr == &attr_vol_alignment) 96 ret = sprintf(buf, "%d\n", vol->alignment); 97 + else if (attr == &attr_vol_usable_eb_size) 98 ret = sprintf(buf, "%d\n", vol->usable_leb_size); 99 + else if (attr == &attr_vol_data_bytes) 100 ret = sprintf(buf, "%lld\n", vol->used_bytes); 101 else if (attr == &attr_vol_upd_marker) 102 ret = sprintf(buf, "%d\n", vol->upd_marker); 103 else 104 + /* This must be a bug */ 105 + ret = -EINVAL; 106 + 107 + /* We've done the operation, drop volume and UBI device references */ 108 + spin_lock(&ubi->volumes_lock); 109 + vol->ref_count -= 1; 110 + ubi_assert(vol->ref_count >= 0); 111 + spin_unlock(&ubi->volumes_lock); 112 + ubi_put_device(ubi); 113 return ret; 114 } 115 ··· 110 static void vol_release(struct device *dev) 111 { 112 struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev); 113 + 114 kfree(vol); 115 } 116 ··· 152 if (err) 153 return err; 154 err = device_create_file(&vol->dev, &attr_vol_upd_marker); 155 + return err; 156 } 157 158 /** ··· 180 * @req: volume creation request 181 * 182 * This function creates volume described by @req. If @req->vol_id is 183 + * %UBI_VOL_NUM_AUTO, this function automatically assigns an ID to the new volume 184 * and saves it in @req->vol_id. Returns zero in case of success and a negative 185 + * error code in case of failure. Note, the caller has to have the 186 + * @ubi->volumes_mutex locked. 
187 */ 188 int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) 189 { 190 + int i, err, vol_id = req->vol_id, dont_free = 0; 191 struct ubi_volume *vol; 192 struct ubi_vtbl_record vtbl_rec; 193 uint64_t bytes; 194 + dev_t dev; 195 196 if (ubi->ro_mode) 197 return -EROFS; ··· 199 return -ENOMEM; 200 201 spin_lock(&ubi->volumes_lock); 202 if (vol_id == UBI_VOL_NUM_AUTO) { 203 /* Find unused volume ID */ 204 dbg_msg("search for vacant volume ID"); ··· 252 } 253 ubi->avail_pebs -= vol->reserved_pebs; 254 ubi->rsvd_pebs += vol->reserved_pebs; 255 + spin_unlock(&ubi->volumes_lock); 256 257 vol->vol_id = vol_id; 258 vol->alignment = req->alignment; ··· 259 vol->vol_type = req->vol_type; 260 vol->name_len = req->name_len; 261 memcpy(vol->name, req->name, vol->name_len + 1); 262 vol->ubi = ubi; 263 264 /* 265 * Finish all pending erases because there may be some LEBs belonging ··· 299 /* Register character device for the volume */ 300 cdev_init(&vol->cdev, &ubi_vol_cdev_operations); 301 vol->cdev.owner = THIS_MODULE; 302 + dev = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1); 303 + err = cdev_add(&vol->cdev, dev, 1); 304 if (err) { 305 + ubi_err("cannot add character device"); 306 goto out_mapping; 307 } 308 ··· 311 312 vol->dev.release = vol_release; 313 vol->dev.parent = &ubi->dev; 314 + vol->dev.devt = dev; 315 vol->dev.class = ubi_class; 316 + 317 sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id); 318 err = device_register(&vol->dev); 319 + if (err) { 320 + ubi_err("cannot register device"); 321 goto out_gluebi; 322 + } 323 324 err = volume_sysfs_init(ubi, vol); 325 if (err) ··· 339 goto out_sysfs; 340 341 spin_lock(&ubi->volumes_lock); 342 + ubi->volumes[vol_id] = vol; 343 ubi->vol_count += 1; 344 spin_unlock(&ubi->volumes_lock); 345 346 paranoid_check_volumes(ubi); 347 return 0; 348 349 + out_sysfs: 350 + /* 351 + * We have registered our device, we should not free the volume 352 + * description object in this function in case of an error - it is 353 + * freed by the release function. 354 + * 355 + * Get device reference to prevent the release function from being 356 + * called just after sysfs has been closed. 357 + */ 358 + dont_free = 1; 359 + get_device(&vol->dev); 360 + volume_sysfs_close(vol); 361 out_gluebi: 362 + ubi_destroy_gluebi(vol); 363 out_cdev: 364 cdev_del(&vol->cdev); 365 out_mapping: ··· 356 spin_lock(&ubi->volumes_lock); 357 ubi->rsvd_pebs -= vol->reserved_pebs; 358 ubi->avail_pebs += vol->reserved_pebs; 359 out_unlock: 360 spin_unlock(&ubi->volumes_lock); 361 + if (dont_free) 362 + put_device(&vol->dev); 363 + else 364 + kfree(vol); 365 + ubi_err("cannot create volume %d, error %d", vol_id, err); 366 return err; 367 } 368 ··· 385 * 386 * This function removes volume described by @desc. The volume has to be opened 387 * in "exclusive" mode. Returns zero in case of success and a negative error 388 + * code in case of failure. The caller has to have the @ubi->volumes_mutex 389 + * locked. 390 */ 391 int ubi_remove_volume(struct ubi_volume_desc *desc) 392 { ··· 400 if (ubi->ro_mode) 401 return -EROFS; 402 403 + spin_lock(&ubi->volumes_lock); 404 + if (vol->ref_count > 1) { 405 + /* 406 + * The volume is busy, probably someone is reading one of its 407 + * sysfs files. 
408 + */ 409 + err = -EBUSY; 410 + goto out_unlock; 411 + } 412 + ubi->volumes[vol_id] = NULL; 413 + spin_unlock(&ubi->volumes_lock); 414 + 415 err = ubi_destroy_gluebi(vol); 416 if (err) 417 + goto out_err; 418 419 err = ubi_change_vtbl_record(ubi, vol_id, NULL); 420 if (err) 421 + goto out_err; 422 423 for (i = 0; i < vol->reserved_pebs; i++) { 424 + err = ubi_eba_unmap_leb(ubi, vol, i); 425 if (err) 426 + goto out_err; 427 } 428 429 kfree(vol->eba_tbl); 430 vol->eba_tbl = NULL; 431 cdev_del(&vol->cdev); 432 volume_sysfs_close(vol); 433 434 spin_lock(&ubi->volumes_lock); 435 ubi->rsvd_pebs -= reserved_pebs; ··· 441 spin_unlock(&ubi->volumes_lock); 442 443 paranoid_check_volumes(ubi); 444 return 0; 445 + 446 + out_err: 447 + ubi_err("cannot remove volume %d, error %d", vol_id, err); 448 + spin_lock(&ubi->volumes_lock); 449 + ubi->volumes[vol_id] = vol; 450 + out_unlock: 451 + spin_unlock(&ubi->volumes_lock); 452 + return err; 453 } 454 455 /** ··· 450 * @desc: volume descriptor 451 * @reserved_pebs: new size in physical eraseblocks 452 * 453 + * This function re-sizes the volume and returns zero in case of success, and a 454 + * negative error code in case of failure. The caller has to have the 455 + * @ubi->volumes_mutex locked. 456 */ 457 int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) 458 { ··· 466 467 dbg_msg("re-size volume %d from %d to %d PEBs", 468 vol_id, vol->reserved_pebs, reserved_pebs); 469 470 if (vol->vol_type == UBI_STATIC_VOLUME && 471 reserved_pebs < vol->used_ebs) { ··· 486 487 for (i = 0; i < reserved_pebs; i++) 488 new_mapping[i] = UBI_LEB_UNMAPPED; 489 + 490 + spin_lock(&ubi->volumes_lock); 491 + if (vol->ref_count > 1) { 492 + spin_unlock(&ubi->volumes_lock); 493 + err = -EBUSY; 494 + goto out_free; 495 + } 496 + spin_unlock(&ubi->volumes_lock); 497 498 /* Reserve physical eraseblocks */ 499 pebs = reserved_pebs - vol->reserved_pebs; ··· 516 517 if (pebs < 0) { 518 for (i = 0; i < -pebs; i++) { 519 + err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i); 520 if (err) 521 goto out_acc; 522 } ··· 565 /** 566 * ubi_add_volume - add volume. 567 * @ubi: UBI device description object 568 + * @vol: volume description object 569 * 570 + * This function adds an existing volume and initializes all its data 571 + * structures. Returns zero in case of success and a negative error code in 572 + * case of failure. 573 */ 574 + int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol) 575 { 576 + int err, vol_id = vol->vol_id; 577 + dev_t dev; 578 579 dbg_msg("add volume %d", vol_id); 580 ubi_dbg_dump_vol_info(vol); 581 582 /* Register character device for the volume */ 583 cdev_init(&vol->cdev, &ubi_vol_cdev_operations); 584 vol->cdev.owner = THIS_MODULE; 585 + dev = MKDEV(MAJOR(ubi->cdev.dev), vol->vol_id + 1); 586 + err = cdev_add(&vol->cdev, dev, 1); 587 if (err) { 588 + ubi_err("cannot add character device for volume %d, error %d", 589 + vol_id, err); 590 return err; 591 } 592 ··· 595 596 vol->dev.release = vol_release; 597 vol->dev.parent = &ubi->dev; 598 + vol->dev.devt = dev; 599 vol->dev.class = ubi_class; 600 sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id); 601 err = device_register(&vol->dev); ··· 623 /** 624 * ubi_free_volume - free volume. 625 * @ubi: UBI device description object 626 + * @vol: volume description object 627 * 628 + * This function frees all resources for volume @vol but does not remove it. 629 * Used only when the UBI device is detached. 
630 */ 631 + void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol) 632 { 633 int err; 634 635 + dbg_msg("free volume %d", vol->vol_id); 636 637 + ubi->volumes[vol->vol_id] = NULL; 638 err = ubi_destroy_gluebi(vol); 639 cdev_del(&vol->cdev); 640 volume_sysfs_close(vol); 641 } ··· 708 goto fail; 709 } 710 711 if (vol->upd_marker && vol->corrupted) { 712 dbg_err("update marker and corrupted simultaneously"); 713 goto fail; ··· 747 748 n = (long long)vol->used_ebs * vol->usable_leb_size; 749 if (vol->vol_type == UBI_DYNAMIC_VOLUME) { 750 + if (vol->corrupted) { 751 ubi_err("corrupted dynamic volume"); 752 goto fail; 753 } ··· 764 goto fail; 765 } 766 } else { 767 if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) { 768 ubi_err("bad used_ebs"); 769 goto fail; ··· 820 { 821 int i; 822 823 for (i = 0; i < ubi->vtbl_slots; i++) 824 paranoid_check_volume(ubi, i); 825 } 826 #endif
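The vmt.c rework above replaces the old 'removed' flag with plain reference counting: a sysfs reader pins the volume under @volumes_lock, and ubi_remove_volume() refuses with -EBUSY while ref_count > 1. A rough userspace analogue of the pattern, with illustrative names and a pthread mutex standing in for the kernel spinlock:

#include <pthread.h>

struct volume {
	pthread_mutex_t lock;	/* stands in for @ubi->volumes_lock */
	int ref_count;		/* 1 while the volume merely exists */
	int present;		/* still in the volumes table? */
};

/* Reader side, as in vol_attribute_show(): pin the volume or fail. */
static int volume_get(struct volume *vol)
{
	pthread_mutex_lock(&vol->lock);
	if (!vol->present) {
		pthread_mutex_unlock(&vol->lock);
		return -1;	/* like returning -ENODEV */
	}
	vol->ref_count += 1;
	pthread_mutex_unlock(&vol->lock);
	return 0;
}

static void volume_put(struct volume *vol)
{
	pthread_mutex_lock(&vol->lock);
	vol->ref_count -= 1;
	pthread_mutex_unlock(&vol->lock);
}

/* Removal side, as in ubi_remove_volume(): bail out while pinned. */
static int volume_remove(struct volume *vol)
{
	pthread_mutex_lock(&vol->lock);
	if (vol->ref_count > 1) {
		pthread_mutex_unlock(&vol->lock);
		return -2;	/* like returning -EBUSY */
	}
	vol->present = 0;
	pthread_mutex_unlock(&vol->lock);
	return 0;
}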
+27 -18
drivers/mtd/ubi/vtbl.c
··· 86 { 87 int i, err; 88 uint32_t crc; 89 90 ubi_assert(idx >= 0 && idx < ubi->vtbl_slots); 91 92 if (!vtbl_rec) 93 vtbl_rec = &empty_vtbl_record; ··· 98 vtbl_rec->crc = cpu_to_be32(crc); 99 } 100 101 - mutex_lock(&ubi->vtbl_mutex); 102 memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record)); 103 for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) { 104 - err = ubi_eba_unmap_leb(ubi, UBI_LAYOUT_VOL_ID, i); 105 - if (err) { 106 - mutex_unlock(&ubi->vtbl_mutex); 107 return err; 108 - } 109 - err = ubi_eba_write_leb(ubi, UBI_LAYOUT_VOL_ID, i, ubi->vtbl, 0, 110 ubi->vtbl_size, UBI_LONGTERM); 111 - if (err) { 112 - mutex_unlock(&ubi->vtbl_mutex); 113 return err; 114 - } 115 } 116 117 paranoid_vtbl_check(ubi); 118 - mutex_unlock(&ubi->vtbl_mutex); 119 - return ubi_wl_flush(ubi); 120 } 121 122 /** 123 - * vol_til_check - check if volume table is not corrupted and contains sensible 124 - * data. 125 - * 126 * @ubi: UBI device description object 127 * @vtbl: volume table 128 * ··· 269 * this volume table copy was found during scanning. It has to be wiped 270 * out. 271 */ 272 - sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOL_ID); 273 if (sv) 274 old_seb = ubi_scan_find_seb(sv, copy); 275 ··· 281 } 282 283 vid_hdr->vol_type = UBI_VID_DYNAMIC; 284 - vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOL_ID); 285 vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT; 286 vid_hdr->data_size = vid_hdr->used_ebs = 287 vid_hdr->data_pad = cpu_to_be32(0); ··· 514 vol->name[vol->name_len] = '\0'; 515 vol->vol_id = i; 516 517 ubi_assert(!ubi->volumes[i]); 518 ubi->volumes[i] = vol; 519 ubi->vol_count += 1; ··· 575 vol->last_eb_bytes = sv->last_data_size; 576 } 577 578 vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL); 579 if (!vol) 580 return -ENOMEM; ··· 590 vol->last_eb_bytes = vol->reserved_pebs; 591 vol->used_bytes = 592 (long long)vol->used_ebs * (ubi->leb_size - vol->data_pad); 593 - vol->vol_id = UBI_LAYOUT_VOL_ID; 594 595 ubi_assert(!ubi->volumes[i]); 596 ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol; ··· 743 ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE; 744 ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size); 745 746 - sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOL_ID); 747 if (!sv) { 748 /* 749 * No logical eraseblocks belonging to the layout volume were
··· 86 { 87 int i, err; 88 uint32_t crc; 89 + struct ubi_volume *layout_vol; 90 91 ubi_assert(idx >= 0 && idx < ubi->vtbl_slots); 92 + layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)]; 93 94 if (!vtbl_rec) 95 vtbl_rec = &empty_vtbl_record; ··· 96 vtbl_rec->crc = cpu_to_be32(crc); 97 } 98 99 memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record)); 100 for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) { 101 + err = ubi_eba_unmap_leb(ubi, layout_vol, i); 102 + if (err) 103 return err; 104 + 105 + err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0, 106 ubi->vtbl_size, UBI_LONGTERM); 107 + if (err) 108 return err; 109 } 110 111 paranoid_vtbl_check(ubi); 112 + return 0; 113 } 114 115 /** 116 + * vtbl_check - check if volume table is not corrupted and contains sensible 117 + * data. 118 * @ubi: UBI device description object 119 * @vtbl: volume table 120 * ··· 273 * this volume table copy was found during scanning. It has to be wiped 274 * out. 275 */ 276 + sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID); 277 if (sv) 278 old_seb = ubi_scan_find_seb(sv, copy); 279 ··· 285 } 286 287 vid_hdr->vol_type = UBI_VID_DYNAMIC; 288 + vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID); 289 vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT; 290 vid_hdr->data_size = vid_hdr->used_ebs = 291 vid_hdr->data_pad = cpu_to_be32(0); ··· 518 vol->name[vol->name_len] = '\0'; 519 vol->vol_id = i; 520 521 + if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) { 522 + /* Auto re-size flag may be set only for one volume */ 523 + if (ubi->autoresize_vol_id != -1) { 524 + ubi_err("more than one auto-resize volume (%d " 525 + "and %d)", ubi->autoresize_vol_id, i); 526 + return -EINVAL; 527 + } 528 + 529 + ubi->autoresize_vol_id = i; 530 + } 531 + 532 ubi_assert(!ubi->volumes[i]); 533 ubi->volumes[i] = vol; 534 ubi->vol_count += 1; ··· 568 vol->last_eb_bytes = sv->last_data_size; 569 } 570 571 + /* And add the layout volume */ 572 vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL); 573 if (!vol) 574 return -ENOMEM; ··· 582 vol->last_eb_bytes = vol->reserved_pebs; 583 vol->used_bytes = 584 (long long)vol->used_ebs * (ubi->leb_size - vol->data_pad); 585 + vol->vol_id = UBI_LAYOUT_VOLUME_ID; 586 + vol->ref_count = 1; 587 588 ubi_assert(!ubi->volumes[i]); 589 ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol; ··· 734 ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE; 735 ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size); 736 737 + sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID); 738 if (!sv) { 739 /* 740 * No logical eraseblocks belonging to the layout volume were
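The auto-resize hunk above enforces that at most one volume carries the flag. Read in isolation, the check is a single pass over the table; a standalone sketch (illustrative types, and the flag value is assumed here rather than taken from the UBI media headers):

#include <stdio.h>

#define UBI_VTBL_AUTORESIZE_FLG 0x01	/* assumed value, for illustration */

struct vtbl_record {
	unsigned char flags;
};

/* Returns the auto-resize volume ID, -1 if none, or -2 on a bad table. */
static int find_autoresize_vol(const struct vtbl_record *vtbl, int slots)
{
	int i, autoresize_vol_id = -1;

	for (i = 0; i < slots; i++) {
		if (!(vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG))
			continue;
		if (autoresize_vol_id != -1) {
			fprintf(stderr, "more than one auto-resize volume "
				"(%d and %d)\n", autoresize_vol_id, i);
			return -2;
		}
		autoresize_vol_id = i;
	}
	return autoresize_vol_id;
}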
+184 -154
drivers/mtd/ubi/wl.c
··· 117 #define WL_MAX_FAILURES 32 118 119 /** 120 - * struct ubi_wl_entry - wear-leveling entry. 121 - * @rb: link in the corresponding RB-tree 122 - * @ec: erase counter 123 - * @pnum: physical eraseblock number 124 - * 125 - * Each physical eraseblock has a corresponding &struct wl_entry object which 126 - * may be kept in different RB-trees. 127 - */ 128 - struct ubi_wl_entry { 129 - struct rb_node rb; 130 - int ec; 131 - int pnum; 132 - }; 133 - 134 - /** 135 * struct ubi_wl_prot_entry - PEB protection entry. 136 * @rb_pnum: link in the @wl->prot.pnum RB-tree 137 * @rb_aec: link in the @wl->prot.aec RB-tree ··· 201 #define paranoid_check_in_wl_tree(e, root) 202 #endif 203 204 - /* Slab cache for wear-leveling entries */ 205 - static struct kmem_cache *wl_entries_slab; 206 - 207 /** 208 * wl_tree_add - add a wear-leveling entry to a WL RB-tree. 209 * @e: the wear-leveling entry to add ··· 249 int err; 250 struct ubi_work *wrk; 251 252 - spin_lock(&ubi->wl_lock); 253 254 if (list_empty(&ubi->works)) { 255 spin_unlock(&ubi->wl_lock); 256 return 0; 257 } 258 259 wrk = list_entry(ubi->works.next, struct ubi_work, list); 260 list_del(&wrk->list); 261 spin_unlock(&ubi->wl_lock); 262 263 /* ··· 279 err = wrk->func(ubi, wrk, 0); 280 if (err) 281 ubi_err("work failed with error code %d", err); 282 283 - spin_lock(&ubi->wl_lock); 284 - ubi->works_count -= 1; 285 - ubi_assert(ubi->works_count >= 0); 286 - spin_unlock(&ubi->wl_lock); 287 return err; 288 } 289 ··· 539 * prot_tree_del - remove a physical eraseblock from the protection trees 540 * @ubi: UBI device description object 541 * @pnum: the physical eraseblock to remove 542 */ 543 - static void prot_tree_del(struct ubi_device *ubi, int pnum) 544 { 545 struct rb_node *p; 546 struct ubi_wl_prot_entry *pe = NULL; ··· 555 pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum); 556 557 if (pnum == pe->e->pnum) 558 - break; 559 560 if (pnum < pe->e->pnum) 561 p = p->rb_left; ··· 563 p = p->rb_right; 564 } 565 566 ubi_assert(pe->e->pnum == pnum); 567 rb_erase(&pe->rb_aec, &ubi->prot.aec); 568 rb_erase(&pe->rb_pnum, &ubi->prot.pnum); 569 kfree(pe); 570 } 571 572 /** ··· 742 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, 743 int cancel) 744 { 745 - int err, put = 0; 746 struct ubi_wl_entry *e1, *e2; 747 struct ubi_vid_hdr *vid_hdr; 748 ··· 756 if (!vid_hdr) 757 return -ENOMEM; 758 759 spin_lock(&ubi->wl_lock); 760 761 - /* 762 - * Only one WL worker at a time is supported at this implementation, so 763 - * make sure a PEB is not being moved already. 764 - */ 765 - if (ubi->move_to || !ubi->free.rb_node || 766 (!ubi->used.rb_node && !ubi->scrub.rb_node)) { 767 /* 768 - * Only one WL worker at a time is supported at this 769 - * implementation, so if a LEB is already being moved, cancel. 770 - * 771 - * No free physical eraseblocks? Well, we cancel wear-leveling 772 - * then. It will be triggered again when a free physical 773 - * eraseblock appears. 774 * 775 * No used physical eraseblocks? They must be temporarily 776 * protected from being moved. 
They will be moved to the ··· 775 */ 776 dbg_wl("cancel WL, a list is empty: free %d, used %d", 777 !ubi->free.rb_node, !ubi->used.rb_node); 778 - ubi->wl_scheduled = 0; 779 - spin_unlock(&ubi->wl_lock); 780 - ubi_free_vid_hdr(ubi, vid_hdr); 781 - return 0; 782 } 783 784 if (!ubi->scrub.rb_node) { ··· 790 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { 791 dbg_wl("no WL needed: min used EC %d, max free EC %d", 792 e1->ec, e2->ec); 793 - ubi->wl_scheduled = 0; 794 - spin_unlock(&ubi->wl_lock); 795 - ubi_free_vid_hdr(ubi, vid_hdr); 796 - return 0; 797 } 798 paranoid_check_in_wl_tree(e1, &ubi->used); 799 rb_erase(&e1->rb, &ubi->used); 800 dbg_wl("move PEB %d EC %d to PEB %d EC %d", 801 e1->pnum, e1->ec, e2->pnum, e2->ec); 802 } else { 803 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb); 804 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); 805 paranoid_check_in_wl_tree(e1, &ubi->scrub); 806 - rb_erase(&e1->rb, &ubi->scrub); 807 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); 808 } 809 810 paranoid_check_in_wl_tree(e2, &ubi->free); 811 rb_erase(&e2->rb, &ubi->free); 812 - ubi_assert(!ubi->move_from && !ubi->move_to); 813 - ubi_assert(!ubi->move_to_put && !ubi->move_from_put); 814 ubi->move_from = e1; 815 ubi->move_to = e2; 816 spin_unlock(&ubi->wl_lock); ··· 817 * We so far do not know which logical eraseblock our physical 818 * eraseblock (@e1) belongs to. We have to read the volume identifier 819 * header first. 820 */ 821 822 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0); ··· 835 * likely have the VID header in place. 836 */ 837 dbg_wl("PEB %d has no VID header", e1->pnum); 838 - err = 0; 839 - } else { 840 - ubi_err("error %d while reading VID header from PEB %d", 841 - err, e1->pnum); 842 - if (err > 0) 843 - err = -EIO; 844 } 845 - goto error; 846 } 847 848 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); 849 if (err) { 850 - if (err == UBI_IO_BITFLIPS) 851 - err = 0; 852 - goto error; 853 } 854 855 ubi_free_vid_hdr(ubi, vid_hdr); 856 spin_lock(&ubi->wl_lock); 857 if (!ubi->move_to_put) 858 wl_tree_add(e2, &ubi->used); 859 else 860 put = 1; 861 ubi->move_from = ubi->move_to = NULL; 862 - ubi->move_from_put = ubi->move_to_put = 0; 863 - ubi->wl_scheduled = 0; 864 spin_unlock(&ubi->wl_lock); 865 866 if (put) { ··· 889 */ 890 dbg_wl("PEB %d was put meanwhile, erase", e2->pnum); 891 err = schedule_erase(ubi, e2, 0); 892 - if (err) { 893 - kmem_cache_free(wl_entries_slab, e2); 894 - ubi_ro_mode(ubi); 895 - } 896 } 897 898 - err = schedule_erase(ubi, e1, 0); 899 - if (err) { 900 - kmem_cache_free(wl_entries_slab, e1); 901 - ubi_ro_mode(ubi); 902 } 903 904 dbg_wl("done"); 905 - return err; 906 907 /* 908 - * Some error occurred. @e1 was not changed, so return it back. @e2 909 - * might be changed, schedule it for erasure. 910 */ 911 - error: 912 - if (err) 913 - dbg_wl("error %d occurred, cancel operation", err); 914 - ubi_assert(err <= 0); 915 - 916 ubi_free_vid_hdr(ubi, vid_hdr); 917 spin_lock(&ubi->wl_lock); 918 - ubi->wl_scheduled = 0; 919 - if (ubi->move_from_put) 920 - put = 1; 921 else 922 wl_tree_add(e1, &ubi->used); 923 ubi->move_from = ubi->move_to = NULL; 924 - ubi->move_from_put = ubi->move_to_put = 0; 925 spin_unlock(&ubi->wl_lock); 926 927 - if (put) { 928 - /* 929 - * Well, the target PEB was put meanwhile, schedule it for 930 - * erasure. 
931 - */ 932 - dbg_wl("PEB %d was put meanwhile, erase", e1->pnum); 933 - err = schedule_erase(ubi, e1, 0); 934 - if (err) { 935 - kmem_cache_free(wl_entries_slab, e1); 936 - ubi_ro_mode(ubi); 937 - } 938 - } 939 - 940 err = schedule_erase(ubi, e2, 0); 941 - if (err) { 942 - kmem_cache_free(wl_entries_slab, e2); 943 - ubi_ro_mode(ubi); 944 - } 945 946 - yield(); 947 return err; 948 } 949 950 /** ··· 1037 if (cancel) { 1038 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec); 1039 kfree(wl_wrk); 1040 - kmem_cache_free(wl_entries_slab, e); 1041 return 0; 1042 } 1043 ··· 1066 1067 ubi_err("failed to erase PEB %d, error %d", pnum, err); 1068 kfree(wl_wrk); 1069 - kmem_cache_free(wl_entries_slab, e); 1070 1071 if (err == -EINTR || err == -ENOMEM || err == -EAGAIN || 1072 err == -EBUSY) { ··· 1136 } 1137 1138 /** 1139 - * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling 1140 - * unit. 1141 * @ubi: UBI device description object 1142 * @pnum: physical eraseblock to return 1143 * @torture: if this physical eraseblock has to be tortured ··· 1144 * This function is called to return physical eraseblock @pnum to the pool of 1145 * free physical eraseblocks. The @torture flag has to be set if an I/O error 1146 * occurred to this @pnum and it has to be tested. This function returns zero 1147 - * in case of success and a negative error code in case of failure. 1148 */ 1149 int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture) 1150 { ··· 1155 ubi_assert(pnum >= 0); 1156 ubi_assert(pnum < ubi->peb_count); 1157 1158 spin_lock(&ubi->wl_lock); 1159 - 1160 e = ubi->lookuptbl[pnum]; 1161 if (e == ubi->move_from) { 1162 /* ··· 1164 * be moved. It will be scheduled for erasure in the 1165 * wear-leveling worker. 1166 */ 1167 - dbg_wl("PEB %d is being moved", pnum); 1168 - ubi_assert(!ubi->move_from_put); 1169 - ubi->move_from_put = 1; 1170 spin_unlock(&ubi->wl_lock); 1171 - return 0; 1172 } else if (e == ubi->move_to) { 1173 /* 1174 * User is putting the physical eraseblock which was selected 1175 * as the target the data is moved to. It may happen if the EBA 1176 - * unit already re-mapped the LEB but the WL unit did has not 1177 - * put the PEB to the "used" tree. 1178 */ 1179 dbg_wl("PEB %d is the target of data moving", pnum); 1180 ubi_assert(!ubi->move_to_put); ··· 1193 } else if (in_wl_tree(e, &ubi->scrub)) { 1194 paranoid_check_in_wl_tree(e, &ubi->scrub); 1195 rb_erase(&e->rb, &ubi->scrub); 1196 - } else 1197 - prot_tree_del(ubi, e->pnum); 1198 } 1199 spin_unlock(&ubi->wl_lock); 1200 ··· 1255 if (in_wl_tree(e, &ubi->used)) { 1256 paranoid_check_in_wl_tree(e, &ubi->used); 1257 rb_erase(&e->rb, &ubi->used); 1258 - } else 1259 - prot_tree_del(ubi, pnum); 1260 1261 wl_tree_add(e, &ubi->scrub); 1262 spin_unlock(&ubi->wl_lock); ··· 1286 */ 1287 int ubi_wl_flush(struct ubi_device *ubi) 1288 { 1289 - int err, pending_count; 1290 - 1291 - pending_count = ubi->works_count; 1292 - 1293 - dbg_wl("flush (%d pending works)", pending_count); 1294 1295 /* 1296 * Erase while the pending works queue is not empty, but not more then 1297 * the number of currently pending works. 1298 */ 1299 - while (pending_count-- > 0) { 1300 err = do_work(ubi); 1301 if (err) 1302 return err; ··· 1346 rb->rb_right = NULL; 1347 } 1348 1349 - kmem_cache_free(wl_entries_slab, e); 1350 } 1351 } 1352 } ··· 1355 * ubi_thread - UBI background thread. 
1356 * @u: the UBI device description object pointer 1357 */ 1358 - static int ubi_thread(void *u) 1359 { 1360 int failures = 0; 1361 struct ubi_device *ubi = u; ··· 1446 ubi->used = ubi->free = ubi->scrub = RB_ROOT; 1447 ubi->prot.pnum = ubi->prot.aec = RB_ROOT; 1448 spin_lock_init(&ubi->wl_lock); 1449 ubi->max_ec = si->max_ec; 1450 INIT_LIST_HEAD(&ubi->works); 1451 1452 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num); 1453 1454 - ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name); 1455 - if (IS_ERR(ubi->bgt_thread)) { 1456 - err = PTR_ERR(ubi->bgt_thread); 1457 - ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name, 1458 - err); 1459 - return err; 1460 - } 1461 - 1462 - if (ubi_devices_cnt == 0) { 1463 - wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab", 1464 - sizeof(struct ubi_wl_entry), 1465 - 0, 0, NULL); 1466 - if (!wl_entries_slab) 1467 - return -ENOMEM; 1468 - } 1469 - 1470 err = -ENOMEM; 1471 ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL); 1472 if (!ubi->lookuptbl) 1473 - goto out_free; 1474 1475 list_for_each_entry_safe(seb, tmp, &si->erase, u.list) { 1476 cond_resched(); 1477 1478 - e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); 1479 if (!e) 1480 goto out_free; 1481 ··· 1469 e->ec = seb->ec; 1470 ubi->lookuptbl[e->pnum] = e; 1471 if (schedule_erase(ubi, e, 0)) { 1472 - kmem_cache_free(wl_entries_slab, e); 1473 goto out_free; 1474 } 1475 } ··· 1477 list_for_each_entry(seb, &si->free, u.list) { 1478 cond_resched(); 1479 1480 - e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); 1481 if (!e) 1482 goto out_free; 1483 ··· 1491 list_for_each_entry(seb, &si->corr, u.list) { 1492 cond_resched(); 1493 1494 - e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); 1495 if (!e) 1496 goto out_free; 1497 ··· 1499 e->ec = seb->ec; 1500 ubi->lookuptbl[e->pnum] = e; 1501 if (schedule_erase(ubi, e, 0)) { 1502 - kmem_cache_free(wl_entries_slab, e); 1503 goto out_free; 1504 } 1505 } ··· 1508 ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) { 1509 cond_resched(); 1510 1511 - e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); 1512 if (!e) 1513 goto out_free; 1514 ··· 1548 tree_destroy(&ubi->free); 1549 tree_destroy(&ubi->scrub); 1550 kfree(ubi->lookuptbl); 1551 - if (ubi_devices_cnt == 0) 1552 - kmem_cache_destroy(wl_entries_slab); 1553 return err; 1554 } 1555 ··· 1577 rb->rb_right = NULL; 1578 } 1579 1580 - kmem_cache_free(wl_entries_slab, pe->e); 1581 kfree(pe); 1582 } 1583 } ··· 1589 */ 1590 void ubi_wl_close(struct ubi_device *ubi) 1591 { 1592 - dbg_wl("disable \"%s\"", ubi->bgt_name); 1593 - if (ubi->bgt_thread) 1594 - kthread_stop(ubi->bgt_thread); 1595 - 1596 dbg_wl("close the UBI wear-leveling unit"); 1597 1598 cancel_pending(ubi); ··· 1597 tree_destroy(&ubi->free); 1598 tree_destroy(&ubi->scrub); 1599 kfree(ubi->lookuptbl); 1600 - if (ubi_devices_cnt == 1) 1601 - kmem_cache_destroy(wl_entries_slab); 1602 } 1603 1604 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
··· 117 #define WL_MAX_FAILURES 32 118 119 /** 120 * struct ubi_wl_prot_entry - PEB protection entry. 121 * @rb_pnum: link in the @wl->prot.pnum RB-tree 122 * @rb_aec: link in the @wl->prot.aec RB-tree ··· 216 #define paranoid_check_in_wl_tree(e, root) 217 #endif 218 219 /** 220 * wl_tree_add - add a wear-leveling entry to a WL RB-tree. 221 * @e: the wear-leveling entry to add ··· 267 int err; 268 struct ubi_work *wrk; 269 270 + cond_resched(); 271 272 + /* 273 + * @ubi->work_sem is used to synchronize with the workers. Workers take 274 + * it in read mode, so many of them may be doing work at a time. But 275 + * the queue flush code has to be sure the whole queue of works is 276 + * done, and it takes the mutex in write mode. 277 + */ 278 + down_read(&ubi->work_sem); 279 + spin_lock(&ubi->wl_lock); 280 if (list_empty(&ubi->works)) { 281 spin_unlock(&ubi->wl_lock); 282 + up_read(&ubi->work_sem); 283 return 0; 284 } 285 286 wrk = list_entry(ubi->works.next, struct ubi_work, list); 287 list_del(&wrk->list); 288 + ubi->works_count -= 1; 289 + ubi_assert(ubi->works_count >= 0); 290 spin_unlock(&ubi->wl_lock); 291 292 /* ··· 286 err = wrk->func(ubi, wrk, 0); 287 if (err) 288 ubi_err("work failed with error code %d", err); 289 + up_read(&ubi->work_sem); 290 291 return err; 292 } 293 ··· 549 * prot_tree_del - remove a physical eraseblock from the protection trees 550 * @ubi: UBI device description object 551 * @pnum: the physical eraseblock to remove 552 + * 553 + * This function removes PEB @pnum from the protection trees and returns zero 554 + * in case of success and %-ENODEV if the PEB was not found in the protection 555 + * trees. 556 + */ 557 + static int prot_tree_del(struct ubi_device *ubi, int pnum) 558 { 559 struct rb_node *p; 560 struct ubi_wl_prot_entry *pe = NULL; ··· 561 pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum); 562 563 if (pnum == pe->e->pnum) 564 + goto found; 565 566 if (pnum < pe->e->pnum) 567 p = p->rb_left; ··· 569 p = p->rb_right; 570 } 571 572 + return -ENODEV; 573 + 574 + found: 575 ubi_assert(pe->e->pnum == pnum); 576 rb_erase(&pe->rb_aec, &ubi->prot.aec); 577 rb_erase(&pe->rb_pnum, &ubi->prot.pnum); 578 kfree(pe); 579 + return 0; 580 } 581 582 /** ··· 744 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, 745 int cancel) 746 { 747 + int err, put = 0, scrubbing = 0, protect = 0; 748 + struct ubi_wl_prot_entry *uninitialized_var(pe); 749 struct ubi_wl_entry *e1, *e2; 750 struct ubi_vid_hdr *vid_hdr; 751 ··· 757 if (!vid_hdr) 758 return -ENOMEM; 759 760 + mutex_lock(&ubi->move_mutex); 761 spin_lock(&ubi->wl_lock); 762 + ubi_assert(!ubi->move_from && !ubi->move_to); 763 + ubi_assert(!ubi->move_to_put); 764 765 + if (!ubi->free.rb_node || 766 (!ubi->used.rb_node && !ubi->scrub.rb_node)) { 767 /* 768 + * No free physical eraseblocks? Well, they must be waiting in 769 + * the queue to be erased. Cancel movement - it will be 770 + * triggered again when a free physical eraseblock appears. 771 * 772 * No used physical eraseblocks? They must be temporarily 773 * protected from being moved. 
They will be moved to the ··· 780 */ 781 dbg_wl("cancel WL, a list is empty: free %d, used %d", 782 !ubi->free.rb_node, !ubi->used.rb_node); 783 + goto out_cancel; 784 } 785 786 if (!ubi->scrub.rb_node) { ··· 798 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { 799 dbg_wl("no WL needed: min used EC %d, max free EC %d", 800 e1->ec, e2->ec); 801 + goto out_cancel; 802 } 803 paranoid_check_in_wl_tree(e1, &ubi->used); 804 rb_erase(&e1->rb, &ubi->used); 805 dbg_wl("move PEB %d EC %d to PEB %d EC %d", 806 e1->pnum, e1->ec, e2->pnum, e2->ec); 807 } else { 808 + /* Perform scrubbing */ 809 + scrubbing = 1; 810 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb); 811 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); 812 paranoid_check_in_wl_tree(e1, &ubi->scrub); 813 + rb_erase(&e1->rb, &ubi->scrub); 814 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); 815 } 816 817 paranoid_check_in_wl_tree(e2, &ubi->free); 818 rb_erase(&e2->rb, &ubi->free); 819 ubi->move_from = e1; 820 ubi->move_to = e2; 821 spin_unlock(&ubi->wl_lock); ··· 828 * We so far do not know which logical eraseblock our physical 829 * eraseblock (@e1) belongs to. We have to read the volume identifier 830 * header first. 831 + * 832 + * Note, we are protected from this PEB being unmapped and erased. The 833 + * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB 834 + * which is being moved was unmapped. 835 + */ 836 837 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0); ··· 842 * likely have the VID header in place. 843 */ 844 dbg_wl("PEB %d has no VID header", e1->pnum); 845 + goto out_not_moved; 846 } 847 + 848 + ubi_err("error %d while reading VID header from PEB %d", 849 + err, e1->pnum); 850 + if (err > 0) 851 + err = -EIO; 852 + goto out_error; 853 } 854 855 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); 856 if (err) { 857 + 858 + if (err < 0) 859 + goto out_error; 860 + if (err == 1) 861 + goto out_not_moved; 862 + 863 + /* 864 + * For some reason the LEB was not moved - it might be because 865 + * the volume is being deleted. We should prevent this PEB from 866 + * being selected for wear-levelling movement for some "time", 867 + * so put it into the protection tree. 868 + */ 869 + 870 + dbg_wl("cancelled moving PEB %d", e1->pnum); 871 + pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS); 872 + if (!pe) { 873 + err = -ENOMEM; 874 + goto out_error; 875 + } 876 + 877 + protect = 1; 878 } 879 880 ubi_free_vid_hdr(ubi, vid_hdr); 881 spin_lock(&ubi->wl_lock); 882 + if (protect) 883 + prot_tree_add(ubi, e1, pe, protect); 884 if (!ubi->move_to_put) 885 wl_tree_add(e2, &ubi->used); 886 else 887 put = 1; 888 ubi->move_from = ubi->move_to = NULL; 889 + ubi->move_to_put = ubi->wl_scheduled = 0; 890 spin_unlock(&ubi->wl_lock); 891 892 if (put) { ··· 877 */ 878 dbg_wl("PEB %d was put meanwhile, erase", e2->pnum); 879 err = schedule_erase(ubi, e2, 0); 880 + if (err) 881 + goto out_error; 882 } 883 884 + if (!protect) { 885 + err = schedule_erase(ubi, e1, 0); 886 + if (err) 887 + goto out_error; 888 } 889 + 890 891 dbg_wl("done"); 892 + mutex_unlock(&ubi->move_mutex); 893 + return 0; 894 895 /* 896 + * For some reason the LEB was not moved - it might be an error, or 897 + * something else. @e1 was not changed, so return it back. @e2 might 898 + * be changed, schedule it for erasure. 
899 */ 900 + out_not_moved: 901 ubi_free_vid_hdr(ubi, vid_hdr); 902 spin_lock(&ubi->wl_lock); 903 + if (scrubbing) 904 + wl_tree_add(e1, &ubi->scrub); 905 else 906 wl_tree_add(e1, &ubi->used); 907 ubi->move_from = ubi->move_to = NULL; 908 + ubi->move_to_put = ubi->wl_scheduled = 0; 909 spin_unlock(&ubi->wl_lock); 910 911 err = schedule_erase(ubi, e2, 0); 912 + if (err) 913 + goto out_error; 914 915 + mutex_unlock(&ubi->move_mutex); 916 + return 0; 917 + 918 + out_error: 919 + ubi_err("error %d while moving PEB %d to PEB %d", 920 + err, e1->pnum, e2->pnum); 921 + 922 + ubi_free_vid_hdr(ubi, vid_hdr); 923 + spin_lock(&ubi->wl_lock); 924 + ubi->move_from = ubi->move_to = NULL; 925 + ubi->move_to_put = ubi->wl_scheduled = 0; 926 + spin_unlock(&ubi->wl_lock); 927 + 928 + kmem_cache_free(ubi_wl_entry_slab, e1); 929 + kmem_cache_free(ubi_wl_entry_slab, e2); 930 + ubi_ro_mode(ubi); 931 + 932 + mutex_unlock(&ubi->move_mutex); 933 return err; 934 + 935 + out_cancel: 936 + ubi->wl_scheduled = 0; 937 + spin_unlock(&ubi->wl_lock); 938 + mutex_unlock(&ubi->move_mutex); 939 + ubi_free_vid_hdr(ubi, vid_hdr); 940 + return 0; 941 } 942 943 /** ··· 1020 if (cancel) { 1021 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec); 1022 kfree(wl_wrk); 1023 + kmem_cache_free(ubi_wl_entry_slab, e); 1024 return 0; 1025 } 1026 ··· 1049 1050 ubi_err("failed to erase PEB %d, error %d", pnum, err); 1051 kfree(wl_wrk); 1052 + kmem_cache_free(ubi_wl_entry_slab, e); 1053 1054 if (err == -EINTR || err == -ENOMEM || err == -EAGAIN || 1055 err == -EBUSY) { ··· 1119 } 1120 1121 /** 1122 + * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit. 1123 * @ubi: UBI device description object 1124 * @pnum: physical eraseblock to return 1125 * @torture: if this physical eraseblock has to be tortured ··· 1128 * This function is called to return physical eraseblock @pnum to the pool of 1129 * free physical eraseblocks. The @torture flag has to be set if an I/O error 1130 * occurred to this @pnum and it has to be tested. This function returns zero 1131 + * in case of success, and a negative error code in case of failure. 1132 */ 1133 int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture) 1134 { ··· 1139 ubi_assert(pnum >= 0); 1140 ubi_assert(pnum < ubi->peb_count); 1141 1142 + retry: 1143 spin_lock(&ubi->wl_lock); 1144 e = ubi->lookuptbl[pnum]; 1145 if (e == ubi->move_from) { 1146 /* ··· 1148 * be moved. It will be scheduled for erasure in the 1149 * wear-leveling worker. 1150 */ 1151 + dbg_wl("PEB %d is being moved, wait", pnum); 1152 spin_unlock(&ubi->wl_lock); 1153 + 1154 + /* Wait for the WL worker by taking the @ubi->move_mutex */ 1155 + mutex_lock(&ubi->move_mutex); 1156 + mutex_unlock(&ubi->move_mutex); 1157 + goto retry; 1158 } else if (e == ubi->move_to) { 1159 /* 1160 * User is putting the physical eraseblock which was selected 1161 * as the target the data is moved to. It may happen if the EBA 1162 + * unit already re-mapped the LEB in 'ubi_eba_copy_leb()' but 1163 + * the WL unit has not put the PEB to the "used" tree yet, but 1164 + * it is about to do this. So we just set a flag which will 1165 + * tell the WL worker that the PEB is not needed anymore and 1166 + * should be scheduled for erasure. 
1167 */ 1168 dbg_wl("PEB %d is the target of data moving", pnum); 1169 ubi_assert(!ubi->move_to_put); ··· 1172 } else if (in_wl_tree(e, &ubi->scrub)) { 1173 paranoid_check_in_wl_tree(e, &ubi->scrub); 1174 rb_erase(&e->rb, &ubi->scrub); 1175 + } else { 1176 + err = prot_tree_del(ubi, e->pnum); 1177 + if (err) { 1178 + ubi_err("PEB %d not found", pnum); 1179 + ubi_ro_mode(ubi); 1180 + spin_unlock(&ubi->wl_lock); 1181 + return err; 1182 + } 1183 + } 1184 } 1185 spin_unlock(&ubi->wl_lock); 1186 ··· 1227 if (in_wl_tree(e, &ubi->used)) { 1228 paranoid_check_in_wl_tree(e, &ubi->used); 1229 rb_erase(&e->rb, &ubi->used); 1230 + } else { 1231 + int err; 1232 + 1233 + err = prot_tree_del(ubi, e->pnum); 1234 + if (err) { 1235 + ubi_err("PEB %d not found", pnum); 1236 + ubi_ro_mode(ubi); 1237 + spin_unlock(&ubi->wl_lock); 1238 + return err; 1239 + } 1240 + } 1241 1242 wl_tree_add(e, &ubi->scrub); 1243 spin_unlock(&ubi->wl_lock); ··· 1249 */ 1250 int ubi_wl_flush(struct ubi_device *ubi) 1251 { 1252 + int err; 1253 1254 /* 1255 * Erase while the pending works queue is not empty, but not more than 1256 * the number of currently pending works. 1257 */ 1258 + dbg_wl("flush (%d pending works)", ubi->works_count); 1259 + while (ubi->works_count) { 1260 + err = do_work(ubi); 1261 + if (err) 1262 + return err; 1263 + } 1264 + 1265 + /* 1266 + * Make sure all the works which have been done in parallel are 1267 + * finished. 1268 + */ 1269 + down_write(&ubi->work_sem); 1270 + up_write(&ubi->work_sem); 1271 + 1272 + /* 1273 + * And in case the last one was the WL worker and it cancelled the 1274 + * LEB movement, flush again. 1275 + */ 1276 + while (ubi->works_count) { 1277 + dbg_wl("flush more (%d pending works)", ubi->works_count); 1278 err = do_work(ubi); 1279 if (err) 1280 return err; ··· 1294 rb->rb_right = NULL; 1295 } 1296 1297 + kmem_cache_free(ubi_wl_entry_slab, e); 1298 } 1299 } 1300 } ··· 1303 * ubi_thread - UBI background thread. 
1304 * @u: the UBI device description object pointer 1305 */ 1306 + int ubi_thread(void *u) 1307 { 1308 int failures = 0; 1309 struct ubi_device *ubi = u; ··· 1394 ubi->used = ubi->free = ubi->scrub = RB_ROOT; 1395 ubi->prot.pnum = ubi->prot.aec = RB_ROOT; 1396 spin_lock_init(&ubi->wl_lock); 1397 + mutex_init(&ubi->move_mutex); 1398 + init_rwsem(&ubi->work_sem); 1399 ubi->max_ec = si->max_ec; 1400 INIT_LIST_HEAD(&ubi->works); 1401 1402 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num); 1403 1404 err = -ENOMEM; 1405 ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL); 1406 if (!ubi->lookuptbl) 1407 + return err; 1408 1409 list_for_each_entry_safe(seb, tmp, &si->erase, u.list) { 1410 cond_resched(); 1411 1412 + e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); 1413 if (!e) 1414 goto out_free; 1415 ··· 1431 e->ec = seb->ec; 1432 ubi->lookuptbl[e->pnum] = e; 1433 if (schedule_erase(ubi, e, 0)) { 1434 + kmem_cache_free(ubi_wl_entry_slab, e); 1435 goto out_free; 1436 } 1437 } ··· 1439 list_for_each_entry(seb, &si->free, u.list) { 1440 cond_resched(); 1441 1442 + e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); 1443 if (!e) 1444 goto out_free; 1445 ··· 1453 list_for_each_entry(seb, &si->corr, u.list) { 1454 cond_resched(); 1455 1456 + e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); 1457 if (!e) 1458 goto out_free; 1459 ··· 1461 e->ec = seb->ec; 1462 ubi->lookuptbl[e->pnum] = e; 1463 if (schedule_erase(ubi, e, 0)) { 1464 + kmem_cache_free(ubi_wl_entry_slab, e); 1465 goto out_free; 1466 } 1467 } ··· 1470 ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) { 1471 cond_resched(); 1472 1473 + e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); 1474 if (!e) 1475 goto out_free; 1476 ··· 1510 tree_destroy(&ubi->free); 1511 tree_destroy(&ubi->scrub); 1512 kfree(ubi->lookuptbl); 1513 return err; 1514 } 1515 ··· 1541 rb->rb_right = NULL; 1542 } 1543 1544 + kmem_cache_free(ubi_wl_entry_slab, pe->e); 1545 kfree(pe); 1546 } 1547 } ··· 1553 */ 1554 void ubi_wl_close(struct ubi_device *ubi) 1555 { 1556 dbg_wl("close the UBI wear-leveling unit"); 1557 1558 cancel_pending(ubi); ··· 1565 tree_destroy(&ubi->free); 1566 tree_destroy(&ubi->scrub); 1567 kfree(ubi->lookuptbl); 1568 } 1569 1570 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
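The flush path above relies on a standard rwsem idiom: workers hold @work_sem for reading while they run, so taking the semaphore for writing and releasing it immediately acts as a barrier that waits for every in-flight worker. A minimal sketch of the idiom, with illustrative names rather than the UBI ones:

#include <linux/rwsem.h>

static DECLARE_RWSEM(work_sem);

static void worker(void)
{
        down_read(&work_sem);   /* mark one unit of work in flight */
        /* ... perform the work ... */
        up_read(&work_sem);
}

static void wait_for_workers(void)
{
        /*
         * The write lock is granted only after every reader has
         * released the semaphore, so this pair waits for all
         * currently running workers without holding anything up
         * for longer than necessary.
         */
        down_write(&work_sem);
        up_write(&work_sem);
}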
+4 -2
fs/jffs2/acl.c
··· 176 spin_unlock(&inode->i_lock); 177 } 178 179 - struct posix_acl *jffs2_get_acl(struct inode *inode, int type) 180 { 181 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); 182 struct posix_acl *acl; ··· 345 if (!clone) 346 return -ENOMEM; 347 rc = posix_acl_create_masq(clone, (mode_t *)i_mode); 348 - if (rc < 0) 349 return rc; 350 if (rc > 0) 351 jffs2_iset_acl(inode, &f->i_acl_access, clone); 352
··· 176 spin_unlock(&inode->i_lock); 177 } 178 179 + static struct posix_acl *jffs2_get_acl(struct inode *inode, int type) 180 { 181 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); 182 struct posix_acl *acl; ··· 345 if (!clone) 346 return -ENOMEM; 347 rc = posix_acl_create_masq(clone, (mode_t *)i_mode); 348 + if (rc < 0) { 349 + posix_acl_release(clone); 350 return rc; 351 + } 352 if (rc > 0) 353 jffs2_iset_acl(inode, &f->i_acl_access, clone); 354
-2
fs/jffs2/acl.h
··· 28 29 #define JFFS2_ACL_NOT_CACHED ((void *)-1) 30 31 - extern struct posix_acl *jffs2_get_acl(struct inode *inode, int type); 32 extern int jffs2_permission(struct inode *, int, struct nameidata *); 33 extern int jffs2_acl_chmod(struct inode *); 34 extern int jffs2_init_acl_pre(struct inode *, struct inode *, int *); ··· 39 40 #else 41 42 - #define jffs2_get_acl(inode, type) (NULL) 43 #define jffs2_permission (NULL) 44 #define jffs2_acl_chmod(inode) (0) 45 #define jffs2_init_acl_pre(dir_i,inode,mode) (0)
··· 28 29 #define JFFS2_ACL_NOT_CACHED ((void *)-1) 30 31 extern int jffs2_permission(struct inode *, int, struct nameidata *); 32 extern int jffs2_acl_chmod(struct inode *); 33 extern int jffs2_init_acl_pre(struct inode *, struct inode *, int *); ··· 40 41 #else 42 43 #define jffs2_permission (NULL) 44 #define jffs2_acl_chmod(inode) (0) 45 #define jffs2_init_acl_pre(dir_i,inode,mode) (0)
+1 -5
fs/jffs2/fs.c
··· 97 ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid); 98 99 if (ivalid & ATTR_MODE) 100 - if (iattr->ia_mode & S_ISGID && 101 - !in_group_p(je16_to_cpu(ri->gid)) && !capable(CAP_FSETID)) 102 - ri->mode = cpu_to_jemode(iattr->ia_mode & ~S_ISGID); 103 - else 104 - ri->mode = cpu_to_jemode(iattr->ia_mode); 105 else 106 ri->mode = cpu_to_jemode(inode->i_mode); 107
··· 97 ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid); 98 99 if (ivalid & ATTR_MODE) 100 + ri->mode = cpu_to_jemode(iattr->ia_mode); 101 else 102 ri->mode = cpu_to_jemode(inode->i_mode); 103
+6 -3
fs/jffs2/nodelist.c
··· 32 if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) { 33 /* Duplicate. Free one */ 34 if (new->version < (*prev)->version) { 35 - dbg_dentlist("Eep! Marking new dirent node is obsolete, old is \"%s\", ino #%u\n", 36 (*prev)->name, (*prev)->ino); 37 jffs2_mark_node_obsolete(c, new->raw); 38 jffs2_free_full_dirent(new); 39 } else { 40 - dbg_dentlist("marking old dirent \"%s\", ino #%u bsolete\n", 41 (*prev)->name, (*prev)->ino); 42 new->next = (*prev)->next; 43 - jffs2_mark_node_obsolete(c, ((*prev)->raw)); 44 jffs2_free_full_dirent(*prev); 45 *prev = new; 46 }
··· 32 if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) { 33 /* Duplicate. Free one */ 34 if (new->version < (*prev)->version) { 35 + dbg_dentlist("Eep! Marking new dirent node obsolete, old is \"%s\", ino #%u\n", 36 (*prev)->name, (*prev)->ino); 37 jffs2_mark_node_obsolete(c, new->raw); 38 jffs2_free_full_dirent(new); 39 } else { 40 + dbg_dentlist("marking old dirent \"%s\", ino #%u obsolete\n", 41 (*prev)->name, (*prev)->ino); 42 new->next = (*prev)->next; 43 + /* It may have been a 'placeholder' deletion dirent, 44 + if jffs2_can_mark_obsolete() (see jffs2_do_unlink()) */ 45 + if ((*prev)->raw) 46 + jffs2_mark_node_obsolete(c, ((*prev)->raw)); 47 jffs2_free_full_dirent(*prev); 48 *prev = new; 49 }
+15 -14
fs/jffs2/readinode.c
··· 37 38 BUG_ON(tn->csize == 0); 39 40 - if (!jffs2_is_writebuffered(c)) 41 - goto adj_acc; 42 - 43 /* Calculate how many bytes were already checked */ 44 ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode); 45 - len = ofs % c->wbuf_pagesize; 46 - if (likely(len)) 47 - len = c->wbuf_pagesize - len; 48 49 - if (len >= tn->csize) { 50 - dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n", 51 - ref_offset(ref), tn->csize, ofs); 52 - goto adj_acc; 53 } 54 - 55 - ofs += len; 56 - len = tn->csize - len; 57 58 dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n", 59 ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len); ··· 64 * adding and jffs2_flash_read_end() interface. */ 65 if (c->mtd->point) { 66 err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer); 67 - if (!err && retlen < tn->csize) { 68 JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize); 69 c->mtd->unpoint(c->mtd, buffer, ofs, retlen); 70 } else if (err)
··· 37 38 BUG_ON(tn->csize == 0); 39 40 /* Calculate how many bytes were already checked */ 41 ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode); 42 + len = tn->csize; 43 44 + if (jffs2_is_writebuffered(c)) { 45 + int adj = ofs % c->wbuf_pagesize; 46 + if (likely(adj)) 47 + adj = c->wbuf_pagesize - adj; 48 + 49 + if (adj >= tn->csize) { 50 + dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n", 51 + ref_offset(ref), tn->csize, ofs); 52 + goto adj_acc; 53 + } 54 + 55 + ofs += adj; 56 + len -= adj; 57 } 58 59 dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n", 60 ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len); ··· 63 * adding and jffs2_flash_read_end() interface. */ 64 if (c->mtd->point) { 65 err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer); 66 + if (!err && retlen < len) { 67 JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize); 68 c->mtd->unpoint(c->mtd, buffer, ofs, retlen); 69 } else if (err)
+15 -13
fs/jffs2/write.c
··· 582 jffs2_add_fd_to_list(c, fd, &dir_f->dents); 583 up(&dir_f->sem); 584 } else { 585 - struct jffs2_full_dirent **prev = &dir_f->dents; 586 uint32_t nhash = full_name_hash(name, namelen); 587 588 /* We don't actually want to reserve any space, but we do ··· 590 down(&c->alloc_sem); 591 down(&dir_f->sem); 592 593 - while ((*prev) && (*prev)->nhash <= nhash) { 594 - if ((*prev)->nhash == nhash && 595 - !memcmp((*prev)->name, name, namelen) && 596 - !(*prev)->name[namelen]) { 597 - struct jffs2_full_dirent *this = *prev; 598 599 D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) @%08x obsolete\n", 600 - this->ino, ref_offset(this->raw))); 601 - 602 - *prev = this->next; 603 - jffs2_mark_node_obsolete(c, (this->raw)); 604 - jffs2_free_full_dirent(this); 605 break; 606 } 607 - prev = &((*prev)->next); 608 } 609 up(&dir_f->sem); 610 } ··· 631 D1(printk(KERN_DEBUG "Removing deletion dirent for \"%s\" from dir ino #%u\n", 632 fd->name, dead_f->inocache->ino)); 633 } 634 - jffs2_mark_node_obsolete(c, fd->raw); 635 jffs2_free_full_dirent(fd); 636 } 637 }
··· 582 jffs2_add_fd_to_list(c, fd, &dir_f->dents); 583 up(&dir_f->sem); 584 } else { 585 + struct jffs2_full_dirent *fd = dir_f->dents; 586 uint32_t nhash = full_name_hash(name, namelen); 587 588 /* We don't actually want to reserve any space, but we do ··· 590 down(&c->alloc_sem); 591 down(&dir_f->sem); 592 593 + for (fd = dir_f->dents; fd; fd = fd->next) { 594 + if (fd->nhash == nhash && 595 + !memcmp(fd->name, name, namelen) && 596 + !fd->name[namelen]) { 597 598 D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) @%08x obsolete\n", 599 + fd->ino, ref_offset(fd->raw))); 600 + jffs2_mark_node_obsolete(c, fd->raw); 601 + /* We don't want to remove it from the list immediately, 602 + because that screws up getdents()/seek() semantics even 603 + more than they're screwed already. Turn it into a 604 + node-less deletion dirent instead -- a placeholder */ 605 + fd->raw = NULL; 606 + fd->ino = 0; 607 break; 608 } 609 } 610 up(&dir_f->sem); 611 } ··· 630 D1(printk(KERN_DEBUG "Removing deletion dirent for \"%s\" from dir ino #%u\n", 631 fd->name, dead_f->inocache->ino)); 632 } 633 + if (fd->raw) 634 + jffs2_mark_node_obsolete(c, fd->raw); 635 jffs2_free_full_dirent(fd); 636 } 637 }
+12
include/linux/mtd/cfi.h
··· 98 #define CFI_DEVICETYPE_X32 (32 / 8) 99 #define CFI_DEVICETYPE_X64 (64 / 8) 100 101 /* NB: We keep these structures in memory in HOST byteorder, except 102 * where individually noted. 103 */
··· 98 #define CFI_DEVICETYPE_X32 (32 / 8) 99 #define CFI_DEVICETYPE_X64 (64 / 8) 100 101 + 102 + /* Device Interface Code Assignments from the "Common Flash Memory Interface 103 + * Publication 100" dated December 1, 2001. 104 + */ 105 + #define CFI_INTERFACE_X8_ASYNC 0x0000 106 + #define CFI_INTERFACE_X16_ASYNC 0x0001 107 + #define CFI_INTERFACE_X8_BY_X16_ASYNC 0x0002 108 + #define CFI_INTERFACE_X32_ASYNC 0x0003 109 + #define CFI_INTERFACE_X16_BY_X32_ASYNC 0x0005 110 + #define CFI_INTERFACE_NOT_ALLOWED 0xffff 111 + 112 + 113 /* NB: We keep these structures in memory in HOST byteorder, except 114 * where individually noted. 115 */
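The new interface codes mirror the device interface field a chip reports in its CFI query data. A hypothetical helper, not part of this patch, that turns a code into a printable label could look like:

static const char *cfi_interface_name(int code)
{
        switch (code) {
        case CFI_INTERFACE_X8_ASYNC:            return "x8 async";
        case CFI_INTERFACE_X16_ASYNC:           return "x16 async";
        case CFI_INTERFACE_X8_BY_X16_ASYNC:     return "x8/x16 async";
        case CFI_INTERFACE_X32_ASYNC:           return "x32 async";
        case CFI_INTERFACE_X16_BY_X32_ASYNC:    return "x16/x32 async";
        case CFI_INTERFACE_NOT_ALLOWED:         return "not allowed";
        default:                                return "unknown";
        }
}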
+9
include/linux/mtd/mtd.h
··· 152 int (*read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); 153 int (*write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf); 154 155 int (*read_oob) (struct mtd_info *mtd, loff_t from, 156 struct mtd_oob_ops *ops); 157 int (*write_oob) (struct mtd_info *mtd, loff_t to,
··· 152 int (*read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
153 int (*write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
154
155 + /* In black-box flight-recorder-like scenarios we want to make successful
156 writes in interrupt context. panic_write() is only intended to be
157 called when it's known the kernel is about to panic and we need the
158 write to succeed. Since the kernel is not going to be running for much
159 longer, this function can break locks and delay to ensure the write
160 succeeds (but not sleep). */
161 +
162 + int (*panic_write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
163 +
164 int (*read_oob) (struct mtd_info *mtd, loff_t from,
165 struct mtd_oob_ops *ops);
166 int (*write_oob) (struct mtd_info *mtd, loff_t to,
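A caller in a flight-recorder role would probe for the new hook and fall back to the normal write path when the driver lacks it. A hedged sketch; log_oops() and its arguments are illustrative, not from this patch:

static int log_oops(struct mtd_info *mtd, loff_t ofs, size_t len,
                    const u_char *buf)
{
        size_t retlen;
        int ret;

        if (mtd->panic_write)
                ret = mtd->panic_write(mtd, ofs, len, &retlen, buf);
        else
                ret = mtd->write(mtd, ofs, len, &retlen, buf);

        if (!ret && retlen != len)
                ret = -EIO;     /* treat a short write as an error */
        return ret;
}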
+8
include/linux/mtd/mtdram.h
···
··· 1 + #ifndef __MTD_MTDRAM_H__ 2 + #define __MTD_MTDRAM_H__ 3 + 4 + #include <linux/mtd/mtd.h> 5 + int mtdram_init_device(struct mtd_info *mtd, void *mapped_address, 6 + unsigned long size, char *name); 7 + 8 + #endif /* __MTD_MTDRAM_H__ */
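The header exports the mtdram setup routine so other code can present an already-mapped RAM region as an MTD device. A rough usage sketch, assuming the caller supplies mapped_address and size:

struct mtd_info *mtd;
int err;

mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
if (!mtd)
        return -ENOMEM;

/* Register the RAM window as an MTD device named "ramdev". */
err = mtdram_init_device(mtd, mapped_address, size, "ramdev");
if (err)
        kfree(mtd);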
+1
include/linux/mtd/onenand_regs.h
··· 67 /* 68 * Device ID Register F001h (R) 69 */ 70 #define ONENAND_DEVICE_DENSITY_SHIFT (4) 71 #define ONENAND_DEVICE_IS_DDP (1 << 3) 72 #define ONENAND_DEVICE_IS_DEMUX (1 << 2)
··· 67 /* 68 * Device ID Register F001h (R) 69 */ 70 + #define ONENAND_DEVICE_DENSITY_MASK (0xf) 71 #define ONENAND_DEVICE_DENSITY_SHIFT (4) 72 #define ONENAND_DEVICE_IS_DDP (1 << 3) 73 #define ONENAND_DEVICE_IS_DEMUX (1 << 2)
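With the new mask the density field of the device ID can be decoded without an open-coded 0xf. A small sketch; device_id stands for the value read from the F001h register:

int density = (device_id >> ONENAND_DEVICE_DENSITY_SHIFT)
                & ONENAND_DEVICE_DENSITY_MASK;

/* OneNAND chip size scales as 16MiB << density,
 * so density 2 corresponds to a 64MiB device. */
unsigned long chipsize = (16 << density) << 20;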
+8 -1
include/linux/mtd/partitions.h
··· 71 72 #define put_partition_parser(p) do { module_put((p)->owner); } while(0) 73 74 - #endif 75
··· 71 72 #define put_partition_parser(p) do { module_put((p)->owner); } while(0) 73 74 + struct device; 75 + struct device_node; 76 77 + int __devinit of_mtd_parse_partitions(struct device *dev, 78 + struct mtd_info *mtd, 79 + struct device_node *node, 80 + struct mtd_partition **pparts); 81 + 82 + #endif
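A flash driver with an OF node would typically call the new parser from its probe path and register the whole device when no partitions are described. A sketch under the assumption that dev, mtd and node come from the driver's probe context:

struct mtd_partition *parts;
int nr_parts;

nr_parts = of_mtd_parse_partitions(dev, mtd, node, &parts);
if (nr_parts > 0)
        add_mtd_partitions(mtd, parts, nr_parts);
else
        add_mtd_device(mtd);    /* no partition map in the device tree */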
+1 -17
include/linux/mtd/ubi.h
··· 26 #include <mtd/ubi-user.h> 27 28 /* 29 - * UBI data type hint constants. 30 - * 31 - * UBI_LONGTERM: long-term data 32 - * UBI_SHORTTERM: short-term data 33 - * UBI_UNKNOWN: data persistence is unknown 34 - * 35 - * These constants are used when data is written to UBI volumes in order to 36 - * help the UBI wear-leveling unit to find more appropriate physical 37 - * eraseblocks. 38 - */ 39 - enum { 40 - UBI_LONGTERM = 1, 41 - UBI_SHORTTERM, 42 - UBI_UNKNOWN 43 - }; 44 - 45 - /* 46 * enum ubi_open_mode - UBI volume open mode constants. 47 * 48 * UBI_READONLY: read-only mode ··· 150 int len, int dtype); 151 int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum); 152 int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum); 153 int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum); 154 155 /*
··· 26 #include <mtd/ubi-user.h> 27 28 /* 29 * enum ubi_open_mode - UBI volume open mode constants. 30 * 31 * UBI_READONLY: read-only mode ··· 167 int len, int dtype); 168 int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum); 169 int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum); 170 + int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype); 171 int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum); 172 173 /*
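The new ubi_leb_map() pins a logical eraseblock to a physical one without writing any data (freshly mapped LEBs read back as all 0xFF); mapping an already-mapped LEB fails, so a caller may check first. A brief sketch under those assumptions:

err = ubi_is_mapped(desc, lnum);
if (err == 0)   /* not mapped yet */
        err = ubi_leb_map(desc, lnum, UBI_UNKNOWN);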
+1 -1
include/mtd/mtd-abi.h
··· 29 #define MTD_WRITEABLE 0x400 /* Device is writeable */ 30 #define MTD_BIT_WRITEABLE 0x800 /* Single bits can be flipped */ 31 #define MTD_NO_ERASE 0x1000 /* No erase necessary */ 32 - #define MTD_STUPID_LOCK 0x2000 /* Always locked after reset */ 33 34 // Some common devices / combinations of capabilities 35 #define MTD_CAP_ROM 0
··· 29 #define MTD_WRITEABLE 0x400 /* Device is writeable */ 30 #define MTD_BIT_WRITEABLE 0x800 /* Single bits can be flipped */ 31 #define MTD_NO_ERASE 0x1000 /* No erase necessary */ 32 + #define MTD_POWERUP_LOCK 0x2000 /* Always locked after reset */ 33 34 // Some common devices / combinations of capabilities 35 #define MTD_CAP_ROM 0
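Code that handles such chips keys off the renamed flag; a minimal sketch of the unlock-at-startup pattern, assuming a whole-device unlock is acceptable:

/* Chips flagged MTD_POWERUP_LOCK come out of reset with all
 * sectors locked; unlock the whole device before first use. */
if ((mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock)
        mtd->unlock(mtd, 0, mtd->size);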
+44 -3
include/mtd/ubi-header.h
··· 58 }; 59 60 /* 61 * Compatibility constants used by internal volumes. 62 * 63 * @UBI_COMPAT_DELETE: delete this internal volume before anything is written ··· 299 300 /* The layout volume contains the volume table */ 301 302 - #define UBI_LAYOUT_VOL_ID UBI_INTERNAL_VOL_START 303 #define UBI_LAYOUT_VOLUME_EBS 2 304 #define UBI_LAYOUT_VOLUME_NAME "layout volume" 305 #define UBI_LAYOUT_VOLUME_COMPAT UBI_COMPAT_REJECT ··· 328 * @upd_marker: if volume update was started but not finished 329 * @name_len: volume name length 330 * @name: the volume name 331 - * @padding2: reserved, zeroes 332 * @crc: a CRC32 checksum of the record 333 * 334 * The volume table records are stored in the volume table, which is stored in ··· 364 __u8 upd_marker; 365 __be16 name_len; 366 __u8 name[UBI_VOL_NAME_MAX+1]; 367 - __u8 padding2[24]; 368 __be32 crc; 369 } __attribute__ ((packed)); 370
··· 58 };
59
60 /*
61 + * Volume flags used in the volume table record.
62 + *
63 + * @UBI_VTBL_AUTORESIZE_FLG: auto-resize this volume
64 + *
65 + * The %UBI_VTBL_AUTORESIZE_FLG flag can be set for only one volume in the
66 + * volume table. UBI automatically re-sizes the volume which has this flag,
67 + * making it as large as possible. This means that if, after initialization,
68 + * UBI finds spare physical eraseblocks present on the device, it
69 + * automatically appends all of them to the volume
70 + * (the physical eraseblocks reserved for bad eraseblock handling and other
71 + * reserved physical eraseblocks are not taken). So, if there is a volume with
72 + * the %UBI_VTBL_AUTORESIZE_FLG flag set, the number of available logical
73 + * eraseblocks will be zero after UBI is loaded, because all of them will be
74 + * reserved for this volume. Note that the %UBI_VTBL_AUTORESIZE_FLG bit is
75 + * cleared after the volume has been initialized.
76 + *
77 + * The auto-resize feature is useful for device production purposes. For
78 + * example, different NAND flash chips may have different amounts of initial
79 + * bad eraseblocks, depending on the particular chip instance. Manufacturers
80 + * of NAND chips usually guarantee that the amount of initial bad eraseblocks
81 + * does not exceed a certain percentage, e.g. 2%. When one creates an UBI
82 + * image which will be flashed to the end devices in production, one does not
83 + * know the exact number of good physical eraseblocks each NAND chip will
84 + * have, yet this number is needed to calculate the volume sizes and store
85 + * them in the volume table of the UBI image. In this case, one of the volumes
86 + * (e.g., the one which will store the root file system) is marked as
87 + * "auto-resizable", and UBI will adjust its size on the first boot if needed.
88 + *
89 + * Note that UBI first reserves some physical eraseblocks for bad eraseblock
90 + * handling and only then re-sizes the volume, not vice versa. This means
91 + * that the pool of reserved physical eraseblocks will always be present.
92 + */
93 + enum {
94 + UBI_VTBL_AUTORESIZE_FLG = 0x01,
95 + };
96 +
97 /*
98 * Compatibility constants used by internal volumes.
99 *
100 * @UBI_COMPAT_DELETE: delete this internal volume before anything is written
··· 262
263 /* The layout volume contains the volume table */
264
265 + #define UBI_LAYOUT_VOLUME_ID UBI_INTERNAL_VOL_START
266 + #define UBI_LAYOUT_VOLUME_TYPE UBI_VID_DYNAMIC
267 + #define UBI_LAYOUT_VOLUME_ALIGN 1
268 #define UBI_LAYOUT_VOLUME_EBS 2
269 #define UBI_LAYOUT_VOLUME_NAME "layout volume"
270 #define UBI_LAYOUT_VOLUME_COMPAT UBI_COMPAT_REJECT
··· 289 * @upd_marker: if volume update was started but not finished
290 * @name_len: volume name length
291 * @name: the volume name
292 + * @flags: volume flags (%UBI_VTBL_AUTORESIZE_FLG)
293 + * @padding: reserved, zeroes
294 * @crc: a CRC32 checksum of the record
295 *
296 * The volume table records are stored in the volume table, which is stored in
··· 324 __u8 upd_marker;
325 __be16 name_len;
326 __u8 name[UBI_VOL_NAME_MAX+1];
327 + __u8 flags;
328 + __u8 padding[23];
329 __be32 crc;
330 } __attribute__ ((packed));
331
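An image-generation tool could mark one volume auto-resizable by setting the new flag in its volume table record and refreshing the record CRC. A sketch in kernel style, assuming crc32() from linux/crc32.h, UBI's internal seed UBI_CRC32_INIT, and UBI_VTBL_RECORD_SIZE_CRC covering the record minus its CRC field:

struct ubi_vtbl_record *rec = &vtbl[vol_id];

rec->flags |= UBI_VTBL_AUTORESIZE_FLG;
/* The CRC covers everything in the record except the CRC itself. */
rec->crc = cpu_to_be32(crc32(UBI_CRC32_INIT, rec,
                             UBI_VTBL_RECORD_SIZE_CRC));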
+117 -10
include/mtd/ubi-user.h
··· 22 #define __UBI_USER_H__ 23 24 /* 25 * UBI volume creation 26 * ~~~~~~~~~~~~~~~~~~~ 27 * ··· 63 * 64 * Volume update should be done via the %UBI_IOCVOLUP IOCTL command of the 65 * corresponding UBI volume character device. A pointer to a 64-bit update 66 - * size should be passed to the IOCTL. After then, UBI expects user to write 67 * this number of bytes to the volume character device. The update is finished 68 * when the claimed number of bytes is passed. So, the volume update sequence 69 * is something like: ··· 72 * ioctl(fd, UBI_IOCVOLUP, &image_size); 73 * write(fd, buf, image_size); 74 * close(fd); 75 */ 76 77 /* 78 - * When a new volume is created, users may either specify the volume number they 79 - * want to create or to let UBI automatically assign a volume number using this 80 - * constant. 81 */ 82 #define UBI_VOL_NUM_AUTO (-1) 83 84 /* Maximum volume name length */ 85 #define UBI_MAX_VOLUME_NAME 127 ··· 105 /* Re-size an UBI volume */ 106 #define UBI_IOCRSVOL _IOW(UBI_IOC_MAGIC, 2, struct ubi_rsvol_req) 107 108 /* IOCTL commands of UBI volume character devices */ 109 110 #define UBI_VOL_IOC_MAGIC 'O' ··· 122 #define UBI_IOCVOLUP _IOW(UBI_VOL_IOC_MAGIC, 0, int64_t) 123 /* An eraseblock erasure command, used for debugging, disabled by default */ 124 #define UBI_IOCEBER _IOW(UBI_VOL_IOC_MAGIC, 1, int32_t) 125 126 /* 127 * UBI volume type constants. ··· 153 */ 154 enum { 155 UBI_DYNAMIC_VOLUME = 3, 156 - UBI_STATIC_VOLUME = 4 157 }; 158 159 /** 160 * struct ubi_mkvol_req - volume description data structure used in 161 - * volume creation requests. 162 * @vol_id: volume number 163 * @alignment: volume alignment 164 * @bytes: volume size in bytes 165 * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) 166 - * @padding1: reserved for future, not used 167 * @name_len: volume name length 168 - * @padding2: reserved for future, not used 169 * @name: volume name 170 * 171 - * This structure is used by userspace programs when creating new volumes. The 172 * @used_bytes field is only necessary when creating static volumes. 173 * 174 * The @alignment field specifies the required alignment of the volume logical ··· 231 int8_t padding1; 232 int16_t name_len; 233 int8_t padding2[4]; 234 - char name[UBI_MAX_VOLUME_NAME+1]; 235 } __attribute__ ((packed)); 236 237 /** ··· 248 struct ubi_rsvol_req { 249 int64_t bytes; 250 int32_t vol_id; 251 } __attribute__ ((packed)); 252 253 #endif /* __UBI_USER_H__ */
··· 22 #define __UBI_USER_H__
23
24 /*
25 + * UBI device creation (the same as MTD device attachment)
26 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27 + *
28 + * MTD devices may be attached using the %UBI_IOCATT ioctl command of the UBI
29 + * control device. The caller has to properly fill and pass a
30 + * &struct ubi_attach_req object - UBI will attach the MTD device specified in
31 + * the request and return the newly created UBI device number as the ioctl
32 + * return value.
33 + *
34 + * UBI device deletion (the same as MTD device detachment)
35 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
36 + *
37 + * An UBI device may be deleted with the %UBI_IOCDET ioctl command of the UBI
38 + * control device.
39 + *
40 * UBI volume creation
41 * ~~~~~~~~~~~~~~~~~~~
42 *
··· 48 *
49 * Volume update should be done via the %UBI_IOCVOLUP IOCTL command of the
50 * corresponding UBI volume character device. A pointer to a 64-bit update
51 + * size should be passed to the IOCTL. After this, UBI expects the user to write
52 * this number of bytes to the volume character device. The update is finished
53 * when the claimed number of bytes is passed. So, the volume update sequence
54 * is something like:
··· 57 * ioctl(fd, UBI_IOCVOLUP, &image_size);
58 * write(fd, buf, image_size);
59 * close(fd);
60 + *
61 + * Atomic eraseblock change
62 + * ~~~~~~~~~~~~~~~~~~~~~~~~
63 + *
64 + * An atomic eraseblock change operation is done via the %UBI_IOCEBCH IOCTL
65 + * command of the corresponding UBI volume character device. A pointer to
66 + * &struct ubi_leb_change_req has to be passed to the IOCTL. Then the user is
67 + * expected to write the requested number of bytes. This is similar to the
68 + * "volume update" IOCTL.
69 */
70
71 /*
72 + * When a new UBI volume or UBI device is created, users may either specify the
73 + * volume/device number they want to create or let UBI automatically assign
74 + * the number using these constants.
75 */
76 #define UBI_VOL_NUM_AUTO (-1)
77 + #define UBI_DEV_NUM_AUTO (-1)
78
79 /* Maximum volume name length */
80 #define UBI_MAX_VOLUME_NAME 127
··· 80 /* Re-size an UBI volume */
81 #define UBI_IOCRSVOL _IOW(UBI_IOC_MAGIC, 2, struct ubi_rsvol_req)
82
83 + /* IOCTL commands of the UBI control character device */
84 +
85 + #define UBI_CTRL_IOC_MAGIC 'o'
86 +
87 + /* Attach an MTD device */
88 + #define UBI_IOCATT _IOW(UBI_CTRL_IOC_MAGIC, 64, struct ubi_attach_req)
89 + /* Detach an MTD device */
90 + #define UBI_IOCDET _IOW(UBI_CTRL_IOC_MAGIC, 65, int32_t)
91 +
92 /* IOCTL commands of UBI volume character devices */
93
94 #define UBI_VOL_IOC_MAGIC 'O'
··· 88 #define UBI_IOCVOLUP _IOW(UBI_VOL_IOC_MAGIC, 0, int64_t)
89 /* An eraseblock erasure command, used for debugging, disabled by default */
90 #define UBI_IOCEBER _IOW(UBI_VOL_IOC_MAGIC, 1, int32_t)
91 + /* An atomic eraseblock change command */
92 + #define UBI_IOCEBCH _IOW(UBI_VOL_IOC_MAGIC, 2, int32_t)
93 +
94 + /* Maximum MTD device name length supported by UBI */
95 + #define MAX_UBI_MTD_NAME_LEN 127
96 +
97 + /*
98 + * UBI data type hint constants.
99 + *
100 + * UBI_LONGTERM: long-term data
101 + * UBI_SHORTTERM: short-term data
102 + * UBI_UNKNOWN: data persistence is unknown
103 + *
104 + * These constants are used when data is written to UBI volumes in order to
105 + * help the UBI wear-leveling unit to find more appropriate physical
106 + * eraseblocks.
107 + */
108 + enum {
109 + UBI_LONGTERM = 1,
110 + UBI_SHORTTERM = 2,
111 + UBI_UNKNOWN = 3,
112 + };
113
114 /*
115 * UBI volume type constants.
··· 97 */
98 enum {
99 UBI_DYNAMIC_VOLUME = 3,
100 + UBI_STATIC_VOLUME = 4,
101 + };
102 +
103 + /**
104 + * struct ubi_attach_req - attach MTD device request.
105 + * @ubi_num: UBI device number to create
106 + * @mtd_num: MTD device number to attach
107 + * @vid_hdr_offset: VID header offset (use defaults if %0)
108 + * @padding: reserved for future, not used, has to be zeroed
109 + *
110 + * This data structure is used to specify the MTD device UBI has to attach and
111 + * the parameters it has to use. The number which should be assigned to the new
112 + * UBI device is passed in @ubi_num. UBI may automatically assign the number if
113 + * @UBI_DEV_NUM_AUTO is passed. In this case, the device number is returned in
114 + * @ubi_num.
115 + *
116 + * Most applications should pass %0 in @vid_hdr_offset to make UBI use the
117 + * default offset of the VID header within physical eraseblocks. The default
118 + * offset is the next min. I/O unit after the EC header. For example, it will
119 + * be offset 512 in case of a 512-byte page NAND flash with no sub-page support.
120 + * Or it will be 512 in case of a 2KiB page NAND flash with 4 512-byte sub-pages.
121 + *
122 + * But in rare cases, if this optimizes things, the VID header may be placed at
123 + * a different offset. For example, the boot-loader might do things faster if the
124 + * VID header sits at the end of the first 2KiB NAND page with 4 sub-pages. As
125 + * the boot-loader would not normally need to read EC headers (unless it needs
126 + * UBI in RW mode), it might be faster to calculate ECC. This is a weird example,
127 + * but a real-life one. So, in this example, @vid_hdr_offset would be
128 + * 2KiB-64 bytes = 1984. Note that this position is not even 512-byte
129 + * aligned, which is OK, as UBI is clever enough to realize this is the 4th
130 + * sub-page of the first page and add the needed padding.
131 + */
132 + struct ubi_attach_req {
133 + int32_t ubi_num;
134 + int32_t mtd_num;
135 + int32_t vid_hdr_offset;
136 + uint8_t padding[12];
137 };
138
139 /**
140 * struct ubi_mkvol_req - volume description data structure used in
141 + * volume creation requests.
142 * @vol_id: volume number
143 * @alignment: volume alignment
144 * @bytes: volume size in bytes
145 * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
146 + * @padding1: reserved for future, not used, has to be zeroed
147 * @name_len: volume name length
148 + * @padding2: reserved for future, not used, has to be zeroed
149 * @name: volume name
150 *
151 + * This structure is used by user-space programs when creating new volumes. The
152 * @used_bytes field is only necessary when creating static volumes.
153 *
154 * The @alignment field specifies the required alignment of the volume logical
··· 139 int8_t padding1;
140 int16_t name_len;
141 int8_t padding2[4];
142 + char name[UBI_MAX_VOLUME_NAME + 1];
143 } __attribute__ ((packed));
144
145 /**
··· 156 struct ubi_rsvol_req {
157 int64_t bytes;
158 int32_t vol_id;
159 + } __attribute__ ((packed));
160 +
161 + /**
162 + * struct ubi_leb_change_req - a data structure used in atomic logical
163 + * eraseblock change requests.
164 + * @lnum: logical eraseblock number to change 165 + * @bytes: how many bytes will be written to the logical eraseblock 166 + * @dtype: data type (%UBI_LONGTERM, %UBI_SHORTTERM, %UBI_UNKNOWN) 167 + * @padding: reserved for future, not used, has to be zeroed 168 + */ 169 + struct ubi_leb_change_req { 170 + int32_t lnum; 171 + int32_t bytes; 172 + uint8_t dtype; 173 + uint8_t padding[7]; 174 } __attribute__ ((packed)); 175 176 #endif /* __UBI_USER_H__ */
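From user space the new control-device interface can be exercised roughly as follows; /dev/ubi_ctrl is the conventional name of the control node and the helper is purely illustrative:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>

/* Attach MTD device @mtd_num and return the assigned UBI number. */
int attach_mtd(int mtd_num)
{
        struct ubi_attach_req req;
        int fd, ret;

        fd = open("/dev/ubi_ctrl", O_RDONLY);
        if (fd < 0)
                return -1;

        memset(&req, 0, sizeof(req));   /* padding has to be zeroed */
        req.ubi_num = UBI_DEV_NUM_AUTO; /* let UBI pick the number */
        req.mtd_num = mtd_num;
        req.vid_hdr_offset = 0;         /* use the default VID offset */

        ret = ioctl(fd, UBI_IOCATT, &req);
        close(fd);
        return ret < 0 ? -1 : req.ubi_num;
}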