Merge git://git.infradead.org/mtd-2.6

* git://git.infradead.org/mtd-2.6: (120 commits)
[MTD] Fix mtdoops.c compilation
[MTD] [NOR] fix startup lock when using multiple nor flash chips
[MTD] [DOC200x] eccbuf is statically defined and always evaluate to true
[MTD] Fix maps/physmap.c compilation with CONFIG_PM
[MTD] onenand: Add panic_write function to the onenand driver
[MTD] mtdoops: Use the panic_write function when present
[MTD] Add mtd panic_write function pointer
[MTD] [NAND] Freescale enhanced Local Bus Controller FCM NAND support.
[MTD] physmap.c: Add support for multiple resources
[MTD] [NAND] Fix misparenthesization introduced by commit 78b65179...
[MTD] [NAND] Fix Blackfin NFC ECC calculating bug with page size 512 bytes
[MTD] [NAND] Remove wrong operation in PM function of the BF54x NFC driver
[MTD] [NAND] Remove unused variable in plat_nand_remove
[MTD] Unlocking all Intel flash that is locked on power up.
[MTD] [NAND] at91_nand: Make mtdparts option can override board info
[MTD] mtdoops: Various minor cleanups
[MTD] mtdoops: Ensure sequential write to the buffer
[MTD] mtdoops: Perform write operations in a workqueue
[MTD] mtdoops: Add further error return code checking
[MTD] [NOR] Test devtype, not definition in flash_probe(), drivers/mtd/devices/lart.c
...

+4871 -2108

drivers/mtd/Kconfig | +11
···
 	  for your particular device. It won't happen automatically. The
 	  'armflash' map driver (CONFIG_MTD_ARMFLASH) does this, for example.

+config MTD_OF_PARTS
+	tristate "Flash partition map based on OF description"
+	depends on PPC_OF && MTD_PARTITIONS
+	help
+	  This provides a partition parsing function which derives
+	  the partition map from the children of the flash node,
+	  as described in Documentation/powerpc/booting-without-of.txt.
+
 comment "User Modules And Translation Layers"

 config MTD_CHAR
···
 	  This enables panic and oops messages to be logged to a circular
 	  buffer in a flash partition where it can be read back at some
 	  later point.
+
+	  To use, add console=ttyMTDx to the kernel command line,
+	  where x is the MTD device number to use.

 source "drivers/mtd/chips/Kconfig"
···
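
Two things land in this Kconfig hunk: the new OF-driven partition parser and a usage note for mtdoops (so, for example, console=ttyMTD5 logs oopses to MTD device 5). MTD_OF_PARTS plugs into the generic partition-parser framework; a minimal sketch of that interface follows. The parser body and names here are illustrative, not the actual ofpart.c, but register_mtd_parser() and struct mtd_part_parser are the real 2.6.24-era API from <linux/mtd/partitions.h>.

    #include <linux/module.h>
    #include <linux/mtd/mtd.h>
    #include <linux/mtd/partitions.h>

    /* Illustrative parser: a real one (like ofpart) would walk the
     * flash node's children and build an mtd_partition array. */
    static int example_parse_partitions(struct mtd_info *master,
                                        struct mtd_partition **pparts,
                                        unsigned long origin)
    {
            *pparts = NULL;
            return 0;               /* number of partitions found */
    }

    static struct mtd_part_parser example_parser = {
            .owner    = THIS_MODULE,
            .name     = "example",
            .parse_fn = example_parse_partitions,
    };

    static int __init example_parser_init(void)
    {
            return register_mtd_parser(&example_parser);
    }
    module_init(example_parser_init);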

drivers/mtd/Makefile | +1
···
 obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
 obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
 obj-$(CONFIG_MTD_AFS_PARTS)     += afs.o
+obj-$(CONFIG_MTD_OF_PARTS)      += ofpart.o

 # 'Users' - code which presents functionality to userspace.
 obj-$(CONFIG_MTD_CHAR)          += mtdchar.o
···

drivers/mtd/chips/cfi_cmdset_0001.c | +71 -7
···
 #define I82802AC	0x00ac
 #define MANUFACTURER_ST	0x0020
 #define M50LPW080	0x002F
+#define AT49BV640D	0x02de

 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
···
 }
 #endif

+/* Atmel chips don't use the same PRI format as Intel chips */
+static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
+{
+	struct map_info *map = mtd->priv;
+	struct cfi_private *cfi = map->fldrv_priv;
+	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+	struct cfi_pri_atmel atmel_pri;
+	uint32_t features = 0;
+
+	/* Reverse byteswapping */
+	extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
+	extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
+	extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);
+
+	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
+	memset((char *)extp + 5, 0, sizeof(*extp) - 5);
+
+	printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);
+
+	if (atmel_pri.Features & 0x01) /* chip erase supported */
+		features |= (1<<0);
+	if (atmel_pri.Features & 0x02) /* erase suspend supported */
+		features |= (1<<1);
+	if (atmel_pri.Features & 0x04) /* program suspend supported */
+		features |= (1<<2);
+	if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
+		features |= (1<<9);
+	if (atmel_pri.Features & 0x20) /* page mode read supported */
+		features |= (1<<7);
+	if (atmel_pri.Features & 0x40) /* queued erase supported */
+		features |= (1<<4);
+	if (atmel_pri.Features & 0x80) /* Protection bits supported */
+		features |= (1<<6);
+
+	extp->FeatureSupport = features;
+
+	/* burst write mode not supported */
+	cfi->cfiq->BufWriteTimeoutTyp = 0;
+	cfi->cfiq->BufWriteTimeoutMax = 0;
+}
+
 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
 static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
···
 /*
  * Some chips power-up with all sectors locked by default.
  */
-static void fixup_use_powerup_lock(struct mtd_info *mtd, void *param)
+static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
 {
-	printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
-	mtd->flags |= MTD_STUPID_LOCK;
+	struct map_info *map = mtd->priv;
+	struct cfi_private *cfi = map->fldrv_priv;
+	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
+
+	if (cfip->FeatureSupport&32) {
+		printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
+		mtd->flags |= MTD_POWERUP_LOCK;
+	}
 }

 static struct cfi_fixup cfi_fixup_table[] = {
+	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
 #endif
···
 #endif
 	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
 	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
-	{ MANUFACTURER_INTEL, 0x891c, fixup_use_powerup_lock, NULL, },
+	{ MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
 	{ 0, 0, NULL, NULL }
 };
···
 		return NULL;

 	if (extp->MajorVersion != '1' ||
-	    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
+	    (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
 		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
 		       "version %c.%c.\n", extp->MajorVersion,
 		       extp->MinorVersion);
···
 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
 {
 	int ret;
+	DECLARE_WAITQUEUE(wait, current);

  retry:
 	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING
···
 		}
 		spin_lock(&shared->lock);
 		spin_unlock(contender->mutex);
+	}
+
+	/* Check if we already have suspended erase
+	 * on this chip. Sleep. */
+	if (mode == FL_ERASING && shared->erasing
+	    && shared->erasing->oldstate == FL_ERASING) {
+		spin_unlock(&shared->lock);
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		add_wait_queue(&chip->wq, &wait);
+		spin_unlock(chip->mutex);
+		schedule();
+		remove_wait_queue(&chip->wq, &wait);
+		spin_lock(chip->mutex);
+		goto retry;
 	}

 	/* We now own it */
···
 	struct flchip *chip;
 	int ret = 0;

-	if ((mtd->flags & MTD_STUPID_LOCK)
+	if ((mtd->flags & MTD_POWERUP_LOCK)
 	    && extp && (extp->FeatureSupport & (1 << 5)))
 		cfi_intelext_save_locks(mtd);
···
 		spin_unlock(chip->mutex);
 	}

-	if ((mtd->flags & MTD_STUPID_LOCK)
+	if ((mtd->flags & MTD_POWERUP_LOCK)
 	    && extp && (extp->FeatureSupport & (1 << 5)))
 		cfi_intelext_restore_locks(mtd);
 }
···
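
The MTD_STUPID_LOCK flag is renamed to MTD_POWERUP_LOCK throughout, and the power-up-lock fixup now applies to any Intel chip (CFI_ID_ANY) instead of the single 0x891c device, gated on the extended query actually advertising the feature (FeatureSupport & 32, the same bit 5 that gates the suspend-time lock save/restore above). Callers above the chip driver are expected to unlock explicitly before writing; a minimal sketch, assuming only the stock 2.6.24 mtd_info interface (the helper name is invented):

    /* Chips flagged MTD_POWERUP_LOCK come out of reset with all
     * blocks locked; unlock the whole device once before use. */
    static void example_unlock_if_powerup_locked(struct mtd_info *mtd)
    {
            if ((mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock)
                    mtd->unlock(mtd, 0, mtd->size);
    }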

drivers/mtd/chips/cfi_cmdset_0002.c | +10 -4
···
 		extp->TopBottom = 2;
 	else
 		extp->TopBottom = 3;
+
+	/* burst write mode not supported */
+	cfi->cfiq->BufWriteTimeoutTyp = 0;
+	cfi->cfiq->BufWriteTimeoutMax = 0;
 }

 static void fixup_use_secsi(struct mtd_info *mtd, void *param)
···
 {
 	mtd->lock = cfi_atmel_lock;
 	mtd->unlock = cfi_atmel_unlock;
-	mtd->flags |= MTD_STUPID_LOCK;
+	mtd->flags |= MTD_POWERUP_LOCK;
 }

 static struct cfi_fixup cfi_fixup_table[] = {
+	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
 #ifdef AMD_BOOTLOC_BUG
 	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
 #endif
···
 #if !FORCE_WORD_WRITE
 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
 #endif
-	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
 	{ 0, 0, NULL, NULL }
 };
 static struct cfi_fixup jedec_fixup_table[] = {
···
 		/* Modify the unlock address if we are in compatibility mode */
 		if (	/* x16 in x8 mode */
 			((cfi->device_type == CFI_DEVICETYPE_X8) &&
-				(cfi->cfiq->InterfaceDesc == 2)) ||
+				(cfi->cfiq->InterfaceDesc ==
+					CFI_INTERFACE_X8_BY_X16_ASYNC)) ||
 			/* x32 in x16 mode */
 			((cfi->device_type == CFI_DEVICETYPE_X16) &&
-				(cfi->cfiq->InterfaceDesc == 4)))
+				(cfi->cfiq->InterfaceDesc ==
+					CFI_INTERFACE_X16_BY_X32_ASYNC)))
 		{
 			cfi->addr_unlock1 = 0xaaa;
 			cfi->addr_unlock2 = 0x555;
···
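
Note that the Atmel PRI fixup moves to the head of cfi_fixup_table: fixups run in table order, so the PRI conversion now happens before any later fixup reads the extended table. The InterfaceDesc comparison also gains names for the compatibility modes it checks; those unlock addresses feed the classic AMD three-cycle command sequence. A hedged sketch of that sequence using the real cfi_send_gen_cmd() helper from <linux/mtd/cfi.h> (the wrapper itself is illustrative):

    /* AMD-style command: two unlock cycles, then the command byte.
     * addr_unlock1/2 are 0x555/0x2AA natively, but 0xAAA/0x555 when
     * an x16 die runs on an x8 bus, hence the InterfaceDesc check. */
    static void example_send_amd_cmd(struct map_info *map,
                                     struct cfi_private *cfi,
                                     unsigned long base, u_char cmd)
    {
            cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
            cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
            cfi_send_gen_cmd(cmd, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
    }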

drivers/mtd/chips/cfi_probe.c | +6 -6
···
 	printk("Device size: 0x%X bytes (%d MiB)\n", 1 << cfip->DevSize, 1<< (cfip->DevSize - 20));
 	printk("Flash Device Interface description: 0x%4.4X\n", cfip->InterfaceDesc);
 	switch(cfip->InterfaceDesc) {
-	case 0:
+	case CFI_INTERFACE_X8_ASYNC:
 		printk("  - x8-only asynchronous interface\n");
 		break;

-	case 1:
+	case CFI_INTERFACE_X16_ASYNC:
 		printk("  - x16-only asynchronous interface\n");
 		break;

-	case 2:
+	case CFI_INTERFACE_X8_BY_X16_ASYNC:
 		printk("  - supports x8 and x16 via BYTE# with asynchronous interface\n");
 		break;

-	case 3:
+	case CFI_INTERFACE_X32_ASYNC:
 		printk("  - x32-only asynchronous interface\n");
 		break;

-	case 4:
+	case CFI_INTERFACE_X16_BY_X32_ASYNC:
 		printk("  - supports x16 and x32 via Word# with asynchronous interface\n");
 		break;

-	case 65535:
+	case CFI_INTERFACE_NOT_ALLOWED:
 		printk("  - Not Allowed / Reserved\n");
 		break;
···
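
The literal case values here come straight from the CFI query structure's device-interface code, and the new names substitute one-for-one, so the constants introduced by this series (presumably in <linux/mtd/cfi.h>) amount to:

    #define CFI_INTERFACE_X8_ASYNC          0
    #define CFI_INTERFACE_X16_ASYNC         1
    #define CFI_INTERFACE_X8_BY_X16_ASYNC   2
    #define CFI_INTERFACE_X32_ASYNC         3
    #define CFI_INTERFACE_X16_BY_X32_ASYNC  4
    #define CFI_INTERFACE_NOT_ALLOWED       65535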

drivers/mtd/chips/gen_probe.c | +1 -1
···
 		max_chips = 1;
 	}

-	mapsize = (max_chips + BITS_PER_LONG-1) / BITS_PER_LONG;
+	mapsize = sizeof(long) * ( (max_chips + BITS_PER_LONG-1) / BITS_PER_LONG );
 	chip_map = kzalloc(mapsize, GFP_KERNEL);
 	if (!chip_map) {
 		printk(KERN_WARNING "%s: kmalloc failed for CFI chip map\n", map->name);
···
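
The old expression computed the chip-presence bitmap size in longs but handed that count to kzalloc(), which takes bytes, so the map was undersized by a factor of sizeof(long) and bitmap operations on it (which touch whole longs) could overrun the buffer. The fixed expression is the standard bitmap sizing, equivalent to this sketch (helper name invented; BITS_TO_LONGS() is the kernel's own macro):

    #include <linux/bitops.h>
    #include <linux/slab.h>

    static unsigned long *example_alloc_chip_map(int max_chips)
    {
            /* bytes, not longs: one bit per potential chip */
            size_t mapsize = BITS_TO_LONGS(max_chips) * sizeof(long);
            return kzalloc(mapsize, GFP_KERNEL);
    }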

drivers/mtd/chips/jedec_probe.c | +595 -779
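
The bulk of this file's +595/-779 is a rework of the chip table: struct amd_flash_info loses the per-buswidth uaddr[4] array in favour of a single uaddr plus a devtypes bitmask, and its fields shrink to fixed-width integers. Every jedec_table entry (the AMD, Atmel, Fujitsu, Hyundai, Intel, Macronix, NEC, PMC, Sharp, SST, ST and Toshiba parts) is converted mechanically; entries whose old x8 and x16 unlock addresses disagreed keep one address with a /* ???? */ marker. Only the struct change and representative conversions are reproduced below. The payoff on the probe side is that a width mismatch can be rejected with a single mask test; a hedged sketch (function name invented, the real check lives in jedec_match()):

    /* cfi->device_type is the interface width in bytes (1, 2, 4),
     * which coincides with the bitmask values CFI_DEVICETYPE_X8 (1),
     * _X16 (2) and _X32 (4), so one AND replaces the uaddr[] lookup. */
    static inline int example_devtypes_match(const struct amd_flash_info *info,
                                             const struct cfi_private *cfi)
    {
            return (info->devtypes & cfi->device_type) != 0;
    }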
···
 struct unlock_addr {
-	u32 addr1;
-	u32 addr2;
+	uint32_t addr1;
+	uint32_t addr2;
 };
···
 	}
 };

-
 struct amd_flash_info {
-	const __u16 mfr_id;
-	const __u16 dev_id;
 	const char *name;
-	const int DevSize;
-	const int NumEraseRegions;
-	const int CmdSet;
-	const __u8 uaddr[4];		/* unlock addrs for 8, 16, 32, 64 */
-	const ulong regions[6];
+	const uint16_t mfr_id;
+	const uint16_t dev_id;
+	const uint8_t dev_size;
+	const uint8_t nr_regions;
+	const uint16_t cmd_set;
+	const uint32_t regions[6];
+	const uint8_t devtypes;		/* Bitmask for x8, x16 etc. */
+	const uint8_t uaddr;		/* unlock addrs for 8, 16, 32, 64 */
 };

 #define ERASEINFO(size,blocks) (size<<8)|(blocks-1)
···
 	{
 		.mfr_id		= MANUFACTURER_AMD,
 		.dev_id		= AM29F032B,
 		.name		= "AMD AM29F032B",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0555_0x02AA /* x8 */
-		},
-		.DevSize	= SIZE_4MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 1,
+		.uaddr		= MTD_UADDR_0x0555_0x02AA,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.dev_size	= SIZE_4MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 1,
 		.regions	= {
 			ERASEINFO(0x10000,64)
 		}
 	}, {
 		.mfr_id		= MANUFACTURER_AMD,
 		.dev_id		= AM29LV160DT,
 		.name		= "AMD AM29LV160DT",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555,	/* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA	/* x16 */
-		},
-		.DevSize	= SIZE_2MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 4,
 		.regions	= {
 			ERASEINFO(0x10000,31),
 			ERASEINFO(0x08000,1),
···
 		.mfr_id		= MANUFACTURER_ATMEL,
 		.dev_id		= AT49BV16X,
 		.name		= "Atmel AT49BV16X",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0555_0x0AAA,	/* x8 */
-			[1] = MTD_UADDR_0x0555_0x0AAA	/* x16 */
-		},
-		.DevSize	= SIZE_2MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0555_0x0AAA,	/* ???? */
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	= P_ID_AMD_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x02000,8),
 			ERASEINFO(0x10000,31)
···
 		.mfr_id		= MANUFACTURER_INTEL,
 		.dev_id		= I28F004B3B,
 		.name		= "Intel 28F004B3B",
-		.uaddr		= {
-			[0] = MTD_UADDR_UNNECESSARY,	/* x8 */
-		},
-		.DevSize	= SIZE_512KiB,
-		.CmdSet		= P_ID_INTEL_STD,
-		.NumEraseRegions= 2,
+		.devtypes	= CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_UNNECESSARY,
+		.dev_size	= SIZE_512KiB,
+		.cmd_set	= P_ID_INTEL_STD,
+		.nr_regions	= 2,
 		.regions	= {
 			ERASEINFO(0x02000, 8),
 			ERASEINFO(0x10000, 7),
···
 		.mfr_id		= MANUFACTURER_TOSHIBA,
 		.dev_id		= TC58FVT160,
 		.name		= "Toshiba TC58FVT160",
-		.uaddr		= {
-			[0] = MTD_UADDR_0x0AAA_0x0555,	/* x8 */
-			[1] = MTD_UADDR_0x0555_0x02AA	/* x16 */
-		},
-		.DevSize	= SIZE_2MiB,
-		.CmdSet		= P_ID_AMD_STD,
-		.NumEraseRegions= 4,
+		.devtypes	= CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+		.uaddr		= MTD_UADDR_0x0AAA_0x0555,
+		.dev_size	= SIZE_2MiB,
+		.cmd_set	=
P_ID_AMD_STD, 1689 + .nr_regions = 4, 1560 1690 .regions = { 1561 1691 ERASEINFO(0x10000,31), 1562 1692 ERASEINFO(0x08000,1), ··· 1565 1699 .mfr_id = MANUFACTURER_TOSHIBA, 1566 1700 .dev_id = TC58FVB160, 1567 1701 .name = "Toshiba TC58FVB160", 1568 - .uaddr = { 1569 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1570 - [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ 1571 - }, 1572 - .DevSize = SIZE_2MiB, 1573 - .CmdSet = P_ID_AMD_STD, 1574 - .NumEraseRegions= 4, 1702 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1703 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 1704 + .dev_size = SIZE_2MiB, 1705 + .cmd_set = P_ID_AMD_STD, 1706 + .nr_regions = 4, 1575 1707 .regions = { 1576 1708 ERASEINFO(0x04000,1), 1577 1709 ERASEINFO(0x02000,2), ··· 1580 1716 .mfr_id = MANUFACTURER_TOSHIBA, 1581 1717 .dev_id = TC58FVB321, 1582 1718 .name = "Toshiba TC58FVB321", 1583 - .uaddr = { 1584 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1585 - [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ 1586 - }, 1587 - .DevSize = SIZE_4MiB, 1588 - .CmdSet = P_ID_AMD_STD, 1589 - .NumEraseRegions= 2, 1719 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1720 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 1721 + .dev_size = SIZE_4MiB, 1722 + .cmd_set = P_ID_AMD_STD, 1723 + .nr_regions = 2, 1590 1724 .regions = { 1591 1725 ERASEINFO(0x02000,8), 1592 1726 ERASEINFO(0x10000,63) ··· 1593 1731 .mfr_id = MANUFACTURER_TOSHIBA, 1594 1732 .dev_id = TC58FVT321, 1595 1733 .name = "Toshiba TC58FVT321", 1596 - .uaddr = { 1597 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1598 - [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ 1599 - }, 1600 - .DevSize = SIZE_4MiB, 1601 - .CmdSet = P_ID_AMD_STD, 1602 - .NumEraseRegions= 2, 1734 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1735 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 1736 + .dev_size = SIZE_4MiB, 1737 + .cmd_set = P_ID_AMD_STD, 1738 + .nr_regions = 2, 1603 1739 .regions = { 1604 1740 ERASEINFO(0x10000,63), 1605 1741 ERASEINFO(0x02000,8) ··· 1606 1746 .mfr_id = MANUFACTURER_TOSHIBA, 1607 1747 .dev_id = TC58FVB641, 1608 1748 .name = "Toshiba TC58FVB641", 1609 - .uaddr = { 1610 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1611 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1612 - }, 1613 - .DevSize = SIZE_8MiB, 1614 - .CmdSet = P_ID_AMD_STD, 1615 - .NumEraseRegions= 2, 1749 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1750 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 1751 + .dev_size = SIZE_8MiB, 1752 + .cmd_set = P_ID_AMD_STD, 1753 + .nr_regions = 2, 1616 1754 .regions = { 1617 1755 ERASEINFO(0x02000,8), 1618 1756 ERASEINFO(0x10000,127) ··· 1619 1761 .mfr_id = MANUFACTURER_TOSHIBA, 1620 1762 .dev_id = TC58FVT641, 1621 1763 .name = "Toshiba TC58FVT641", 1622 - .uaddr = { 1623 - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ 1624 - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ 1625 - }, 1626 - .DevSize = SIZE_8MiB, 1627 - .CmdSet = P_ID_AMD_STD, 1628 - .NumEraseRegions= 2, 1764 + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1765 + .uaddr = MTD_UADDR_0x0AAA_0x0555, 1766 + .dev_size = SIZE_8MiB, 1767 + .cmd_set = P_ID_AMD_STD, 1768 + .nr_regions = 2, 1629 1769 .regions = { 1630 1770 ERASEINFO(0x10000,127), 1631 1771 ERASEINFO(0x02000,8) ··· 1632 1776 .mfr_id = MANUFACTURER_WINBOND, 1633 1777 .dev_id = W49V002A, 1634 1778 .name = "Winbond W49V002A", 1635 - .uaddr = { 1636 - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 1637 - }, 1638 - .DevSize = SIZE_256KiB, 1639 - .CmdSet = P_ID_AMD_STD, 1640 - .NumEraseRegions= 4, 1779 + .devtypes = CFI_DEVICETYPE_X8, 1780 + .uaddr = MTD_UADDR_0x5555_0x2AAA, 1781 + .dev_size = SIZE_256KiB, 1782 + .cmd_set = P_ID_AMD_STD, 1783 + 
.nr_regions = 4, 1641 1784 .regions = { 1642 1785 ERASEINFO(0x10000, 3), 1643 1786 ERASEINFO(0x08000, 1), ··· 1646 1791 } 1647 1792 }; 1648 1793 1649 - 1650 - static int cfi_jedec_setup(struct cfi_private *p_cfi, int index); 1651 - 1652 - static int jedec_probe_chip(struct map_info *map, __u32 base, 1653 - unsigned long *chip_map, struct cfi_private *cfi); 1654 - 1655 - static struct mtd_info *jedec_probe(struct map_info *map); 1656 - 1657 - static inline u32 jedec_read_mfr(struct map_info *map, __u32 base, 1794 + static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base, 1658 1795 struct cfi_private *cfi) 1659 1796 { 1660 1797 map_word result; ··· 1657 1810 return result.x[0] & mask; 1658 1811 } 1659 1812 1660 - static inline u32 jedec_read_id(struct map_info *map, __u32 base, 1813 + static inline u32 jedec_read_id(struct map_info *map, uint32_t base, 1661 1814 struct cfi_private *cfi) 1662 1815 { 1663 1816 map_word result; ··· 1668 1821 return result.x[0] & mask; 1669 1822 } 1670 1823 1671 - static inline void jedec_reset(u32 base, struct map_info *map, 1672 - struct cfi_private *cfi) 1824 + static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi) 1673 1825 { 1674 1826 /* Reset */ 1675 1827 ··· 1678 1832 * 0x2aaa, 0xF0 at 0x5555 this will not affect the AMD chips 1679 1833 * as they will ignore the writes and dont care what address 1680 1834 * the F0 is written to */ 1681 - if(cfi->addr_unlock1) { 1835 + if (cfi->addr_unlock1) { 1682 1836 DEBUG( MTD_DEBUG_LEVEL3, 1683 1837 "reset unlock called %x %x \n", 1684 1838 cfi->addr_unlock1,cfi->addr_unlock2); ··· 1687 1841 } 1688 1842 1689 1843 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); 1690 - /* Some misdesigned intel chips do not respond for 0xF0 for a reset, 1844 + /* Some misdesigned Intel chips do not respond for 0xF0 for a reset, 1691 1845 * so ensure we're in read mode. Send both the Intel and the AMD command 1692 1846 * for this. Intel uses 0xff for this, AMD uses 0xff for NOP, so 1693 1847 * this should be safe. ··· 1697 1851 } 1698 1852 1699 1853 1700 - static inline __u8 finfo_uaddr(const struct amd_flash_info *finfo, int device_type) 1701 - { 1702 - int uaddr_idx; 1703 - __u8 uaddr = MTD_UADDR_NOT_SUPPORTED; 1704 - 1705 - switch ( device_type ) { 1706 - case CFI_DEVICETYPE_X8: uaddr_idx = 0; break; 1707 - case CFI_DEVICETYPE_X16: uaddr_idx = 1; break; 1708 - case CFI_DEVICETYPE_X32: uaddr_idx = 2; break; 1709 - default: 1710 - printk(KERN_NOTICE "MTD: %s(): unknown device_type %d\n", 1711 - __func__, device_type); 1712 - goto uaddr_done; 1713 - } 1714 - 1715 - uaddr = finfo->uaddr[uaddr_idx]; 1716 - 1717 - if (uaddr != MTD_UADDR_NOT_SUPPORTED ) { 1718 - /* ASSERT("The unlock addresses for non-8-bit mode 1719 - are bollocks. We don't really need an array."); */ 1720 - uaddr = finfo->uaddr[0]; 1721 - } 1722 - 1723 - uaddr_done: 1724 - return uaddr; 1725 - } 1726 - 1727 - 1728 1854 static int cfi_jedec_setup(struct cfi_private *p_cfi, int index) 1729 1855 { 1730 1856 int i,num_erase_regions; 1731 - __u8 uaddr; 1857 + uint8_t uaddr; 1732 1858 1733 - printk("Found: %s\n",jedec_table[index].name); 1859 + if (! 
(jedec_table[index].devtypes & p_cfi->device_type)) { 1860 + DEBUG(MTD_DEBUG_LEVEL1, "Rejecting potential %s with incompatible %d-bit device type\n", 1861 + jedec_table[index].name, 4 * (1<<p_cfi->device_type)); 1862 + return 0; 1863 + } 1734 1864 1735 - num_erase_regions = jedec_table[index].NumEraseRegions; 1865 + printk(KERN_INFO "Found: %s\n",jedec_table[index].name); 1866 + 1867 + num_erase_regions = jedec_table[index].nr_regions; 1736 1868 1737 1869 p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL); 1738 1870 if (!p_cfi->cfiq) { ··· 1720 1896 1721 1897 memset(p_cfi->cfiq,0,sizeof(struct cfi_ident)); 1722 1898 1723 - p_cfi->cfiq->P_ID = jedec_table[index].CmdSet; 1724 - p_cfi->cfiq->NumEraseRegions = jedec_table[index].NumEraseRegions; 1725 - p_cfi->cfiq->DevSize = jedec_table[index].DevSize; 1899 + p_cfi->cfiq->P_ID = jedec_table[index].cmd_set; 1900 + p_cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions; 1901 + p_cfi->cfiq->DevSize = jedec_table[index].dev_size; 1726 1902 p_cfi->cfi_mode = CFI_MODE_JEDEC; 1727 1903 1728 1904 for (i=0; i<num_erase_regions; i++){ ··· 1734 1910 p_cfi->mfr = jedec_table[index].mfr_id; 1735 1911 p_cfi->id = jedec_table[index].dev_id; 1736 1912 1737 - uaddr = finfo_uaddr(&jedec_table[index], p_cfi->device_type); 1738 - if ( uaddr == MTD_UADDR_NOT_SUPPORTED ) { 1739 - kfree( p_cfi->cfiq ); 1740 - return 0; 1741 - } 1913 + uaddr = jedec_table[index].uaddr; 1742 1914 1743 - p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1; 1744 - p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2; 1915 + /* The table has unlock addresses in _bytes_, and we try not to let 1916 + our brains explode when we see the datasheets talking about address 1917 + lines numbered from A-1 to A18. The CFI table has unlock addresses 1918 + in device-words according to the mode the device is connected in */ 1919 + p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / p_cfi->device_type; 1920 + p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / p_cfi->device_type; 1745 1921 1746 1922 return 1; /* ok */ 1747 1923 } ··· 1754 1930 * be perfect - consequently there should be some module parameters that 1755 1931 * could be manually specified to force the chip info. 1756 1932 */ 1757 - static inline int jedec_match( __u32 base, 1933 + static inline int jedec_match( uint32_t base, 1758 1934 struct map_info *map, 1759 1935 struct cfi_private *cfi, 1760 1936 const struct amd_flash_info *finfo ) 1761 1937 { 1762 1938 int rc = 0; /* failure until all tests pass */ 1763 1939 u32 mfr, id; 1764 - __u8 uaddr; 1940 + uint8_t uaddr; 1765 1941 1766 1942 /* 1767 1943 * The IDs must match. 
For X16 and X32 devices operating in ··· 1774 1950 */ 1775 1951 switch (cfi->device_type) { 1776 1952 case CFI_DEVICETYPE_X8: 1777 - mfr = (__u8)finfo->mfr_id; 1778 - id = (__u8)finfo->dev_id; 1953 + mfr = (uint8_t)finfo->mfr_id; 1954 + id = (uint8_t)finfo->dev_id; 1779 1955 1780 1956 /* bjd: it seems that if we do this, we can end up 1781 1957 * detecting 16bit flashes as an 8bit device, even though ··· 1788 1964 } 1789 1965 break; 1790 1966 case CFI_DEVICETYPE_X16: 1791 - mfr = (__u16)finfo->mfr_id; 1792 - id = (__u16)finfo->dev_id; 1967 + mfr = (uint16_t)finfo->mfr_id; 1968 + id = (uint16_t)finfo->dev_id; 1793 1969 break; 1794 1970 case CFI_DEVICETYPE_X32: 1795 - mfr = (__u16)finfo->mfr_id; 1796 - id = (__u32)finfo->dev_id; 1971 + mfr = (uint16_t)finfo->mfr_id; 1972 + id = (uint32_t)finfo->dev_id; 1797 1973 break; 1798 1974 default: 1799 1975 printk(KERN_WARNING ··· 1808 1984 /* the part size must fit in the memory window */ 1809 1985 DEBUG( MTD_DEBUG_LEVEL3, 1810 1986 "MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n", 1811 - __func__, base, 1 << finfo->DevSize, base + (1 << finfo->DevSize) ); 1812 - if ( base + cfi_interleave(cfi) * ( 1 << finfo->DevSize ) > map->size ) { 1987 + __func__, base, 1 << finfo->dev_size, base + (1 << finfo->dev_size) ); 1988 + if ( base + cfi_interleave(cfi) * ( 1 << finfo->dev_size ) > map->size ) { 1813 1989 DEBUG( MTD_DEBUG_LEVEL3, 1814 1990 "MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n", 1815 1991 __func__, finfo->mfr_id, finfo->dev_id, 1816 - 1 << finfo->DevSize ); 1992 + 1 << finfo->dev_size ); 1817 1993 goto match_done; 1818 1994 } 1819 1995 1820 - uaddr = finfo_uaddr(finfo, cfi->device_type); 1821 - if ( uaddr == MTD_UADDR_NOT_SUPPORTED ) { 1996 + if (! (finfo->devtypes & cfi->device_type)) 1822 1997 goto match_done; 1823 - } 1998 + 1999 + uaddr = finfo->uaddr; 1824 2000 1825 2001 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n", 1826 2002 __func__, cfi->addr_unlock1, cfi->addr_unlock2 ); 1827 2003 if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr 1828 - && ( unlock_addrs[uaddr].addr1 != cfi->addr_unlock1 || 1829 - unlock_addrs[uaddr].addr2 != cfi->addr_unlock2 ) ) { 2004 + && ( unlock_addrs[uaddr].addr1 / cfi->device_type != cfi->addr_unlock1 || 2005 + unlock_addrs[uaddr].addr2 / cfi->device_type != cfi->addr_unlock2 ) ) { 1830 2006 DEBUG( MTD_DEBUG_LEVEL3, 1831 2007 "MTD %s(): 0x%.4x 0x%.4x did not match\n", 1832 2008 __func__, ··· 1866 2042 * were truly frobbing a real device. 
1867 2043 */ 1868 2044 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): return to ID mode\n", __func__ ); 1869 - if(cfi->addr_unlock1) { 2045 + if (cfi->addr_unlock1) { 1870 2046 cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); 1871 2047 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL); 1872 2048 } ··· 1892 2068 if (MTD_UADDR_UNNECESSARY == uaddr_idx) 1893 2069 return 0; 1894 2070 1895 - cfi->addr_unlock1 = unlock_addrs[uaddr_idx].addr1; 1896 - cfi->addr_unlock2 = unlock_addrs[uaddr_idx].addr2; 2071 + cfi->addr_unlock1 = unlock_addrs[uaddr_idx].addr1 / cfi->device_type; 2072 + cfi->addr_unlock2 = unlock_addrs[uaddr_idx].addr2 / cfi->device_type; 1897 2073 } 1898 2074 1899 2075 /* Make certain we aren't probing past the end of map */ ··· 1905 2081 1906 2082 } 1907 2083 /* Ensure the unlock addresses we try stay inside the map */ 1908 - probe_offset1 = cfi_build_cmd_addr( 1909 - cfi->addr_unlock1, 1910 - cfi_interleave(cfi), 1911 - cfi->device_type); 1912 - probe_offset2 = cfi_build_cmd_addr( 1913 - cfi->addr_unlock1, 1914 - cfi_interleave(cfi), 1915 - cfi->device_type); 2084 + probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, cfi_interleave(cfi), cfi->device_type); 2085 + probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, cfi_interleave(cfi), cfi->device_type); 1916 2086 if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) || 1917 2087 ((base + probe_offset2 + map_bankwidth(map)) >= map->size)) 1918 - { 1919 2088 goto retry; 1920 - } 1921 2089 1922 2090 /* Reset */ 1923 2091 jedec_reset(base, map, cfi); ··· 1944 2128 } 1945 2129 goto retry; 1946 2130 } else { 1947 - __u16 mfr; 1948 - __u16 id; 2131 + uint16_t mfr; 2132 + uint16_t id; 1949 2133 1950 2134 /* Make sure it is a chip of the same manufacturer and id */ 1951 2135 mfr = jedec_read_mfr(map, base, cfi);
+8 -1
drivers/mtd/cmdlinepart.c
··· 9 9 * 10 10 * mtdparts=<mtddef>[;<mtddef>] 11 11 * <mtddef> := <mtd-id>:<partdef>[,<partdef>] 12 - * <partdef> := <size>[@offset][<name>][ro] 12 + * <partdef> := <size>[@offset][<name>][ro][lk] 13 13 * <mtd-id> := unique name used in mapping driver/device (mtd->name) 14 14 * <size> := standard linux memsize OR "-" to denote all remaining space 15 15 * <name> := '(' NAME ')' ··· 140 140 if (strncmp(s, "ro", 2) == 0) 141 141 { 142 142 mask_flags |= MTD_WRITEABLE; 143 + s += 2; 144 + } 145 + 146 + /* if lk is found do NOT unlock the MTD partition */ 147 + if (strncmp(s, "lk", 2) == 0) 148 + { 149 + mask_flags |= MTD_POWERUP_LOCK; 143 150 s += 2; 144 151 } 145 152
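
The new [lk] token adds MTD_POWERUP_LOCK (the renamed MTD_STUPID_LOCK, see the mtdcore.c hunk below) to a partition's mask_flags, so that bit is masked out of the partition's flags and the automatic boot-time unlock never runs for it: the partition stays locked. A hypothetical command line exercising the grammar above, with the mtd-id and partition names invented for illustration:

	mtdparts=physmap-flash.0:256k(boot)ro,256k(env)lk,-(filesystem)
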
+2 -2
drivers/mtd/devices/doc2000.c
··· 632 632 len = ((from | 0x1ff) + 1) - from; 633 633 634 634 /* The ECC will not be calculated correctly if less than 512 is read */ 635 - if (len != 0x200 && eccbuf) 635 + if (len != 0x200) 636 636 printk(KERN_WARNING 637 637 "ECC needs a full sector read (adr: %lx size %lx)\n", 638 638 (long) from, (long) len); ··· 896 896 /* Let the caller know we completed it */ 897 897 *retlen += len; 898 898 899 - if (eccbuf) { 899 + { 900 900 unsigned char x[8]; 901 901 size_t dummy; 902 902 int ret;
+1 -1
drivers/mtd/devices/doc2001plus.c
··· 748 748 WriteDOC(DoC_GetDataOffset(mtd, &fto), docptr, Mplus_FlashCmd); 749 749 750 750 /* On interleaved devices the flags for 2nd half 512 are before data */ 751 - if (eccbuf && before) 751 + if (before) 752 752 fto -= 2; 753 753 754 754 /* issue the Serial Data In command to initial the Page Program process */
+1 -1
drivers/mtd/devices/lart.c
··· 323 323 /* put the flash back into command mode */ 324 324 write32 (DATA_TO_FLASH (READ_ARRAY),0x00000000); 325 325 326 - return (manufacturer == FLASH_MANUFACTURER && (devtype == FLASH_DEVICE_16mbit_TOP || FLASH_DEVICE_16mbit_BOTTOM)); 326 + return (manufacturer == FLASH_MANUFACTURER && (devtype == FLASH_DEVICE_16mbit_TOP || devtype == FLASH_DEVICE_16mbit_BOTTOM)); 327 327 } 328 328 329 329 /*
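
The lart.c hunk fixes a classic operator-precedence slip: a bare non-zero constant on the right of || makes the whole test a tautology, so flash_probe() accepted any device ID. A reduced illustration (the ID values are placeholders, not the real ones from lart.c):

	#define DEV_TOP    0xc4		/* placeholder device IDs */
	#define DEV_BOTTOM 0xc5

	static int buggy(int devtype)
	{
		/* parses as (devtype == DEV_TOP) || DEV_BOTTOM: always true */
		return devtype == DEV_TOP || DEV_BOTTOM;
	}

	static int fixed(int devtype)
	{
		return devtype == DEV_TOP || devtype == DEV_BOTTOM;
	}
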
+1 -1
drivers/mtd/devices/mtd_dataflash.c
··· 420 420 status = dataflash_waitready(priv->spi); 421 421 422 422 /* Check result of the compare operation */ 423 - if ((status & (1 << 6)) == 1) { 423 + if (status & (1 << 6)) { 424 424 printk(KERN_ERR "%s: compare page %u, err %d\n", 425 425 spi->dev.bus_id, pageaddr, status); 426 426 remaining = 0;
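
Same family of bug in a different guise: status & (1 << 6) evaluates to 0x00 or 0x40, never 1, so the old "== 1" comparison could never be true and DataFlash compare failures went unreported. The fix truth-tests the mask, as sketched:

	u8 status = 0x40;		/* bit 6 set: page compare failed */
	/* (status & (1 << 6)) is 0x40 here, so "== 1" never fired */
	if (status & (1 << 6))
		printk(KERN_ERR "compare failed\n");
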
+1 -8
drivers/mtd/maps/Kconfig
··· 110 110 Sun Microsystems boardsets. This driver will require CFI support 111 111 in the kernel, so if you did not enable CFI previously, do that now. 112 112 113 - config MTD_PNC2000 114 - tristate "CFI Flash device mapped on Photron PNC-2000" 115 - depends on X86 && MTD_CFI && MTD_PARTITIONS 116 - help 117 - PNC-2000 is the name of Network Camera product from PHOTRON 118 - Ltd. in Japan. It uses CFI-compliant flash. 119 - 120 113 config MTD_SC520CDP 121 114 tristate "CFI Flash device mapped on AMD SC520 CDP" 122 115 depends on X86 && MTD_CFI && MTD_CONCAT ··· 569 576 default "4" 570 577 571 578 config MTD_SHARP_SL 572 - bool "ROM mapped on Sharp SL Series" 579 + tristate "ROM mapped on Sharp SL Series" 573 580 depends on ARCH_PXA 574 581 help 575 582 This enables access to the flash chip on the Sharp SL Series of PDAs.
-1
drivers/mtd/maps/Makefile
··· 28 28 obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o 29 29 obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o 30 30 obj-$(CONFIG_MTD_PMC_MSP_RAMROOT)+= pmcmsp-ramroot.o 31 - obj-$(CONFIG_MTD_PNC2000) += pnc2000.o 32 31 obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o 33 32 obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o 34 33 obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o
+112 -64
drivers/mtd/maps/physmap.c
··· 20 20 #include <linux/mtd/map.h> 21 21 #include <linux/mtd/partitions.h> 22 22 #include <linux/mtd/physmap.h> 23 + #include <linux/mtd/concat.h> 23 24 #include <asm/io.h> 24 25 26 + #define MAX_RESOURCES 4 27 + 25 28 struct physmap_flash_info { 26 - struct mtd_info *mtd; 27 - struct map_info map; 29 + struct mtd_info *mtd[MAX_RESOURCES]; 30 + struct mtd_info *cmtd; 31 + struct map_info map[MAX_RESOURCES]; 28 32 struct resource *res; 29 33 #ifdef CONFIG_MTD_PARTITIONS 30 34 int nr_parts; ··· 36 32 #endif 37 33 }; 38 34 39 - 40 35 static int physmap_flash_remove(struct platform_device *dev) 41 36 { 42 37 struct physmap_flash_info *info; 43 38 struct physmap_flash_data *physmap_data; 39 + int i; 44 40 45 41 info = platform_get_drvdata(dev); 46 42 if (info == NULL) ··· 49 45 50 46 physmap_data = dev->dev.platform_data; 51 47 52 - if (info->mtd != NULL) { 53 - #ifdef CONFIG_MTD_PARTITIONS 54 - if (info->nr_parts) { 55 - del_mtd_partitions(info->mtd); 56 - kfree(info->parts); 57 - } else if (physmap_data->nr_parts) { 58 - del_mtd_partitions(info->mtd); 59 - } else { 60 - del_mtd_device(info->mtd); 61 - } 62 - #else 63 - del_mtd_device(info->mtd); 64 - #endif 65 - map_destroy(info->mtd); 48 + #ifdef CONFIG_MTD_CONCAT 49 + if (info->cmtd != info->mtd[0]) { 50 + del_mtd_device(info->cmtd); 51 + mtd_concat_destroy(info->cmtd); 66 52 } 53 + #endif 67 54 68 - if (info->map.virt != NULL) 69 - iounmap(info->map.virt); 55 + for (i = 0; i < MAX_RESOURCES; i++) { 56 + if (info->mtd[i] != NULL) { 57 + #ifdef CONFIG_MTD_PARTITIONS 58 + if (info->nr_parts) { 59 + del_mtd_partitions(info->mtd[i]); 60 + kfree(info->parts); 61 + } else if (physmap_data->nr_parts) { 62 + del_mtd_partitions(info->mtd[i]); 63 + } else { 64 + del_mtd_device(info->mtd[i]); 65 + } 66 + #else 67 + del_mtd_device(info->mtd[i]); 68 + #endif 69 + map_destroy(info->mtd[i]); 70 + } 71 + 72 + if (info->map[i].virt != NULL) 73 + iounmap(info->map[i].virt); 74 + } 70 75 71 76 if (info->res != NULL) { 72 77 release_resource(info->res); ··· 95 82 struct physmap_flash_data *physmap_data; 96 83 struct physmap_flash_info *info; 97 84 const char **probe_type; 98 - int err; 85 + int err = 0; 86 + int i; 87 + int devices_found = 0; 99 88 100 89 physmap_data = dev->dev.platform_data; 101 90 if (physmap_data == NULL) 102 91 return -ENODEV; 103 - 104 - printk(KERN_NOTICE "physmap platform flash device: %.8llx at %.8llx\n", 105 - (unsigned long long)(dev->resource->end - dev->resource->start + 1), 106 - (unsigned long long)dev->resource->start); 107 92 108 93 info = kzalloc(sizeof(struct physmap_flash_info), GFP_KERNEL); 109 94 if (info == NULL) { ··· 111 100 112 101 platform_set_drvdata(dev, info); 113 102 114 - info->res = request_mem_region(dev->resource->start, 115 - dev->resource->end - dev->resource->start + 1, 116 - dev->dev.bus_id); 117 - if (info->res == NULL) { 118 - dev_err(&dev->dev, "Could not reserve memory region\n"); 119 - err = -ENOMEM; 120 - goto err_out; 103 + for (i = 0; i < dev->num_resources; i++) { 104 + printk(KERN_NOTICE "physmap platform flash device: %.8llx at %.8llx\n", 105 + (unsigned long long)(dev->resource[i].end - dev->resource[i].start + 1), 106 + (unsigned long long)dev->resource[i].start); 107 + 108 + info->res = request_mem_region(dev->resource[i].start, 109 + dev->resource[i].end - dev->resource[i].start + 1, 110 + dev->dev.bus_id); 111 + if (info->res == NULL) { 112 + dev_err(&dev->dev, "Could not reserve memory region\n"); 113 + err = -ENOMEM; 114 + goto err_out; 115 + } 116 + 117 + info->map[i].name = 
dev->dev.bus_id; 118 + info->map[i].phys = dev->resource[i].start; 119 + info->map[i].size = dev->resource[i].end - dev->resource[i].start + 1; 120 + info->map[i].bankwidth = physmap_data->width; 121 + info->map[i].set_vpp = physmap_data->set_vpp; 122 + 123 + info->map[i].virt = ioremap(info->map[i].phys, info->map[i].size); 124 + if (info->map[i].virt == NULL) { 125 + dev_err(&dev->dev, "Failed to ioremap flash region\n"); 126 + err = EIO; 127 + goto err_out; 128 + } 129 + 130 + simple_map_init(&info->map[i]); 131 + 132 + probe_type = rom_probe_types; 133 + for (; info->mtd[i] == NULL && *probe_type != NULL; probe_type++) 134 + info->mtd[i] = do_map_probe(*probe_type, &info->map[i]); 135 + if (info->mtd[i] == NULL) { 136 + dev_err(&dev->dev, "map_probe failed\n"); 137 + err = -ENXIO; 138 + goto err_out; 139 + } else { 140 + devices_found++; 141 + } 142 + info->mtd[i]->owner = THIS_MODULE; 121 143 } 122 144 123 - info->map.name = dev->dev.bus_id; 124 - info->map.phys = dev->resource->start; 125 - info->map.size = dev->resource->end - dev->resource->start + 1; 126 - info->map.bankwidth = physmap_data->width; 127 - info->map.set_vpp = physmap_data->set_vpp; 128 - 129 - info->map.virt = ioremap(info->map.phys, info->map.size); 130 - if (info->map.virt == NULL) { 131 - dev_err(&dev->dev, "Failed to ioremap flash region\n"); 132 - err = EIO; 133 - goto err_out; 134 - } 135 - 136 - simple_map_init(&info->map); 137 - 138 - probe_type = rom_probe_types; 139 - for (; info->mtd == NULL && *probe_type != NULL; probe_type++) 140 - info->mtd = do_map_probe(*probe_type, &info->map); 141 - if (info->mtd == NULL) { 142 - dev_err(&dev->dev, "map_probe failed\n"); 145 + if (devices_found == 1) { 146 + info->cmtd = info->mtd[0]; 147 + } else if (devices_found > 1) { 148 + /* 149 + * We detected multiple devices. Concatenate them together. 
150 + */ 151 + #ifdef CONFIG_MTD_CONCAT 152 + info->cmtd = mtd_concat_create(info->mtd, devices_found, dev->dev.bus_id); 153 + if (info->cmtd == NULL) 154 + err = -ENXIO; 155 + #else 156 + printk(KERN_ERR "physmap-flash: multiple devices " 157 + "found but MTD concat support disabled.\n"); 143 158 err = -ENXIO; 144 - goto err_out; 159 + #endif 145 160 } 146 - info->mtd->owner = THIS_MODULE; 161 + if (err) 162 + goto err_out; 147 163 148 164 #ifdef CONFIG_MTD_PARTITIONS 149 - err = parse_mtd_partitions(info->mtd, part_probe_types, &info->parts, 0); 165 + err = parse_mtd_partitions(info->cmtd, part_probe_types, &info->parts, 0); 150 166 if (err > 0) { 151 - add_mtd_partitions(info->mtd, info->parts, err); 167 + add_mtd_partitions(info->cmtd, info->parts, err); 152 168 return 0; 153 169 } 154 170 155 171 if (physmap_data->nr_parts) { 156 172 printk(KERN_NOTICE "Using physmap partition information\n"); 157 - add_mtd_partitions(info->mtd, physmap_data->parts, 158 - physmap_data->nr_parts); 173 + add_mtd_partitions(info->cmtd, physmap_data->parts, 174 + physmap_data->nr_parts); 159 175 return 0; 160 176 } 161 177 #endif 162 178 163 - add_mtd_device(info->mtd); 179 + add_mtd_device(info->cmtd); 164 180 return 0; 165 181 166 182 err_out: ··· 200 162 { 201 163 struct physmap_flash_info *info = platform_get_drvdata(dev); 202 164 int ret = 0; 165 + int i; 203 166 204 167 if (info) 205 - ret = info->mtd->suspend(info->mtd); 168 + for (i = 0; i < MAX_RESOURCES; i++) 169 + ret |= info->mtd[i]->suspend(info->mtd[i]); 206 170 207 171 return ret; 208 172 } ··· 212 172 static int physmap_flash_resume(struct platform_device *dev) 213 173 { 214 174 struct physmap_flash_info *info = platform_get_drvdata(dev); 175 + int i; 176 + 215 177 if (info) 216 - info->mtd->resume(info->mtd); 178 + for (i = 0; i < MAX_RESOURCES; i++) 179 + info->mtd[i]->resume(info->mtd[i]); 217 180 return 0; 218 181 } 219 182 220 183 static void physmap_flash_shutdown(struct platform_device *dev) 221 184 { 222 185 struct physmap_flash_info *info = platform_get_drvdata(dev); 223 - if (info && info->mtd->suspend(info->mtd) == 0) 224 - info->mtd->resume(info->mtd); 186 + int i; 187 + 188 + for (i = 0; i < MAX_RESOURCES; i++) 189 + if (info && info->mtd[i]->suspend(info->mtd[i]) == 0) 190 + info->mtd[i]->resume(info->mtd[i]); 225 191 } 192 + #else 193 + #define physmap_flash_suspend NULL 194 + #define physmap_flash_resume NULL 195 + #define physmap_flash_shutdown NULL 226 196 #endif 227 197 228 198 static struct platform_driver physmap_flash_driver = { 229 199 .probe = physmap_flash_probe, 230 200 .remove = physmap_flash_remove, 231 - #ifdef CONFIG_PM 232 201 .suspend = physmap_flash_suspend, 233 202 .resume = physmap_flash_resume, 234 203 .shutdown = physmap_flash_shutdown, 235 - #endif 236 204 .driver = { 237 205 .name = "physmap-flash", 238 206 },
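
With this rework, each memory resource on the platform device gets its own mapping and chip probe, and multiple hits are fused into one logical MTD via mtd_concat_create() when CONFIG_MTD_CONCAT is enabled. A hedged sketch of what a board file might register to use this; the addresses, sizes and bus width are invented for illustration:

	#include <linux/platform_device.h>
	#include <linux/mtd/physmap.h>

	static struct physmap_flash_data board_flash_data = {
		.width	= 2,			/* 16-bit bus, board-specific */
	};

	static struct resource board_flash_resources[] = {
		{	/* first chip */
			.start	= 0x20000000,
			.end	= 0x20ffffff,
			.flags	= IORESOURCE_MEM,
		}, {	/* second chip */
			.start	= 0x24000000,
			.end	= 0x24ffffff,
			.flags	= IORESOURCE_MEM,
		},
	};

	static struct platform_device board_flash = {
		.name		= "physmap-flash",
		.id		= 0,
		.dev		= { .platform_data = &board_flash_data },
		.resource	= board_flash_resources,
		.num_resources	= ARRAY_SIZE(board_flash_resources),
	};
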
+27 -61
drivers/mtd/maps/physmap_of.c
··· 80 80 81 81 return nr_parts; 82 82 } 83 - 84 - static int __devinit parse_partitions(struct of_flash *info, 85 - struct of_device *dev) 86 - { 87 - const char *partname; 88 - static const char *part_probe_types[] 89 - = { "cmdlinepart", "RedBoot", NULL }; 90 - struct device_node *dp = dev->node, *pp; 91 - int nr_parts, i; 92 - 93 - /* First look for RedBoot table or partitions on the command 94 - * line, these take precedence over device tree information */ 95 - nr_parts = parse_mtd_partitions(info->mtd, part_probe_types, 96 - &info->parts, 0); 97 - if (nr_parts > 0) { 98 - add_mtd_partitions(info->mtd, info->parts, nr_parts); 99 - return 0; 100 - } 101 - 102 - /* First count the subnodes */ 103 - nr_parts = 0; 104 - for (pp = dp->child; pp; pp = pp->sibling) 105 - nr_parts++; 106 - 107 - if (nr_parts == 0) 108 - return parse_obsolete_partitions(dev, info, dp); 109 - 110 - info->parts = kzalloc(nr_parts * sizeof(*info->parts), 111 - GFP_KERNEL); 112 - if (!info->parts) 113 - return -ENOMEM; 114 - 115 - for (pp = dp->child, i = 0; pp; pp = pp->sibling, i++) { 116 - const u32 *reg; 117 - int len; 118 - 119 - reg = of_get_property(pp, "reg", &len); 120 - if (!reg || (len != 2*sizeof(u32))) { 121 - dev_err(&dev->dev, "Invalid 'reg' on %s\n", 122 - dp->full_name); 123 - kfree(info->parts); 124 - info->parts = NULL; 125 - return -EINVAL; 126 - } 127 - info->parts[i].offset = reg[0]; 128 - info->parts[i].size = reg[1]; 129 - 130 - partname = of_get_property(pp, "label", &len); 131 - if (!partname) 132 - partname = of_get_property(pp, "name", &len); 133 - info->parts[i].name = (char *)partname; 134 - 135 - if (of_get_property(pp, "read-only", &len)) 136 - info->parts[i].mask_flags = MTD_WRITEABLE; 137 - } 138 - 139 - return nr_parts; 140 - } 141 83 #else /* MTD_PARTITIONS */ 142 84 #define OF_FLASH_PARTS(info) (0) 143 85 #define parse_partitions(info, dev) (0) ··· 154 212 static int __devinit of_flash_probe(struct of_device *dev, 155 213 const struct of_device_id *match) 156 214 { 215 + #ifdef CONFIG_MTD_PARTITIONS 216 + static const char *part_probe_types[] 217 + = { "cmdlinepart", "RedBoot", NULL }; 218 + #endif 157 219 struct device_node *dp = dev->node; 158 220 struct resource res; 159 221 struct of_flash *info; ··· 220 274 } 221 275 info->mtd->owner = THIS_MODULE; 222 276 223 - err = parse_partitions(info, dev); 277 + #ifdef CONFIG_MTD_PARTITIONS 278 + /* First look for RedBoot table or partitions on the command 279 + * line, these take precedence over device tree information */ 280 + err = parse_mtd_partitions(info->mtd, part_probe_types, 281 + &info->parts, 0); 224 282 if (err < 0) 225 - goto err_out; 283 + return err; 284 + 285 + #ifdef CONFIG_MTD_OF_PARTS 286 + if (err == 0) { 287 + err = of_mtd_parse_partitions(&dev->dev, info->mtd, 288 + dp, &info->parts); 289 + if (err < 0) 290 + return err; 291 + } 292 + #endif 293 + 294 + if (err == 0) { 295 + err = parse_obsolete_partitions(dev, info, dp); 296 + if (err < 0) 297 + return err; 298 + } 226 299 227 300 if (err > 0) 228 - add_mtd_partitions(info->mtd, OF_FLASH_PARTS(info), err); 301 + add_mtd_partitions(info->mtd, info->parts, err); 229 302 else 303 + #endif 230 304 add_mtd_device(info->mtd); 231 305 232 306 return 0;
-93
drivers/mtd/maps/pnc2000.c
··· 1 - /* 2 - * pnc2000.c - mapper for Photron PNC-2000 board. 3 - * 4 - * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp> 5 - * 6 - * This code is GPL 7 - * 8 - * $Id: pnc2000.c,v 1.18 2005/11/07 11:14:28 gleixner Exp $ 9 - */ 10 - 11 - #include <linux/module.h> 12 - #include <linux/types.h> 13 - #include <linux/kernel.h> 14 - #include <linux/init.h> 15 - 16 - #include <linux/mtd/mtd.h> 17 - #include <linux/mtd/map.h> 18 - #include <linux/mtd/partitions.h> 19 - 20 - 21 - #define WINDOW_ADDR 0xbf000000 22 - #define WINDOW_SIZE 0x00400000 23 - 24 - /* 25 - * MAP DRIVER STUFF 26 - */ 27 - 28 - 29 - static struct map_info pnc_map = { 30 - .name = "PNC-2000", 31 - .size = WINDOW_SIZE, 32 - .bankwidth = 4, 33 - .phys = 0xFFFFFFFF, 34 - .virt = (void __iomem *)WINDOW_ADDR, 35 - }; 36 - 37 - 38 - /* 39 - * MTD 'PARTITIONING' STUFF 40 - */ 41 - static struct mtd_partition pnc_partitions[3] = { 42 - { 43 - .name = "PNC-2000 boot firmware", 44 - .size = 0x20000, 45 - .offset = 0 46 - }, 47 - { 48 - .name = "PNC-2000 kernel", 49 - .size = 0x1a0000, 50 - .offset = 0x20000 51 - }, 52 - { 53 - .name = "PNC-2000 filesystem", 54 - .size = 0x240000, 55 - .offset = 0x1c0000 56 - } 57 - }; 58 - 59 - /* 60 - * This is the master MTD device for which all the others are just 61 - * auto-relocating aliases. 62 - */ 63 - static struct mtd_info *mymtd; 64 - 65 - static int __init init_pnc2000(void) 66 - { 67 - printk(KERN_NOTICE "Photron PNC-2000 flash mapping: %x at %x\n", WINDOW_SIZE, WINDOW_ADDR); 68 - 69 - simple_map_init(&pnc_map); 70 - 71 - mymtd = do_map_probe("cfi_probe", &pnc_map); 72 - if (mymtd) { 73 - mymtd->owner = THIS_MODULE; 74 - return add_mtd_partitions(mymtd, pnc_partitions, 3); 75 - } 76 - 77 - return -ENXIO; 78 - } 79 - 80 - static void __exit cleanup_pnc2000(void) 81 - { 82 - if (mymtd) { 83 - del_mtd_partitions(mymtd); 84 - map_destroy(mymtd); 85 - } 86 - } 87 - 88 - module_init(init_pnc2000); 89 - module_exit(cleanup_pnc2000); 90 - 91 - MODULE_LICENSE("GPL"); 92 - MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp>"); 93 - MODULE_DESCRIPTION("MTD map driver for Photron PNC-2000 board");
+1 -1
drivers/mtd/maps/scb2_flash.c
··· 79 79 struct cfi_private *cfi = map->fldrv_priv; 80 80 81 81 /* barf if this doesn't look right */ 82 - if (cfi->cfiq->InterfaceDesc != 1) { 82 + if (cfi->cfiq->InterfaceDesc != CFI_INTERFACE_X16_ASYNC) { 83 83 printk(KERN_ERR MODNAME ": unsupported InterfaceDesc: %#x\n", 84 84 cfi->cfiq->InterfaceDesc); 85 85 return -1;
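
Replacing the bare 1 documents which CFI device-interface code scb2 insists on. The named codes in cfi.h mirror the CFI specification's device-interface field; the values below are quoted from memory and should be treated as an assumption to check against the tree:

	#define CFI_INTERFACE_X8_ASYNC		0
	#define CFI_INTERFACE_X16_ASYNC		1	/* what scb2_flash requires */
	#define CFI_INTERFACE_X8_BY_X16_ASYNC	2
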
+1 -1
drivers/mtd/mtd_blkdevs.c
··· 248 248 return -EBUSY; 249 249 } 250 250 251 - mutex_init(&new->lock); 252 251 list_add_tail(&new->list, &tr->devs); 253 252 added: 253 + mutex_init(&new->lock); 254 254 if (!tr->writesect) 255 255 new->readonly = 1; 256 256
+6 -2
drivers/mtd/mtdchar.c
··· 481 481 { 482 482 struct mtd_oob_buf buf; 483 483 struct mtd_oob_ops ops; 484 + uint32_t retlen; 484 485 485 486 if(!(file->f_mode & 2)) 486 487 return -EPERM; ··· 521 520 buf.start &= ~(mtd->oobsize - 1); 522 521 ret = mtd->write_oob(mtd, buf.start, &ops); 523 522 524 - if (copy_to_user(argp + sizeof(uint32_t), &ops.oobretlen, 525 - sizeof(uint32_t))) 523 + if (ops.oobretlen > 0xFFFFFFFFU) 524 + ret = -EOVERFLOW; 525 + retlen = ops.oobretlen; 526 + if (copy_to_user(&((struct mtd_oob_buf *)argp)->length, 527 + &retlen, sizeof(buf.length))) 526 528 ret = -EFAULT; 527 529 528 530 kfree(ops.oobbuf);
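
The mtdchar fix stops hard-coding the position of the length field: the old argp + sizeof(uint32_t) only worked because length happens to sit directly after start in the user-visible structure. For orientation, the layout assumed here (from mtd-abi.h, quoted from memory):

	struct mtd_oob_buf {
		uint32_t start;
		uint32_t length;	/* updated with ops.oobretlen on return */
		unsigned char __user *ptr;
	};
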
+1 -1
drivers/mtd/mtdcore.c
··· 61 61 62 62 /* Some chips always power up locked. Unlock them now */ 63 63 if ((mtd->flags & MTD_WRITEABLE) 64 - && (mtd->flags & MTD_STUPID_LOCK) && mtd->unlock) { 64 + && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) { 65 65 if (mtd->unlock(mtd, 0, mtd->size)) 66 66 printk(KERN_WARNING 67 67 "%s: unlock failed, "
+129 -66
drivers/mtd/mtdoops.c
··· 28 28 #include <linux/workqueue.h> 29 29 #include <linux/sched.h> 30 30 #include <linux/wait.h> 31 + #include <linux/delay.h> 32 + #include <linux/spinlock.h> 33 + #include <linux/interrupt.h> 31 34 #include <linux/mtd/mtd.h> 32 35 33 36 #define OOPS_PAGE_SIZE 4096 34 37 35 - static struct mtdoops_context { 38 + struct mtdoops_context { 36 39 int mtd_index; 37 - struct work_struct work; 40 + struct work_struct work_erase; 41 + struct work_struct work_write; 38 42 struct mtd_info *mtd; 39 43 int oops_pages; 40 44 int nextpage; 41 45 int nextcount; 42 46 43 47 void *oops_buf; 48 + 49 + /* writecount and disabling ready are spin lock protected */ 50 + spinlock_t writecount_lock; 44 51 int ready; 45 52 int writecount; 46 53 } oops_cxt; ··· 69 62 erase.mtd = mtd; 70 63 erase.callback = mtdoops_erase_callback; 71 64 erase.addr = offset; 72 - if (mtd->erasesize < OOPS_PAGE_SIZE) 73 - erase.len = OOPS_PAGE_SIZE; 74 - else 75 - erase.len = mtd->erasesize; 65 + erase.len = mtd->erasesize; 76 66 erase.priv = (u_long)&wait_q; 77 67 78 68 set_current_state(TASK_INTERRUPTIBLE); ··· 91 87 return 0; 92 88 } 93 89 94 - static int mtdoops_inc_counter(struct mtdoops_context *cxt) 90 + static void mtdoops_inc_counter(struct mtdoops_context *cxt) 95 91 { 96 92 struct mtd_info *mtd = cxt->mtd; 97 93 size_t retlen; ··· 107 103 108 104 ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4, 109 105 &retlen, (u_char *) &count); 110 - if ((retlen != 4) || (ret < 0)) { 106 + if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) { 111 107 printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)" 112 108 ", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE, 113 109 retlen, ret); 114 - return 1; 110 + schedule_work(&cxt->work_erase); 111 + return; 115 112 } 116 113 117 114 /* See if we need to erase the next block */ 118 - if (count != 0xffffffff) 119 - return 1; 115 + if (count != 0xffffffff) { 116 + schedule_work(&cxt->work_erase); 117 + return; 118 + } 120 119 121 120 printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n", 122 121 cxt->nextpage, cxt->nextcount); 123 122 cxt->ready = 1; 124 - return 0; 125 123 } 126 124 127 - static void mtdoops_prepare(struct mtdoops_context *cxt) 125 + /* Scheduled work - when we can't proceed without erasing a block */ 126 + static void mtdoops_workfunc_erase(struct work_struct *work) 128 127 { 128 + struct mtdoops_context *cxt = 129 + container_of(work, struct mtdoops_context, work_erase); 129 130 struct mtd_info *mtd = cxt->mtd; 130 131 int i = 0, j, ret, mod; 131 132 ··· 145 136 cxt->nextpage = 0; 146 137 } 147 138 148 - while (mtd->block_isbad && 149 - mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE)) { 139 + while (mtd->block_isbad) { 140 + ret = mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE); 141 + if (!ret) 142 + break; 143 + if (ret < 0) { 144 + printk(KERN_ERR "mtdoops: block_isbad failed, aborting.\n"); 145 + return; 146 + } 150 147 badblock: 151 148 printk(KERN_WARNING "mtdoops: Bad block at %08x\n", 152 149 cxt->nextpage * OOPS_PAGE_SIZE); ··· 169 154 for (j = 0, ret = -1; (j < 3) && (ret < 0); j++) 170 155 ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE); 171 156 172 - if (ret < 0) { 173 - if (mtd->block_markbad) 174 - mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE); 175 - goto badblock; 157 + if (ret >= 0) { 158 + printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount); 159 + cxt->ready = 1; 160 + return; 176 161 } 177 162 178 - printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount); 179 - 
180 - cxt->ready = 1; 163 + if (mtd->block_markbad && (ret == -EIO)) { 164 + ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE); 165 + if (ret < 0) { 166 + printk(KERN_ERR "mtdoops: block_markbad failed, aborting.\n"); 167 + return; 168 + } 169 + } 170 + goto badblock; 181 171 } 182 172 183 - static void mtdoops_workfunc(struct work_struct *work) 184 - { 185 - struct mtdoops_context *cxt = 186 - container_of(work, struct mtdoops_context, work); 187 - 188 - mtdoops_prepare(cxt); 189 - } 190 - 191 - static int find_next_position(struct mtdoops_context *cxt) 173 + static void mtdoops_write(struct mtdoops_context *cxt, int panic) 192 174 { 193 175 struct mtd_info *mtd = cxt->mtd; 194 - int page, maxpos = 0; 176 + size_t retlen; 177 + int ret; 178 + 179 + if (cxt->writecount < OOPS_PAGE_SIZE) 180 + memset(cxt->oops_buf + cxt->writecount, 0xff, 181 + OOPS_PAGE_SIZE - cxt->writecount); 182 + 183 + if (panic) 184 + ret = mtd->panic_write(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 185 + OOPS_PAGE_SIZE, &retlen, cxt->oops_buf); 186 + else 187 + ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 188 + OOPS_PAGE_SIZE, &retlen, cxt->oops_buf); 189 + 190 + cxt->writecount = 0; 191 + 192 + if ((retlen != OOPS_PAGE_SIZE) || (ret < 0)) 193 + printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n", 194 + cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret); 195 + 196 + mtdoops_inc_counter(cxt); 197 + } 198 + 199 + 200 + static void mtdoops_workfunc_write(struct work_struct *work) 201 + { 202 + struct mtdoops_context *cxt = 203 + container_of(work, struct mtdoops_context, work_write); 204 + 205 + mtdoops_write(cxt, 0); 206 + } 207 + 208 + static void find_next_position(struct mtdoops_context *cxt) 209 + { 210 + struct mtd_info *mtd = cxt->mtd; 211 + int ret, page, maxpos = 0; 195 212 u32 count, maxcount = 0xffffffff; 196 213 size_t retlen; 197 214 198 215 for (page = 0; page < cxt->oops_pages; page++) { 199 - mtd->read(mtd, page * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count); 216 + ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count); 217 + if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) { 218 + printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)" 219 + ", err %d.\n", page * OOPS_PAGE_SIZE, retlen, ret); 220 + continue; 221 + } 222 + 200 223 if (count == 0xffffffff) 201 224 continue; 202 225 if (maxcount == 0xffffffff) { ··· 258 205 cxt->ready = 1; 259 206 printk(KERN_DEBUG "mtdoops: Ready %d, %d (first init)\n", 260 207 cxt->nextpage, cxt->nextcount); 261 - return 0; 208 + return; 262 209 } 263 210 264 211 cxt->nextpage = maxpos; 265 212 cxt->nextcount = maxcount; 266 213 267 - return mtdoops_inc_counter(cxt); 214 + mtdoops_inc_counter(cxt); 268 215 } 269 216 270 217 271 218 static void mtdoops_notify_add(struct mtd_info *mtd) 272 219 { 273 220 struct mtdoops_context *cxt = &oops_cxt; 274 - int ret; 275 221 276 222 if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0) 277 223 return; ··· 281 229 return; 282 230 } 283 231 232 + if (mtd->erasesize < OOPS_PAGE_SIZE) { 233 + printk(KERN_ERR "Eraseblock size of MTD partition %d too small\n", 234 + mtd->index); 235 + return; 236 + } 237 + 284 238 cxt->mtd = mtd; 285 239 cxt->oops_pages = mtd->size / OOPS_PAGE_SIZE; 286 240 287 - ret = find_next_position(cxt); 288 - if (ret == 1) 289 - mtdoops_prepare(cxt); 241 + find_next_position(cxt); 290 242 291 - printk(KERN_DEBUG "mtdoops: Attached to MTD device %d\n", mtd->index); 243 + printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", 
mtd->index); 292 244 } 293 245 294 246 static void mtdoops_notify_remove(struct mtd_info *mtd) ··· 310 254 { 311 255 struct mtdoops_context *cxt = &oops_cxt; 312 256 struct mtd_info *mtd = cxt->mtd; 313 - size_t retlen; 314 - int ret; 257 + unsigned long flags; 315 258 316 - if (!cxt->ready || !mtd) 259 + if (!cxt->ready || !mtd || cxt->writecount == 0) 317 260 return; 318 261 319 - if (cxt->writecount == 0) 262 + /* 263 + * Once ready is 0 and we've held the lock no further writes to the 264 + * buffer will happen 265 + */ 266 + spin_lock_irqsave(&cxt->writecount_lock, flags); 267 + if (!cxt->ready) { 268 + spin_unlock_irqrestore(&cxt->writecount_lock, flags); 320 269 return; 321 - 322 - if (cxt->writecount < OOPS_PAGE_SIZE) 323 - memset(cxt->oops_buf + cxt->writecount, 0xff, 324 - OOPS_PAGE_SIZE - cxt->writecount); 325 - 326 - ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 327 - OOPS_PAGE_SIZE, &retlen, cxt->oops_buf); 270 + } 328 271 cxt->ready = 0; 329 - cxt->writecount = 0; 272 + spin_unlock_irqrestore(&cxt->writecount_lock, flags); 330 273 331 - if ((retlen != OOPS_PAGE_SIZE) || (ret < 0)) 332 - printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n", 333 - cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret); 334 - 335 - ret = mtdoops_inc_counter(cxt); 336 - if (ret == 1) 337 - schedule_work(&cxt->work); 274 + if (mtd->panic_write && in_interrupt()) 275 + /* Interrupt context, we're going to panic so try and log */ 276 + mtdoops_write(cxt, 1); 277 + else 278 + schedule_work(&cxt->work_write); 338 279 } 339 280 340 281 static void ··· 339 286 { 340 287 struct mtdoops_context *cxt = co->data; 341 288 struct mtd_info *mtd = cxt->mtd; 342 - int i; 289 + unsigned long flags; 343 290 344 291 if (!oops_in_progress) { 345 292 mtdoops_console_sync(); ··· 347 294 } 348 295 349 296 if (!cxt->ready || !mtd) 297 + return; 298 + 299 + /* Locking on writecount ensures sequential writes to the buffer */ 300 + spin_lock_irqsave(&cxt->writecount_lock, flags); 301 + 302 + /* Check ready status didn't change whilst waiting for the lock */ 303 + if (!cxt->ready) 350 304 return; 351 305 352 306 if (cxt->writecount == 0) { ··· 365 305 if ((count + cxt->writecount) > OOPS_PAGE_SIZE) 366 306 count = OOPS_PAGE_SIZE - cxt->writecount; 367 307 368 - for (i = 0; i < count; i++, s++) 369 - *((char *)(cxt->oops_buf) + cxt->writecount + i) = *s; 308 + memcpy(cxt->oops_buf + cxt->writecount, s, count); 309 + cxt->writecount += count; 370 310 371 - cxt->writecount = cxt->writecount + count; 311 + spin_unlock_irqrestore(&cxt->writecount_lock, flags); 312 + 313 + if (cxt->writecount == OOPS_PAGE_SIZE) 314 + mtdoops_console_sync(); 372 315 } 373 316 374 317 static int __init mtdoops_console_setup(struct console *co, char *options) ··· 397 334 .write = mtdoops_console_write, 398 335 .setup = mtdoops_console_setup, 399 336 .unblank = mtdoops_console_sync, 400 - .flags = CON_PRINTBUFFER, 401 337 .index = -1, 402 338 .data = &oops_cxt, 403 339 }; ··· 409 347 cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE); 410 348 411 349 if (!cxt->oops_buf) { 412 - printk(KERN_ERR "Failed to allocate oops buffer workspace\n"); 350 + printk(KERN_ERR "Failed to allocate mtdoops buffer workspace\n"); 413 351 return -ENOMEM; 414 352 } 415 353 416 - INIT_WORK(&cxt->work, mtdoops_workfunc); 354 + INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase); 355 + INIT_WORK(&cxt->work_write, mtdoops_workfunc_write); 417 356 418 357 register_console(&mtdoops_console); 419 358 register_mtd_user(&mtdoops_notifier);
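
Underlying the mtdoops churn is a simple on-flash convention: every OOPS_PAGE_SIZE record opens with a 32-bit sequence counter, and erased flash reads back 0xffffffff, so a blank counter marks a free record. A stripped-down sketch of the find_next_position() scan; read_counter() is a hypothetical 4-byte read helper, and the counter-wraparound case the real code handles is omitted:

	static int newest_record(struct mtd_info *mtd, int oops_pages)
	{
		u32 count, maxcount = 0xffffffff;
		int page, maxpos = 0;

		for (page = 0; page < oops_pages; page++) {
			if (read_counter(mtd, page, &count) < 0)
				continue;		/* unreadable record */
			if (count == 0xffffffff)
				continue;		/* erased: free slot */
			if (maxcount == 0xffffffff || count > maxcount) {
				maxcount = count;	/* newest so far */
				maxpos = page;
			}
		}
		return maxpos;
	}
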
+17
drivers/mtd/mtdpart.c
··· 151 151 len, retlen, buf); 152 152 } 153 153 154 + static int part_panic_write (struct mtd_info *mtd, loff_t to, size_t len, 155 + size_t *retlen, const u_char *buf) 156 + { 157 + struct mtd_part *part = PART(mtd); 158 + if (!(mtd->flags & MTD_WRITEABLE)) 159 + return -EROFS; 160 + if (to >= mtd->size) 161 + len = 0; 162 + else if (to + len > mtd->size) 163 + len = mtd->size - to; 164 + return part->master->panic_write (part->master, to + part->offset, 165 + len, retlen, buf); 166 + } 167 + 154 168 static int part_write_oob(struct mtd_info *mtd, loff_t to, 155 169 struct mtd_oob_ops *ops) 156 170 { ··· 365 351 366 352 slave->mtd.read = part_read; 367 353 slave->mtd.write = part_write; 354 + 355 + if (master->panic_write) 356 + slave->mtd.panic_write = part_panic_write; 368 357 369 358 if(master->point && master->unpoint){ 370 359 slave->mtd.point = part_point;
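
part_panic_write makes the new panic-capable path usable on partitions, not just whole chips. The method exists because mtd->write may sleep or take locks a dying kernel can no longer honour; mtdoops above only takes the panic path when in_interrupt() is true. A hedged caller-side sketch (log_write is illustrative, not a kernel API):

	static int log_write(struct mtd_info *mtd, loff_t ofs, size_t len,
			     const u_char *buf, int panicking)
	{
		size_t retlen;

		if (panicking && mtd->panic_write)
			return mtd->panic_write(mtd, ofs, len, &retlen, buf);
		return mtd->write(mtd, ofs, len, &retlen, buf);
	}
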
+25 -1
drivers/mtd/nand/Kconfig
··· 93 93 94 94 config MTD_NAND_BF5XX 95 95 tristate "Blackfin on-chip NAND Flash Controller driver" 96 - depends on BF54x && MTD_NAND 96 + depends on (BF54x || BF52x) && MTD_NAND 97 97 help 98 98 This enables the Blackfin on-chip NAND flash controller 99 99 ··· 283 283 tristate "Support for NAND Flash on CM-X270 modules" 284 284 depends on MTD_NAND && MACH_ARMCORE 285 285 286 + config MTD_NAND_PASEMI 287 + tristate "NAND support for PA Semi PWRficient" 288 + depends on MTD_NAND && PPC_PASEMI 289 + help 290 + Enables support for NAND Flash interface on PA Semi PWRficient 291 + based boards 286 292 287 293 config MTD_NAND_NANDSIM 288 294 tristate "Support for NAND Flash Simulator" ··· 311 305 help 312 306 These two (and possibly other) Alauda-based cardreaders for 313 307 SmartMedia and xD allow raw flash access. 308 + 309 + config MTD_NAND_ORION 310 + tristate "NAND Flash support for Marvell Orion SoC" 311 + depends on ARCH_ORION && MTD_NAND 312 + help 313 + This enables the NAND flash controller on Orion machines. 314 + 315 + No board specific support is done by this driver, each board 316 + must advertise a platform_device for the driver to attach. 317 + 318 + config MTD_NAND_FSL_ELBC 319 + tristate "NAND support for Freescale eLBC controllers" 320 + depends on MTD_NAND && PPC_OF 321 + help 322 + Various Freescale chips, including the 8313, include a NAND Flash 323 + Controller Module with built-in hardware ECC capabilities. 324 + Enabling this option will enable you to use this to control 325 + external NAND devices. 314 326 315 327 endif # MTD_NAND
+3
drivers/mtd/nand/Makefile
··· 29 29 obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o 30 30 obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o 31 31 obj-$(CONFIG_MTD_ALAUDA) += alauda.o 32 + obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o 33 + obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o 34 + obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o 32 35 33 36 nand-objs := nand_base.o nand_bbt.o
+6 -6
drivers/mtd/nand/at91_nand.c
··· 156 156 } 157 157 158 158 #ifdef CONFIG_MTD_PARTITIONS 159 - if (host->board->partition_info) 160 - partitions = host->board->partition_info(mtd->size, &num_partitions); 161 159 #ifdef CONFIG_MTD_CMDLINE_PARTS 162 - else { 163 - mtd->name = "at91_nand"; 164 - num_partitions = parse_mtd_partitions(mtd, part_probes, &partitions, 0); 165 - } 160 + mtd->name = "at91_nand"; 161 + num_partitions = parse_mtd_partitions(mtd, part_probes, 162 + &partitions, 0); 166 163 #endif 164 + if (num_partitions <= 0 && host->board->partition_info) 165 + partitions = host->board->partition_info(mtd->size, 166 + &num_partitions); 167 167 168 168 if ((!partitions) || (num_partitions == 0)) { 169 169 printk(KERN_ERR "at91_nand: No parititions defined, or unsupported device.\n");
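
at91_nand now consults cmdline partitions first and falls back to the board callback only when none parse, so a boot-time mtdparts= can override static board tables. With the fixed mtd name set above, such an override might look like this (the partition layout is invented):

	mtdparts=at91_nand:128k(bootstrap)ro,1M(kernel),-(rootfs)
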
+28 -11
drivers/mtd/nand/bf5xx_nand.c
··· 74 74 static int hardware_ecc; 75 75 #endif 76 76 77 - static unsigned short bfin_nfc_pin_req[] = {P_NAND_CE, P_NAND_RB, 0}; 77 + static unsigned short bfin_nfc_pin_req[] = 78 + {P_NAND_CE, 79 + P_NAND_RB, 80 + P_NAND_D0, 81 + P_NAND_D1, 82 + P_NAND_D2, 83 + P_NAND_D3, 84 + P_NAND_D4, 85 + P_NAND_D5, 86 + P_NAND_D6, 87 + P_NAND_D7, 88 + P_NAND_WE, 89 + P_NAND_RE, 90 + P_NAND_CLE, 91 + P_NAND_ALE, 92 + 0}; 78 93 79 94 /* 80 95 * Data structures for bf5xx nand flash controller driver ··· 293 278 u16 ecc0, ecc1; 294 279 u32 code[2]; 295 280 u8 *p; 296 - int bytes = 3, i; 297 281 298 282 /* first 4 bytes ECC code for 256 page size */ 299 283 ecc0 = bfin_read_NFC_ECC0(); ··· 302 288 303 289 dev_dbg(info->device, "returning ecc 0x%08x\n", code[0]); 304 290 291 + /* first 3 bytes in ecc_code for 256 page size */ 292 + p = (u8 *) code; 293 + memcpy(ecc_code, p, 3); 294 + 305 295 /* second 4 bytes ECC code for 512 page size */ 306 296 if (page_size == 512) { 307 297 ecc0 = bfin_read_NFC_ECC2(); 308 298 ecc1 = bfin_read_NFC_ECC3(); 309 299 code[1] = (ecc0 & 0x3FF) | ((ecc1 & 0x3FF) << 11); 310 - bytes = 6; 300 + 301 + /* second 3 bytes in ecc_code for second 256 302 + * bytes of 512 page size 303 + */ 304 + p = (u8 *) (code + 1); 305 + memcpy((ecc_code + 3), p, 3); 311 306 dev_dbg(info->device, "returning ecc 0x%08x\n", code[1]); 312 307 } 313 - 314 - p = (u8 *)code; 315 - for (i = 0; i < bytes; i++) 316 - ecc_code[i] = p[i]; 317 308 318 309 return 0; 319 310 } ··· 526 507 527 508 init_completion(&info->dma_completion); 528 509 510 + #ifdef CONFIG_BF54x 529 511 /* Setup DMAC1 channel mux for NFC which shared with SDH */ 530 512 val = bfin_read_DMAC1_PERIMUX(); 531 513 val &= 0xFFFE; 532 514 bfin_write_DMAC1_PERIMUX(val); 533 515 SSYNC(); 534 - 516 + #endif 535 517 /* Request NFC DMA channel */ 536 518 ret = request_dma(CH_NFC, "BF5XX NFC driver"); 537 519 if (ret < 0) { ··· 763 743 static int bf5xx_nand_resume(struct platform_device *dev) 764 744 { 765 745 struct bf5xx_nand_info *info = platform_get_drvdata(dev); 766 - 767 - if (info) 768 - bf5xx_nand_hw_init(info); 769 746 770 747 return 0; 771 748 }
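
The Blackfin ECC fix reads two 10-bit fields from the ECC registers per 256-byte half, packs them as (ecc0 & 0x3FF) | ((ecc1 & 0x3FF) << 11), and copies exactly the low 3 bytes into ecc_code, once per half, instead of the old byte loop. A condensed restatement; the byte-wise copy leans on little-endian layout, which holds on Blackfin:

	u32 code = (ecc0 & 0x3FF) | ((ecc1 & 0x3FF) << 11);
	memcpy(ecc_code, &code, 3);	/* low 3 bytes carry the ECC */
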
+19
drivers/mtd/nand/cafe_nand.c
··· 11 11 #undef DEBUG 12 12 #include <linux/mtd/mtd.h> 13 13 #include <linux/mtd/nand.h> 14 + #include <linux/mtd/partitions.h> 14 15 #include <linux/rslib.h> 15 16 #include <linux/pci.h> 16 17 #include <linux/delay.h> ··· 53 52 54 53 struct cafe_priv { 55 54 struct nand_chip nand; 55 + struct mtd_partition *parts; 56 56 struct pci_dev *pdev; 57 57 void __iomem *mmio; 58 58 struct rs_control *rs; ··· 85 83 static unsigned int numtimings; 86 84 static int timing[3]; 87 85 module_param_array(timing, int, &numtimings, 0644); 86 + 87 + #ifdef CONFIG_MTD_PARTITIONS 88 + static const char *part_probes[] = { "RedBoot", NULL }; 89 + #endif 88 90 89 91 /* Hrm. Why isn't this already conditional on something in the struct device? */ 90 92 #define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0) ··· 626 620 { 627 621 struct mtd_info *mtd; 628 622 struct cafe_priv *cafe; 623 + struct mtd_partition *parts; 629 624 uint32_t ctrl; 625 + int nr_parts; 630 626 int err = 0; 631 627 632 628 /* Very old versions shared the same PCI ident for all three ··· 795 787 goto out_irq; 796 788 797 789 pci_set_drvdata(pdev, mtd); 790 + 791 + /* We register the whole device first, separate from the partitions */ 798 792 add_mtd_device(mtd); 793 + 794 + #ifdef CONFIG_MTD_PARTITIONS 795 + nr_parts = parse_mtd_partitions(mtd, part_probes, &parts, 0); 796 + if (nr_parts > 0) { 797 + cafe->parts = parts; 798 + dev_info(&cafe->pdev->dev, "%d RedBoot partitions found\n", nr_parts); 799 + add_mtd_partitions(mtd, parts, nr_parts); 800 + } 801 + #endif 799 802 goto out; 800 803 801 804 out_irq:
+1244
drivers/mtd/nand/fsl_elbc_nand.c
··· 1 + /* Freescale Enhanced Local Bus Controller NAND driver 2 + * 3 + * Copyright (c) 2006-2007 Freescale Semiconductor 4 + * 5 + * Authors: Nick Spence <nick.spence@freescale.com>, 6 + * Scott Wood <scottwood@freescale.com> 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License as published by 10 + * the Free Software Foundation; either version 2 of the License, or 11 + * (at your option) any later version. 12 + * 13 + * This program is distributed in the hope that it will be useful, 14 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 + * GNU General Public License for more details. 17 + * 18 + * You should have received a copy of the GNU General Public License 19 + * along with this program; if not, write to the Free Software 20 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 + */ 22 + 23 + #include <linux/module.h> 24 + #include <linux/types.h> 25 + #include <linux/init.h> 26 + #include <linux/kernel.h> 27 + #include <linux/string.h> 28 + #include <linux/ioport.h> 29 + #include <linux/of_platform.h> 30 + #include <linux/slab.h> 31 + #include <linux/interrupt.h> 32 + 33 + #include <linux/mtd/mtd.h> 34 + #include <linux/mtd/nand.h> 35 + #include <linux/mtd/nand_ecc.h> 36 + #include <linux/mtd/partitions.h> 37 + 38 + #include <asm/io.h> 39 + 40 + 41 + #define MAX_BANKS 8 42 + #define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */ 43 + #define FCM_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait for FCM */ 44 + 45 + struct elbc_bank { 46 + __be32 br; /**< Base Register */ 47 + #define BR_BA 0xFFFF8000 48 + #define BR_BA_SHIFT 15 49 + #define BR_PS 0x00001800 50 + #define BR_PS_SHIFT 11 51 + #define BR_PS_8 0x00000800 /* Port Size 8 bit */ 52 + #define BR_PS_16 0x00001000 /* Port Size 16 bit */ 53 + #define BR_PS_32 0x00001800 /* Port Size 32 bit */ 54 + #define BR_DECC 0x00000600 55 + #define BR_DECC_SHIFT 9 56 + #define BR_DECC_OFF 0x00000000 /* HW ECC checking and generation off */ 57 + #define BR_DECC_CHK 0x00000200 /* HW ECC checking on, generation off */ 58 + #define BR_DECC_CHK_GEN 0x00000400 /* HW ECC checking and generation on */ 59 + #define BR_WP 0x00000100 60 + #define BR_WP_SHIFT 8 61 + #define BR_MSEL 0x000000E0 62 + #define BR_MSEL_SHIFT 5 63 + #define BR_MS_GPCM 0x00000000 /* GPCM */ 64 + #define BR_MS_FCM 0x00000020 /* FCM */ 65 + #define BR_MS_SDRAM 0x00000060 /* SDRAM */ 66 + #define BR_MS_UPMA 0x00000080 /* UPMA */ 67 + #define BR_MS_UPMB 0x000000A0 /* UPMB */ 68 + #define BR_MS_UPMC 0x000000C0 /* UPMC */ 69 + #define BR_V 0x00000001 70 + #define BR_V_SHIFT 0 71 + #define BR_RES ~(BR_BA|BR_PS|BR_DECC|BR_WP|BR_MSEL|BR_V) 72 + 73 + __be32 or; /**< Base Register */ 74 + #define OR0 0x5004 75 + #define OR1 0x500C 76 + #define OR2 0x5014 77 + #define OR3 0x501C 78 + #define OR4 0x5024 79 + #define OR5 0x502C 80 + #define OR6 0x5034 81 + #define OR7 0x503C 82 + 83 + #define OR_FCM_AM 0xFFFF8000 84 + #define OR_FCM_AM_SHIFT 15 85 + #define OR_FCM_BCTLD 0x00001000 86 + #define OR_FCM_BCTLD_SHIFT 12 87 + #define OR_FCM_PGS 0x00000400 88 + #define OR_FCM_PGS_SHIFT 10 89 + #define OR_FCM_CSCT 0x00000200 90 + #define OR_FCM_CSCT_SHIFT 9 91 + #define OR_FCM_CST 0x00000100 92 + #define OR_FCM_CST_SHIFT 8 93 + #define OR_FCM_CHT 0x00000080 94 + #define OR_FCM_CHT_SHIFT 7 95 + #define OR_FCM_SCY 0x00000070 96 + #define OR_FCM_SCY_SHIFT 4 97 + #define 
OR_FCM_SCY_1 0x00000010 98 + #define OR_FCM_SCY_2 0x00000020 99 + #define OR_FCM_SCY_3 0x00000030 100 + #define OR_FCM_SCY_4 0x00000040 101 + #define OR_FCM_SCY_5 0x00000050 102 + #define OR_FCM_SCY_6 0x00000060 103 + #define OR_FCM_SCY_7 0x00000070 104 + #define OR_FCM_RST 0x00000008 105 + #define OR_FCM_RST_SHIFT 3 106 + #define OR_FCM_TRLX 0x00000004 107 + #define OR_FCM_TRLX_SHIFT 2 108 + #define OR_FCM_EHTR 0x00000002 109 + #define OR_FCM_EHTR_SHIFT 1 110 + }; 111 + 112 + struct elbc_regs { 113 + struct elbc_bank bank[8]; 114 + u8 res0[0x28]; 115 + __be32 mar; /**< UPM Address Register */ 116 + u8 res1[0x4]; 117 + __be32 mamr; /**< UPMA Mode Register */ 118 + __be32 mbmr; /**< UPMB Mode Register */ 119 + __be32 mcmr; /**< UPMC Mode Register */ 120 + u8 res2[0x8]; 121 + __be32 mrtpr; /**< Memory Refresh Timer Prescaler Register */ 122 + __be32 mdr; /**< UPM Data Register */ 123 + u8 res3[0x4]; 124 + __be32 lsor; /**< Special Operation Initiation Register */ 125 + __be32 lsdmr; /**< SDRAM Mode Register */ 126 + u8 res4[0x8]; 127 + __be32 lurt; /**< UPM Refresh Timer */ 128 + __be32 lsrt; /**< SDRAM Refresh Timer */ 129 + u8 res5[0x8]; 130 + __be32 ltesr; /**< Transfer Error Status Register */ 131 + #define LTESR_BM 0x80000000 132 + #define LTESR_FCT 0x40000000 133 + #define LTESR_PAR 0x20000000 134 + #define LTESR_WP 0x04000000 135 + #define LTESR_ATMW 0x00800000 136 + #define LTESR_ATMR 0x00400000 137 + #define LTESR_CS 0x00080000 138 + #define LTESR_CC 0x00000001 139 + #define LTESR_NAND_MASK (LTESR_FCT | LTESR_PAR | LTESR_CC) 140 + __be32 ltedr; /**< Transfer Error Disable Register */ 141 + __be32 lteir; /**< Transfer Error Interrupt Register */ 142 + __be32 lteatr; /**< Transfer Error Attributes Register */ 143 + __be32 ltear; /**< Transfer Error Address Register */ 144 + u8 res6[0xC]; 145 + __be32 lbcr; /**< Configuration Register */ 146 + #define LBCR_LDIS 0x80000000 147 + #define LBCR_LDIS_SHIFT 31 148 + #define LBCR_BCTLC 0x00C00000 149 + #define LBCR_BCTLC_SHIFT 22 150 + #define LBCR_AHD 0x00200000 151 + #define LBCR_LPBSE 0x00020000 152 + #define LBCR_LPBSE_SHIFT 17 153 + #define LBCR_EPAR 0x00010000 154 + #define LBCR_EPAR_SHIFT 16 155 + #define LBCR_BMT 0x0000FF00 156 + #define LBCR_BMT_SHIFT 8 157 + #define LBCR_INIT 0x00040000 158 + __be32 lcrr; /**< Clock Ratio Register */ 159 + #define LCRR_DBYP 0x80000000 160 + #define LCRR_DBYP_SHIFT 31 161 + #define LCRR_BUFCMDC 0x30000000 162 + #define LCRR_BUFCMDC_SHIFT 28 163 + #define LCRR_ECL 0x03000000 164 + #define LCRR_ECL_SHIFT 24 165 + #define LCRR_EADC 0x00030000 166 + #define LCRR_EADC_SHIFT 16 167 + #define LCRR_CLKDIV 0x0000000F 168 + #define LCRR_CLKDIV_SHIFT 0 169 + u8 res7[0x8]; 170 + __be32 fmr; /**< Flash Mode Register */ 171 + #define FMR_CWTO 0x0000F000 172 + #define FMR_CWTO_SHIFT 12 173 + #define FMR_BOOT 0x00000800 174 + #define FMR_ECCM 0x00000100 175 + #define FMR_AL 0x00000030 176 + #define FMR_AL_SHIFT 4 177 + #define FMR_OP 0x00000003 178 + #define FMR_OP_SHIFT 0 179 + __be32 fir; /**< Flash Instruction Register */ 180 + #define FIR_OP0 0xF0000000 181 + #define FIR_OP0_SHIFT 28 182 + #define FIR_OP1 0x0F000000 183 + #define FIR_OP1_SHIFT 24 184 + #define FIR_OP2 0x00F00000 185 + #define FIR_OP2_SHIFT 20 186 + #define FIR_OP3 0x000F0000 187 + #define FIR_OP3_SHIFT 16 188 + #define FIR_OP4 0x0000F000 189 + #define FIR_OP4_SHIFT 12 190 + #define FIR_OP5 0x00000F00 191 + #define FIR_OP5_SHIFT 8 192 + #define FIR_OP6 0x000000F0 193 + #define FIR_OP6_SHIFT 4 194 + #define FIR_OP7 0x0000000F 195 + #define 
FIR_OP7_SHIFT 0 196 + #define FIR_OP_NOP 0x0 /* No operation and end of sequence */ 197 + #define FIR_OP_CA 0x1 /* Issue current column address */ 198 + #define FIR_OP_PA 0x2 /* Issue current block+page address */ 199 + #define FIR_OP_UA 0x3 /* Issue user defined address */ 200 + #define FIR_OP_CM0 0x4 /* Issue command from FCR[CMD0] */ 201 + #define FIR_OP_CM1 0x5 /* Issue command from FCR[CMD1] */ 202 + #define FIR_OP_CM2 0x6 /* Issue command from FCR[CMD2] */ 203 + #define FIR_OP_CM3 0x7 /* Issue command from FCR[CMD3] */ 204 + #define FIR_OP_WB 0x8 /* Write FBCR bytes from FCM buffer */ 205 + #define FIR_OP_WS 0x9 /* Write 1 or 2 bytes from MDR[AS] */ 206 + #define FIR_OP_RB 0xA /* Read FBCR bytes to FCM buffer */ 207 + #define FIR_OP_RS 0xB /* Read 1 or 2 bytes to MDR[AS] */ 208 + #define FIR_OP_CW0 0xC /* Wait then issue FCR[CMD0] */ 209 + #define FIR_OP_CW1 0xD /* Wait then issue FCR[CMD1] */ 210 + #define FIR_OP_RBW 0xE /* Wait then read FBCR bytes */ 211 + #define FIR_OP_RSW 0xE /* Wait then read 1 or 2 bytes */ 212 + __be32 fcr; /**< Flash Command Register */ 213 + #define FCR_CMD0 0xFF000000 214 + #define FCR_CMD0_SHIFT 24 215 + #define FCR_CMD1 0x00FF0000 216 + #define FCR_CMD1_SHIFT 16 217 + #define FCR_CMD2 0x0000FF00 218 + #define FCR_CMD2_SHIFT 8 219 + #define FCR_CMD3 0x000000FF 220 + #define FCR_CMD3_SHIFT 0 221 + __be32 fbar; /**< Flash Block Address Register */ 222 + #define FBAR_BLK 0x00FFFFFF 223 + __be32 fpar; /**< Flash Page Address Register */ 224 + #define FPAR_SP_PI 0x00007C00 225 + #define FPAR_SP_PI_SHIFT 10 226 + #define FPAR_SP_MS 0x00000200 227 + #define FPAR_SP_CI 0x000001FF 228 + #define FPAR_SP_CI_SHIFT 0 229 + #define FPAR_LP_PI 0x0003F000 230 + #define FPAR_LP_PI_SHIFT 12 231 + #define FPAR_LP_MS 0x00000800 232 + #define FPAR_LP_CI 0x000007FF 233 + #define FPAR_LP_CI_SHIFT 0 234 + __be32 fbcr; /**< Flash Byte Count Register */ 235 + #define FBCR_BC 0x00000FFF 236 + u8 res11[0x8]; 237 + u8 res8[0xF00]; 238 + }; 239 + 240 + struct fsl_elbc_ctrl; 241 + 242 + /* mtd information per set */ 243 + 244 + struct fsl_elbc_mtd { 245 + struct mtd_info mtd; 246 + struct nand_chip chip; 247 + struct fsl_elbc_ctrl *ctrl; 248 + 249 + struct device *dev; 250 + int bank; /* Chip select bank number */ 251 + u8 __iomem *vbase; /* Chip select base virtual address */ 252 + int page_size; /* NAND page size (0=512, 1=2048) */ 253 + unsigned int fmr; /* FCM Flash Mode Register value */ 254 + }; 255 + 256 + /* overview of the fsl elbc controller */ 257 + 258 + struct fsl_elbc_ctrl { 259 + struct nand_hw_control controller; 260 + struct fsl_elbc_mtd *chips[MAX_BANKS]; 261 + 262 + /* device info */ 263 + struct device *dev; 264 + struct elbc_regs __iomem *regs; 265 + int irq; 266 + wait_queue_head_t irq_wait; 267 + unsigned int irq_status; /* status read from LTESR by irq handler */ 268 + u8 __iomem *addr; /* Address of assigned FCM buffer */ 269 + unsigned int page; /* Last page written to / read from */ 270 + unsigned int read_bytes; /* Number of bytes read during command */ 271 + unsigned int column; /* Saved column from SEQIN */ 272 + unsigned int index; /* Pointer to next byte to 'read' */ 273 + unsigned int status; /* status read from LTESR after last op */ 274 + unsigned int mdr; /* UPM/FCM Data Register value */ 275 + unsigned int use_mdr; /* Non zero if the MDR is to be set */ 276 + unsigned int oob; /* Non zero if operating on OOB data */ 277 + char *oob_poi; /* Place to write ECC after read back */ 278 + }; 279 + 280 + /* These map to the positions used by the FCM 
hardware ECC generator */ 281 + 282 + /* Small Page FLASH with FMR[ECCM] = 0 */ 283 + static struct nand_ecclayout fsl_elbc_oob_sp_eccm0 = { 284 + .eccbytes = 3, 285 + .eccpos = {6, 7, 8}, 286 + .oobfree = { {0, 5}, {9, 7} }, 287 + .oobavail = 12, 288 + }; 289 + 290 + /* Small Page FLASH with FMR[ECCM] = 1 */ 291 + static struct nand_ecclayout fsl_elbc_oob_sp_eccm1 = { 292 + .eccbytes = 3, 293 + .eccpos = {8, 9, 10}, 294 + .oobfree = { {0, 5}, {6, 2}, {11, 5} }, 295 + .oobavail = 12, 296 + }; 297 + 298 + /* Large Page FLASH with FMR[ECCM] = 0 */ 299 + static struct nand_ecclayout fsl_elbc_oob_lp_eccm0 = { 300 + .eccbytes = 12, 301 + .eccpos = {6, 7, 8, 22, 23, 24, 38, 39, 40, 54, 55, 56}, 302 + .oobfree = { {1, 5}, {9, 13}, {25, 13}, {41, 13}, {57, 7} }, 303 + .oobavail = 48, 304 + }; 305 + 306 + /* Large Page FLASH with FMR[ECCM] = 1 */ 307 + static struct nand_ecclayout fsl_elbc_oob_lp_eccm1 = { 308 + .eccbytes = 12, 309 + .eccpos = {8, 9, 10, 24, 25, 26, 40, 41, 42, 56, 57, 58}, 310 + .oobfree = { {1, 7}, {11, 13}, {27, 13}, {43, 13}, {59, 5} }, 311 + .oobavail = 48, 312 + }; 313 + 314 + /*=================================*/ 315 + 316 + /* 317 + * Set up the FCM hardware block and page address fields, and the fcm 318 + * structure addr field to point to the correct FCM buffer in memory 319 + */ 320 + static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob) 321 + { 322 + struct nand_chip *chip = mtd->priv; 323 + struct fsl_elbc_mtd *priv = chip->priv; 324 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 325 + struct elbc_regs __iomem *lbc = ctrl->regs; 326 + int buf_num; 327 + 328 + ctrl->page = page_addr; 329 + 330 + out_be32(&lbc->fbar, 331 + page_addr >> (chip->phys_erase_shift - chip->page_shift)); 332 + 333 + if (priv->page_size) { 334 + out_be32(&lbc->fpar, 335 + ((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) | 336 + (oob ? FPAR_LP_MS : 0) | column); 337 + buf_num = (page_addr & 1) << 2; 338 + } else { 339 + out_be32(&lbc->fpar, 340 + ((page_addr << FPAR_SP_PI_SHIFT) & FPAR_SP_PI) | 341 + (oob ? FPAR_SP_MS : 0) | column); 342 + buf_num = page_addr & 7; 343 + } 344 + 345 + ctrl->addr = priv->vbase + buf_num * 1024; 346 + ctrl->index = column; 347 + 348 + /* for OOB data point to the second half of the buffer */ 349 + if (oob) 350 + ctrl->index += priv->page_size ? 
2048 : 512; 351 + 352 + dev_vdbg(ctrl->dev, "set_addr: bank=%d, ctrl->addr=0x%p (0x%p), " 353 + "index %x, pes %d ps %d\n", 354 + buf_num, ctrl->addr, priv->vbase, ctrl->index, 355 + chip->phys_erase_shift, chip->page_shift); 356 + } 357 + 358 + /* 359 + * execute FCM command and wait for it to complete 360 + */ 361 + static int fsl_elbc_run_command(struct mtd_info *mtd) 362 + { 363 + struct nand_chip *chip = mtd->priv; 364 + struct fsl_elbc_mtd *priv = chip->priv; 365 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 366 + struct elbc_regs __iomem *lbc = ctrl->regs; 367 + 368 + /* Setup the FMR[OP] to execute without write protection */ 369 + out_be32(&lbc->fmr, priv->fmr | 3); 370 + if (ctrl->use_mdr) 371 + out_be32(&lbc->mdr, ctrl->mdr); 372 + 373 + dev_vdbg(ctrl->dev, 374 + "fsl_elbc_run_command: fmr=%08x fir=%08x fcr=%08x\n", 375 + in_be32(&lbc->fmr), in_be32(&lbc->fir), in_be32(&lbc->fcr)); 376 + dev_vdbg(ctrl->dev, 377 + "fsl_elbc_run_command: fbar=%08x fpar=%08x " 378 + "fbcr=%08x bank=%d\n", 379 + in_be32(&lbc->fbar), in_be32(&lbc->fpar), 380 + in_be32(&lbc->fbcr), priv->bank); 381 + 382 + /* execute special operation */ 383 + out_be32(&lbc->lsor, priv->bank); 384 + 385 + /* wait for FCM complete flag or timeout */ 386 + ctrl->irq_status = 0; 387 + wait_event_timeout(ctrl->irq_wait, ctrl->irq_status, 388 + FCM_TIMEOUT_MSECS * HZ/1000); 389 + ctrl->status = ctrl->irq_status; 390 + 391 + /* store mdr value in case it was needed */ 392 + if (ctrl->use_mdr) 393 + ctrl->mdr = in_be32(&lbc->mdr); 394 + 395 + ctrl->use_mdr = 0; 396 + 397 + dev_vdbg(ctrl->dev, 398 + "fsl_elbc_run_command: stat=%08x mdr=%08x fmr=%08x\n", 399 + ctrl->status, ctrl->mdr, in_be32(&lbc->fmr)); 400 + 401 + /* returns 0 on success otherwise non-zero) */ 402 + return ctrl->status == LTESR_CC ? 0 : -EIO; 403 + } 404 + 405 + static void fsl_elbc_do_read(struct nand_chip *chip, int oob) 406 + { 407 + struct fsl_elbc_mtd *priv = chip->priv; 408 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 409 + struct elbc_regs __iomem *lbc = ctrl->regs; 410 + 411 + if (priv->page_size) { 412 + out_be32(&lbc->fir, 413 + (FIR_OP_CW0 << FIR_OP0_SHIFT) | 414 + (FIR_OP_CA << FIR_OP1_SHIFT) | 415 + (FIR_OP_PA << FIR_OP2_SHIFT) | 416 + (FIR_OP_CW1 << FIR_OP3_SHIFT) | 417 + (FIR_OP_RBW << FIR_OP4_SHIFT)); 418 + 419 + out_be32(&lbc->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) | 420 + (NAND_CMD_READSTART << FCR_CMD1_SHIFT)); 421 + } else { 422 + out_be32(&lbc->fir, 423 + (FIR_OP_CW0 << FIR_OP0_SHIFT) | 424 + (FIR_OP_CA << FIR_OP1_SHIFT) | 425 + (FIR_OP_PA << FIR_OP2_SHIFT) | 426 + (FIR_OP_RBW << FIR_OP3_SHIFT)); 427 + 428 + if (oob) 429 + out_be32(&lbc->fcr, NAND_CMD_READOOB << FCR_CMD0_SHIFT); 430 + else 431 + out_be32(&lbc->fcr, NAND_CMD_READ0 << FCR_CMD0_SHIFT); 432 + } 433 + } 434 + 435 + /* cmdfunc send commands to the FCM */ 436 + static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command, 437 + int column, int page_addr) 438 + { 439 + struct nand_chip *chip = mtd->priv; 440 + struct fsl_elbc_mtd *priv = chip->priv; 441 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 442 + struct elbc_regs __iomem *lbc = ctrl->regs; 443 + 444 + ctrl->use_mdr = 0; 445 + 446 + /* clear the read buffer */ 447 + ctrl->read_bytes = 0; 448 + if (command != NAND_CMD_PAGEPROG) 449 + ctrl->index = 0; 450 + 451 + switch (command) { 452 + /* READ0 and READ1 read the entire buffer to use hardware ECC. 
*/ 453 + case NAND_CMD_READ1: 454 + column += 256; 455 + 456 + /* fall-through */ 457 + case NAND_CMD_READ0: 458 + dev_dbg(ctrl->dev, 459 + "fsl_elbc_cmdfunc: NAND_CMD_READ0, page_addr:" 460 + " 0x%x, column: 0x%x.\n", page_addr, column); 461 + 462 + 463 + out_be32(&lbc->fbcr, 0); /* read entire page to enable ECC */ 464 + set_addr(mtd, 0, page_addr, 0); 465 + 466 + ctrl->read_bytes = mtd->writesize + mtd->oobsize; 467 + ctrl->index += column; 468 + 469 + fsl_elbc_do_read(chip, 0); 470 + fsl_elbc_run_command(mtd); 471 + return; 472 + 473 + /* READOOB reads only the OOB because no ECC is performed. */ 474 + case NAND_CMD_READOOB: 475 + dev_vdbg(ctrl->dev, 476 + "fsl_elbc_cmdfunc: NAND_CMD_READOOB, page_addr:" 477 + " 0x%x, column: 0x%x.\n", page_addr, column); 478 + 479 + out_be32(&lbc->fbcr, mtd->oobsize - column); 480 + set_addr(mtd, column, page_addr, 1); 481 + 482 + ctrl->read_bytes = mtd->writesize + mtd->oobsize; 483 + 484 + fsl_elbc_do_read(chip, 1); 485 + fsl_elbc_run_command(mtd); 486 + return; 487 + 488 + /* READID must read all 5 possible bytes while CEB is active */ 489 + case NAND_CMD_READID: 490 + dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_READID.\n"); 491 + 492 + out_be32(&lbc->fir, (FIR_OP_CW0 << FIR_OP0_SHIFT) | 493 + (FIR_OP_UA << FIR_OP1_SHIFT) | 494 + (FIR_OP_RBW << FIR_OP2_SHIFT)); 495 + out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT); 496 + /* 5 bytes for manuf, device and exts */ 497 + out_be32(&lbc->fbcr, 5); 498 + ctrl->read_bytes = 5; 499 + ctrl->use_mdr = 1; 500 + ctrl->mdr = 0; 501 + 502 + set_addr(mtd, 0, 0, 0); 503 + fsl_elbc_run_command(mtd); 504 + return; 505 + 506 + /* ERASE1 stores the block and page address */ 507 + case NAND_CMD_ERASE1: 508 + dev_vdbg(ctrl->dev, 509 + "fsl_elbc_cmdfunc: NAND_CMD_ERASE1, " 510 + "page_addr: 0x%x.\n", page_addr); 511 + set_addr(mtd, 0, page_addr, 0); 512 + return; 513 + 514 + /* ERASE2 uses the block and page address from ERASE1 */ 515 + case NAND_CMD_ERASE2: 516 + dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n"); 517 + 518 + out_be32(&lbc->fir, 519 + (FIR_OP_CW0 << FIR_OP0_SHIFT) | 520 + (FIR_OP_PA << FIR_OP1_SHIFT) | 521 + (FIR_OP_CM1 << FIR_OP2_SHIFT)); 522 + 523 + out_be32(&lbc->fcr, 524 + (NAND_CMD_ERASE1 << FCR_CMD0_SHIFT) | 525 + (NAND_CMD_ERASE2 << FCR_CMD1_SHIFT)); 526 + 527 + out_be32(&lbc->fbcr, 0); 528 + ctrl->read_bytes = 0; 529 + 530 + fsl_elbc_run_command(mtd); 531 + return; 532 + 533 + /* SEQIN sets up the addr buffer and all registers except the length */ 534 + case NAND_CMD_SEQIN: { 535 + __be32 fcr; 536 + dev_vdbg(ctrl->dev, 537 + "fsl_elbc_cmdfunc: NAND_CMD_SEQIN/PAGE_PROG, " 538 + "page_addr: 0x%x, column: 0x%x.\n", 539 + page_addr, column); 540 + 541 + ctrl->column = column; 542 + ctrl->oob = 0; 543 + 544 + fcr = (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT) | 545 + (NAND_CMD_SEQIN << FCR_CMD2_SHIFT); 546 + 547 + if (priv->page_size) { 548 + out_be32(&lbc->fir, 549 + (FIR_OP_CW0 << FIR_OP0_SHIFT) | 550 + (FIR_OP_CA << FIR_OP1_SHIFT) | 551 + (FIR_OP_PA << FIR_OP2_SHIFT) | 552 + (FIR_OP_WB << FIR_OP3_SHIFT) | 553 + (FIR_OP_CW1 << FIR_OP4_SHIFT)); 554 + 555 + fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT; 556 + } else { 557 + out_be32(&lbc->fir, 558 + (FIR_OP_CW0 << FIR_OP0_SHIFT) | 559 + (FIR_OP_CM2 << FIR_OP1_SHIFT) | 560 + (FIR_OP_CA << FIR_OP2_SHIFT) | 561 + (FIR_OP_PA << FIR_OP3_SHIFT) | 562 + (FIR_OP_WB << FIR_OP4_SHIFT) | 563 + (FIR_OP_CW1 << FIR_OP5_SHIFT)); 564 + 565 + if (column >= mtd->writesize) { 566 + /* OOB area --> READOOB */ 567 + column -= mtd->writesize; 568 + fcr |= 
NAND_CMD_READOOB << FCR_CMD0_SHIFT; 569 + ctrl->oob = 1; 570 + } else if (column < 256) { 571 + /* First 256 bytes --> READ0 */ 572 + fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT; 573 + } else { 574 + /* Second 256 bytes --> READ1 */ 575 + fcr |= NAND_CMD_READ1 << FCR_CMD0_SHIFT; 576 + } 577 + } 578 + 579 + out_be32(&lbc->fcr, fcr); 580 + set_addr(mtd, column, page_addr, ctrl->oob); 581 + return; 582 + } 583 + 584 + /* PAGEPROG reuses all of the setup from SEQIN and adds the length */ 585 + case NAND_CMD_PAGEPROG: { 586 + int full_page; 587 + dev_vdbg(ctrl->dev, 588 + "fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG " 589 + "writing %d bytes.\n", ctrl->index); 590 + 591 + /* if the write did not start at 0 or is not a full page 592 + * then set the exact length, otherwise use a full page 593 + * write so the HW generates the ECC. 594 + */ 595 + if (ctrl->oob || ctrl->column != 0 || 596 + ctrl->index != mtd->writesize + mtd->oobsize) { 597 + out_be32(&lbc->fbcr, ctrl->index); 598 + full_page = 0; 599 + } else { 600 + out_be32(&lbc->fbcr, 0); 601 + full_page = 1; 602 + } 603 + 604 + fsl_elbc_run_command(mtd); 605 + 606 + /* Read back the page in order to fill in the ECC for the 607 + * caller. Is this really needed? 608 + */ 609 + if (full_page && ctrl->oob_poi) { 610 + out_be32(&lbc->fbcr, 3); 611 + set_addr(mtd, 6, page_addr, 1); 612 + 613 + ctrl->read_bytes = mtd->writesize + 9; 614 + 615 + fsl_elbc_do_read(chip, 1); 616 + fsl_elbc_run_command(mtd); 617 + 618 + memcpy_fromio(ctrl->oob_poi + 6, 619 + &ctrl->addr[ctrl->index], 3); 620 + ctrl->index += 3; 621 + } 622 + 623 + ctrl->oob_poi = NULL; 624 + return; 625 + } 626 + 627 + /* CMD_STATUS must read the status byte while CEB is active */ 628 + /* Note - it does not wait for the ready line */ 629 + case NAND_CMD_STATUS: 630 + out_be32(&lbc->fir, 631 + (FIR_OP_CM0 << FIR_OP0_SHIFT) | 632 + (FIR_OP_RBW << FIR_OP1_SHIFT)); 633 + out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT); 634 + out_be32(&lbc->fbcr, 1); 635 + set_addr(mtd, 0, 0, 0); 636 + ctrl->read_bytes = 1; 637 + 638 + fsl_elbc_run_command(mtd); 639 + 640 + /* The chip always seems to report that it is 641 + * write-protected, even when it is not. 642 + */ 643 + setbits8(ctrl->addr, NAND_STATUS_WP); 644 + return; 645 + 646 + /* RESET without waiting for the ready line */ 647 + case NAND_CMD_RESET: 648 + dev_dbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_RESET.\n"); 649 + out_be32(&lbc->fir, FIR_OP_CM0 << FIR_OP0_SHIFT); 650 + out_be32(&lbc->fcr, NAND_CMD_RESET << FCR_CMD0_SHIFT); 651 + fsl_elbc_run_command(mtd); 652 + return; 653 + 654 + default: 655 + dev_err(ctrl->dev, 656 + "fsl_elbc_cmdfunc: error, unsupported command 0x%x.\n", 657 + command); 658 + } 659 + } 660 + 661 + static void fsl_elbc_select_chip(struct mtd_info *mtd, int chip) 662 + { 663 + /* The hardware does not seem to support multiple 664 + * chips per bank. 
665 + */ 666 + } 667 + 668 + /* 669 + * Write buf to the FCM Controller Data Buffer 670 + */ 671 + static void fsl_elbc_write_buf(struct mtd_info *mtd, const u8 *buf, int len) 672 + { 673 + struct nand_chip *chip = mtd->priv; 674 + struct fsl_elbc_mtd *priv = chip->priv; 675 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 676 + unsigned int bufsize = mtd->writesize + mtd->oobsize; 677 + 678 + if (len < 0) { 679 + dev_err(ctrl->dev, "write_buf of %d bytes", len); 680 + ctrl->status = 0; 681 + return; 682 + } 683 + 684 + if ((unsigned int)len > bufsize - ctrl->index) { 685 + dev_err(ctrl->dev, 686 + "write_buf beyond end of buffer " 687 + "(%d requested, %u available)\n", 688 + len, bufsize - ctrl->index); 689 + len = bufsize - ctrl->index; 690 + } 691 + 692 + memcpy_toio(&ctrl->addr[ctrl->index], buf, len); 693 + ctrl->index += len; 694 + } 695 + 696 + /* 697 + * read a byte from either the FCM hardware buffer if it has any data left 698 + * otherwise issue a command to read a single byte. 699 + */ 700 + static u8 fsl_elbc_read_byte(struct mtd_info *mtd) 701 + { 702 + struct nand_chip *chip = mtd->priv; 703 + struct fsl_elbc_mtd *priv = chip->priv; 704 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 705 + 706 + /* If there are still bytes in the FCM, then use the next byte. */ 707 + if (ctrl->index < ctrl->read_bytes) 708 + return in_8(&ctrl->addr[ctrl->index++]); 709 + 710 + dev_err(ctrl->dev, "read_byte beyond end of buffer\n"); 711 + return ERR_BYTE; 712 + } 713 + 714 + /* 715 + * Read from the FCM Controller Data Buffer 716 + */ 717 + static void fsl_elbc_read_buf(struct mtd_info *mtd, u8 *buf, int len) 718 + { 719 + struct nand_chip *chip = mtd->priv; 720 + struct fsl_elbc_mtd *priv = chip->priv; 721 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 722 + int avail; 723 + 724 + if (len < 0) 725 + return; 726 + 727 + avail = min((unsigned int)len, ctrl->read_bytes - ctrl->index); 728 + memcpy_fromio(buf, &ctrl->addr[ctrl->index], avail); 729 + ctrl->index += avail; 730 + 731 + if (len > avail) 732 + dev_err(ctrl->dev, 733 + "read_buf beyond end of buffer " 734 + "(%d requested, %d available)\n", 735 + len, avail); 736 + } 737 + 738 + /* 739 + * Verify buffer against the FCM Controller Data Buffer 740 + */ 741 + static int fsl_elbc_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) 742 + { 743 + struct nand_chip *chip = mtd->priv; 744 + struct fsl_elbc_mtd *priv = chip->priv; 745 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 746 + int i; 747 + 748 + if (len < 0) { 749 + dev_err(ctrl->dev, "write_buf of %d bytes", len); 750 + return -EINVAL; 751 + } 752 + 753 + if ((unsigned int)len > ctrl->read_bytes - ctrl->index) { 754 + dev_err(ctrl->dev, 755 + "verify_buf beyond end of buffer " 756 + "(%d requested, %u available)\n", 757 + len, ctrl->read_bytes - ctrl->index); 758 + 759 + ctrl->index = ctrl->read_bytes; 760 + return -EINVAL; 761 + } 762 + 763 + for (i = 0; i < len; i++) 764 + if (in_8(&ctrl->addr[ctrl->index + i]) != buf[i]) 765 + break; 766 + 767 + ctrl->index += len; 768 + return i == len && ctrl->status == LTESR_CC ? 0 : -EIO; 769 + } 770 + 771 + /* This function is called after Program and Erase Operations to 772 + * check for success or failure. 
773 + */ 774 + static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip) 775 + { 776 + struct fsl_elbc_mtd *priv = chip->priv; 777 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 778 + struct elbc_regs __iomem *lbc = ctrl->regs; 779 + 780 + if (ctrl->status != LTESR_CC) 781 + return NAND_STATUS_FAIL; 782 + 783 + /* Use READ_STATUS command, but wait for the device to be ready */ 784 + ctrl->use_mdr = 0; 785 + out_be32(&lbc->fir, 786 + (FIR_OP_CW0 << FIR_OP0_SHIFT) | 787 + (FIR_OP_RBW << FIR_OP1_SHIFT)); 788 + out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT); 789 + out_be32(&lbc->fbcr, 1); 790 + set_addr(mtd, 0, 0, 0); 791 + ctrl->read_bytes = 1; 792 + 793 + fsl_elbc_run_command(mtd); 794 + 795 + if (ctrl->status != LTESR_CC) 796 + return NAND_STATUS_FAIL; 797 + 798 + /* The chip always seems to report that it is 799 + * write-protected, even when it is not. 800 + */ 801 + setbits8(ctrl->addr, NAND_STATUS_WP); 802 + return fsl_elbc_read_byte(mtd); 803 + } 804 + 805 + static int fsl_elbc_chip_init_tail(struct mtd_info *mtd) 806 + { 807 + struct nand_chip *chip = mtd->priv; 808 + struct fsl_elbc_mtd *priv = chip->priv; 809 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 810 + struct elbc_regs __iomem *lbc = ctrl->regs; 811 + unsigned int al; 812 + 813 + /* calculate FMR Address Length field */ 814 + al = 0; 815 + if (chip->pagemask & 0xffff0000) 816 + al++; 817 + if (chip->pagemask & 0xff000000) 818 + al++; 819 + 820 + /* add to ECCM mode set in fsl_elbc_init */ 821 + priv->fmr |= (12 << FMR_CWTO_SHIFT) | /* Timeout > 12 ms */ 822 + (al << FMR_AL_SHIFT); 823 + 824 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->numchips = %d\n", 825 + chip->numchips); 826 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->chipsize = %ld\n", 827 + chip->chipsize); 828 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->pagemask = %8x\n", 829 + chip->pagemask); 830 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->chip_delay = %d\n", 831 + chip->chip_delay); 832 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->badblockpos = %d\n", 833 + chip->badblockpos); 834 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->chip_shift = %d\n", 835 + chip->chip_shift); 836 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->page_shift = %d\n", 837 + chip->page_shift); 838 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n", 839 + chip->phys_erase_shift); 840 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecclayout = %p\n", 841 + chip->ecclayout); 842 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.mode = %d\n", 843 + chip->ecc.mode); 844 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.steps = %d\n", 845 + chip->ecc.steps); 846 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.bytes = %d\n", 847 + chip->ecc.bytes); 848 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.total = %d\n", 849 + chip->ecc.total); 850 + dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.layout = %p\n", 851 + chip->ecc.layout); 852 + dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags); 853 + dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->size = %d\n", mtd->size); 854 + dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->erasesize = %d\n", 855 + mtd->erasesize); 856 + dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->writesize = %d\n", 857 + mtd->writesize); 858 + dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->oobsize = %d\n", 859 + mtd->oobsize); 860 + 861 + /* adjust Option Register and ECC to match Flash page size */ 862 + if (mtd->writesize == 512) { 863 + priv->page_size = 0; 864 + clrbits32(&lbc->bank[priv->bank].or, ~OR_FCM_PGS); 865 + } else if (mtd->writesize == 2048) { 866 + priv->page_size = 1; 867 + 
setbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS); 868 + /* adjust ecc setup if needed */ 869 + if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) == 870 + BR_DECC_CHK_GEN) { 871 + chip->ecc.size = 512; 872 + chip->ecc.layout = (priv->fmr & FMR_ECCM) ? 873 + &fsl_elbc_oob_lp_eccm1 : 874 + &fsl_elbc_oob_lp_eccm0; 875 + mtd->ecclayout = chip->ecc.layout; 876 + mtd->oobavail = chip->ecc.layout->oobavail; 877 + } 878 + } else { 879 + dev_err(ctrl->dev, 880 + "fsl_elbc_init: page size %d is not supported\n", 881 + mtd->writesize); 882 + return -1; 883 + } 884 + 885 + /* The default u-boot configuration on MPC8313ERDB causes errors; 886 + * more delay is needed. This should be safe for other boards 887 + * as well. 888 + */ 889 + setbits32(&lbc->bank[priv->bank].or, 0x70); 890 + return 0; 891 + } 892 + 893 + static int fsl_elbc_read_page(struct mtd_info *mtd, 894 + struct nand_chip *chip, 895 + uint8_t *buf) 896 + { 897 + fsl_elbc_read_buf(mtd, buf, mtd->writesize); 898 + fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize); 899 + 900 + if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL) 901 + mtd->ecc_stats.failed++; 902 + 903 + return 0; 904 + } 905 + 906 + /* ECC will be calculated automatically, and errors will be detected in 907 + * waitfunc. 908 + */ 909 + static void fsl_elbc_write_page(struct mtd_info *mtd, 910 + struct nand_chip *chip, 911 + const uint8_t *buf) 912 + { 913 + struct fsl_elbc_mtd *priv = chip->priv; 914 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 915 + 916 + fsl_elbc_write_buf(mtd, buf, mtd->writesize); 917 + fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize); 918 + 919 + ctrl->oob_poi = chip->oob_poi; 920 + } 921 + 922 + static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv) 923 + { 924 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 925 + struct elbc_regs __iomem *lbc = ctrl->regs; 926 + struct nand_chip *chip = &priv->chip; 927 + 928 + dev_dbg(priv->dev, "eLBC Set Information for bank %d\n", priv->bank); 929 + 930 + /* Fill in fsl_elbc_mtd structure */ 931 + priv->mtd.priv = chip; 932 + priv->mtd.owner = THIS_MODULE; 933 + priv->fmr = 0; /* rest filled in later */ 934 + 935 + /* fill in nand_chip structure */ 936 + /* set up function call table */ 937 + chip->read_byte = fsl_elbc_read_byte; 938 + chip->write_buf = fsl_elbc_write_buf; 939 + chip->read_buf = fsl_elbc_read_buf; 940 + chip->verify_buf = fsl_elbc_verify_buf; 941 + chip->select_chip = fsl_elbc_select_chip; 942 + chip->cmdfunc = fsl_elbc_cmdfunc; 943 + chip->waitfunc = fsl_elbc_wait; 944 + 945 + /* set up nand options */ 946 + chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR; 947 + 948 + chip->controller = &ctrl->controller; 949 + chip->priv = priv; 950 + 951 + chip->ecc.read_page = fsl_elbc_read_page; 952 + chip->ecc.write_page = fsl_elbc_write_page; 953 + 954 + /* If CS Base Register selects full hardware ECC then use it */ 955 + if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) == 956 + BR_DECC_CHK_GEN) { 957 + chip->ecc.mode = NAND_ECC_HW; 958 + /* put in small page settings and adjust later if needed */ 959 + chip->ecc.layout = (priv->fmr & FMR_ECCM) ? 
960 + &fsl_elbc_oob_sp_eccm1 : &fsl_elbc_oob_sp_eccm0; 961 + chip->ecc.size = 512; 962 + chip->ecc.bytes = 3; 963 + } else { 964 + /* otherwise fall back to default software ECC */ 965 + chip->ecc.mode = NAND_ECC_SOFT; 966 + } 967 + 968 + return 0; 969 + } 970 + 971 + static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv) 972 + { 973 + struct fsl_elbc_ctrl *ctrl = priv->ctrl; 974 + 975 + nand_release(&priv->mtd); 976 + 977 + if (priv->vbase) 978 + iounmap(priv->vbase); 979 + 980 + ctrl->chips[priv->bank] = NULL; 981 + kfree(priv); 982 + 983 + return 0; 984 + } 985 + 986 + static int fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl, 987 + struct device_node *node) 988 + { 989 + struct elbc_regs __iomem *lbc = ctrl->regs; 990 + struct fsl_elbc_mtd *priv; 991 + struct resource res; 992 + #ifdef CONFIG_MTD_PARTITIONS 993 + static const char *part_probe_types[] 994 + = { "cmdlinepart", "RedBoot", NULL }; 995 + struct mtd_partition *parts; 996 + #endif 997 + int ret; 998 + int bank; 999 + 1000 + /* get, allocate and map the memory resource */ 1001 + ret = of_address_to_resource(node, 0, &res); 1002 + if (ret) { 1003 + dev_err(ctrl->dev, "failed to get resource\n"); 1004 + return ret; 1005 + } 1006 + 1007 + /* find which chip select it is connected to */ 1008 + for (bank = 0; bank < MAX_BANKS; bank++) 1009 + if ((in_be32(&lbc->bank[bank].br) & BR_V) && 1010 + (in_be32(&lbc->bank[bank].br) & BR_MSEL) == BR_MS_FCM && 1011 + (in_be32(&lbc->bank[bank].br) & 1012 + in_be32(&lbc->bank[bank].or) & BR_BA) 1013 + == res.start) 1014 + break; 1015 + 1016 + if (bank >= MAX_BANKS) { 1017 + dev_err(ctrl->dev, "address did not match any chip selects\n"); 1018 + return -ENODEV; 1019 + } 1020 + 1021 + priv = kzalloc(sizeof(*priv), GFP_KERNEL); 1022 + if (!priv) 1023 + return -ENOMEM; 1024 + 1025 + ctrl->chips[bank] = priv; 1026 + priv->bank = bank; 1027 + priv->ctrl = ctrl; 1028 + priv->dev = ctrl->dev; 1029 + 1030 + priv->vbase = ioremap(res.start, res.end - res.start + 1); 1031 + if (!priv->vbase) { 1032 + dev_err(ctrl->dev, "failed to map chip region\n"); 1033 + ret = -ENOMEM; 1034 + goto err; 1035 + } 1036 + 1037 + ret = fsl_elbc_chip_init(priv); 1038 + if (ret) 1039 + goto err; 1040 + 1041 + ret = nand_scan_ident(&priv->mtd, 1); 1042 + if (ret) 1043 + goto err; 1044 + 1045 + ret = fsl_elbc_chip_init_tail(&priv->mtd); 1046 + if (ret) 1047 + goto err; 1048 + 1049 + ret = nand_scan_tail(&priv->mtd); 1050 + if (ret) 1051 + goto err; 1052 + 1053 + #ifdef CONFIG_MTD_PARTITIONS 1054 + /* First look for RedBoot table or partitions on the command 1055 + * line, these take precedence over device tree information */ 1056 + ret = parse_mtd_partitions(&priv->mtd, part_probe_types, &parts, 0); 1057 + if (ret < 0) 1058 + goto err; 1059 + 1060 + #ifdef CONFIG_MTD_OF_PARTS 1061 + if (ret == 0) { 1062 + ret = of_mtd_parse_partitions(priv->dev, &priv->mtd, 1063 + node, &parts); 1064 + if (ret < 0) 1065 + goto err; 1066 + } 1067 + #endif 1068 + 1069 + if (ret > 0) 1070 + add_mtd_partitions(&priv->mtd, parts, ret); 1071 + else 1072 + #endif 1073 + add_mtd_device(&priv->mtd); 1074 + 1075 + printk(KERN_INFO "eLBC NAND device at 0x%zx, bank %d\n", 1076 + res.start, priv->bank); 1077 + return 0; 1078 + 1079 + err: 1080 + fsl_elbc_chip_remove(priv); 1081 + return ret; 1082 + } 1083 + 1084 + static int __devinit fsl_elbc_ctrl_init(struct fsl_elbc_ctrl *ctrl) 1085 + { 1086 + struct elbc_regs __iomem *lbc = ctrl->regs; 1087 + 1088 + /* clear event registers */ 1089 + setbits32(&lbc->ltesr, LTESR_NAND_MASK); 1090 + 
out_be32(&lbc->lteatr, 0); 1091 + 1092 + /* Enable interrupts for any detected events */ 1093 + out_be32(&lbc->lteir, LTESR_NAND_MASK); 1094 + 1095 + ctrl->read_bytes = 0; 1096 + ctrl->index = 0; 1097 + ctrl->addr = NULL; 1098 + 1099 + return 0; 1100 + } 1101 + 1102 + static int __devexit fsl_elbc_ctrl_remove(struct of_device *ofdev) 1103 + { 1104 + struct fsl_elbc_ctrl *ctrl = dev_get_drvdata(&ofdev->dev); 1105 + int i; 1106 + 1107 + for (i = 0; i < MAX_BANKS; i++) 1108 + if (ctrl->chips[i]) 1109 + fsl_elbc_chip_remove(ctrl->chips[i]); 1110 + 1111 + if (ctrl->irq) 1112 + free_irq(ctrl->irq, ctrl); 1113 + 1114 + if (ctrl->regs) 1115 + iounmap(ctrl->regs); 1116 + 1117 + dev_set_drvdata(&ofdev->dev, NULL); 1118 + kfree(ctrl); 1119 + return 0; 1120 + } 1121 + 1122 + /* NOTE: This interrupt is also used to report other localbus events, 1123 + * such as transaction errors on other chipselects. If we want to 1124 + * capture those, we'll need to move the IRQ code into a shared 1125 + * LBC driver. 1126 + */ 1127 + 1128 + static irqreturn_t fsl_elbc_ctrl_irq(int irqno, void *data) 1129 + { 1130 + struct fsl_elbc_ctrl *ctrl = data; 1131 + struct elbc_regs __iomem *lbc = ctrl->regs; 1132 + __be32 status = in_be32(&lbc->ltesr) & LTESR_NAND_MASK; 1133 + 1134 + if (status) { 1135 + out_be32(&lbc->ltesr, status); 1136 + out_be32(&lbc->lteatr, 0); 1137 + 1138 + ctrl->irq_status = status; 1139 + smp_wmb(); 1140 + wake_up(&ctrl->irq_wait); 1141 + 1142 + return IRQ_HANDLED; 1143 + } 1144 + 1145 + return IRQ_NONE; 1146 + } 1147 + 1148 + /* fsl_elbc_ctrl_probe 1149 + * 1150 + * called by device layer when it finds a device matching 1151 + * one our driver can handled. This code allocates all of 1152 + * the resources needed for the controller only. The 1153 + * resources for the NAND banks themselves are allocated 1154 + * in the chip probe function. 
1155 + */ 1156 + 1157 + static int __devinit fsl_elbc_ctrl_probe(struct of_device *ofdev, 1158 + const struct of_device_id *match) 1159 + { 1160 + struct device_node *child; 1161 + struct fsl_elbc_ctrl *ctrl; 1162 + int ret; 1163 + 1164 + ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); 1165 + if (!ctrl) 1166 + return -ENOMEM; 1167 + 1168 + dev_set_drvdata(&ofdev->dev, ctrl); 1169 + 1170 + spin_lock_init(&ctrl->controller.lock); 1171 + init_waitqueue_head(&ctrl->controller.wq); 1172 + init_waitqueue_head(&ctrl->irq_wait); 1173 + 1174 + ctrl->regs = of_iomap(ofdev->node, 0); 1175 + if (!ctrl->regs) { 1176 + dev_err(&ofdev->dev, "failed to get memory region\n"); 1177 + ret = -ENODEV; 1178 + goto err; 1179 + } 1180 + 1181 + ctrl->irq = of_irq_to_resource(ofdev->node, 0, NULL); 1182 + if (ctrl->irq == NO_IRQ) { 1183 + dev_err(&ofdev->dev, "failed to get irq resource\n"); 1184 + ret = -ENODEV; 1185 + goto err; 1186 + } 1187 + 1188 + ctrl->dev = &ofdev->dev; 1189 + 1190 + ret = fsl_elbc_ctrl_init(ctrl); 1191 + if (ret < 0) 1192 + goto err; 1193 + 1194 + ret = request_irq(ctrl->irq, fsl_elbc_ctrl_irq, 0, "fsl-elbc", ctrl); 1195 + if (ret != 0) { 1196 + dev_err(&ofdev->dev, "failed to install irq (%d)\n", 1197 + ctrl->irq); 1198 + ret = ctrl->irq; 1199 + goto err; 1200 + } 1201 + 1202 + for_each_child_of_node(ofdev->node, child) 1203 + if (of_device_is_compatible(child, "fsl,elbc-fcm-nand")) 1204 + fsl_elbc_chip_probe(ctrl, child); 1205 + 1206 + return 0; 1207 + 1208 + err: 1209 + fsl_elbc_ctrl_remove(ofdev); 1210 + return ret; 1211 + } 1212 + 1213 + static const struct of_device_id fsl_elbc_match[] = { 1214 + { 1215 + .compatible = "fsl,elbc", 1216 + }, 1217 + {} 1218 + }; 1219 + 1220 + static struct of_platform_driver fsl_elbc_ctrl_driver = { 1221 + .driver = { 1222 + .name = "fsl-elbc", 1223 + }, 1224 + .match_table = fsl_elbc_match, 1225 + .probe = fsl_elbc_ctrl_probe, 1226 + .remove = __devexit_p(fsl_elbc_ctrl_remove), 1227 + }; 1228 + 1229 + static int __init fsl_elbc_init(void) 1230 + { 1231 + return of_register_platform_driver(&fsl_elbc_ctrl_driver); 1232 + } 1233 + 1234 + static void __exit fsl_elbc_exit(void) 1235 + { 1236 + of_unregister_platform_driver(&fsl_elbc_ctrl_driver); 1237 + } 1238 + 1239 + module_init(fsl_elbc_init); 1240 + module_exit(fsl_elbc_exit); 1241 + 1242 + MODULE_LICENSE("GPL"); 1243 + MODULE_AUTHOR("Freescale"); 1244 + MODULE_DESCRIPTION("Freescale Enhanced Local Bus Controller MTD NAND driver");
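To make the FIR opcode packing concrete, here is the large-page read programming from fsl_elbc_do_read() written out with one comment per opcode slot; everything comes from the macros defined in this file:

	out_be32(&lbc->fir,
		 (FIR_OP_CW0 << FIR_OP0_SHIFT) | /* wait, issue FCR[CMD0]: READ0 */
		 (FIR_OP_CA  << FIR_OP1_SHIFT) | /* issue column address */
		 (FIR_OP_PA  << FIR_OP2_SHIFT) | /* issue block+page address */
		 (FIR_OP_CW1 << FIR_OP3_SHIFT) | /* wait, issue FCR[CMD1]: READSTART */
		 (FIR_OP_RBW << FIR_OP4_SHIFT)); /* wait, read FBCR bytes to buffer */

Writing the bank number to LSOR then kicks off the whole sequence, and fsl_elbc_run_command() sleeps until the LTESR completion interrupt fires or the 500 ms FCM timeout expires.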
+6 -2
drivers/mtd/nand/nand_base.c
··· 2469 2469 chip->ecc.write_oob = nand_write_oob_std; 2470 2470 2471 2471 case NAND_ECC_HW_SYNDROME: 2472 - if (!chip->ecc.calculate || !chip->ecc.correct || 2473 - !chip->ecc.hwctl) { 2472 + if ((!chip->ecc.calculate || !chip->ecc.correct || 2473 + !chip->ecc.hwctl) && 2474 + (!chip->ecc.read_page || 2475 + chip->ecc.read_page == nand_read_page_hwecc || 2476 + !chip->ecc.write_page || 2477 + chip->ecc.write_page == nand_write_page_hwecc)) { 2474 2478 printk(KERN_WARNING "No ECC functions supplied, " 2475 2479 "Hardware ECC not possible\n"); 2476 2480 BUG();
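The widened nand_base check reads more naturally de-Morganed; this is an equivalent restatement, not kernel code: hardware ECC is now accepted either when the low-level hooks exist, or when the driver overrides both page accessors itself (as fsl_elbc_nand does):

	int has_calc_hooks  = chip->ecc.calculate && chip->ecc.correct &&
			      chip->ecc.hwctl;
	int has_own_page_io = chip->ecc.read_page &&
			      chip->ecc.read_page  != nand_read_page_hwecc &&
			      chip->ecc.write_page &&
			      chip->ecc.write_page != nand_write_page_hwecc;

	if (!has_calc_hooks && !has_own_page_io)
		BUG();	/* hardware ECC not possible */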
+171
drivers/mtd/nand/orion_nand.c
··· 1 + /* 2 + * drivers/mtd/nand/orion_nand.c 3 + * 4 + * NAND support for Marvell Orion SoC platforms 5 + * 6 + * Tzachi Perelstein <tzachi@marvell.com> 7 + * 8 + * This file is licensed under the terms of the GNU General Public 9 + * License version 2. This program is licensed "as is" without any 10 + * warranty of any kind, whether express or implied. 11 + */ 12 + 13 + #include <linux/slab.h> 14 + #include <linux/module.h> 15 + #include <linux/platform_device.h> 16 + #include <linux/mtd/mtd.h> 17 + #include <linux/mtd/nand.h> 18 + #include <linux/mtd/partitions.h> 19 + #include <asm/io.h> 20 + #include <asm/sizes.h> 21 + #include <asm/arch/platform.h> 22 + #include <asm/arch/hardware.h> 23 + 24 + #ifdef CONFIG_MTD_CMDLINE_PARTS 25 + static const char *part_probes[] = { "cmdlinepart", NULL }; 26 + #endif 27 + 28 + static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) 29 + { 30 + struct nand_chip *nc = mtd->priv; 31 + struct orion_nand_data *board = nc->priv; 32 + u32 offs; 33 + 34 + if (cmd == NAND_CMD_NONE) 35 + return; 36 + 37 + if (ctrl & NAND_CLE) 38 + offs = (1 << board->cle); 39 + else if (ctrl & NAND_ALE) 40 + offs = (1 << board->ale); 41 + else 42 + return; 43 + 44 + if (nc->options & NAND_BUSWIDTH_16) 45 + offs <<= 1; 46 + 47 + writeb(cmd, nc->IO_ADDR_W + offs); 48 + } 49 + 50 + static int __init orion_nand_probe(struct platform_device *pdev) 51 + { 52 + struct mtd_info *mtd; 53 + struct nand_chip *nc; 54 + struct orion_nand_data *board; 55 + void __iomem *io_base; 56 + int ret = 0; 57 + #ifdef CONFIG_MTD_PARTITIONS 58 + struct mtd_partition *partitions = NULL; 59 + int num_part = 0; 60 + #endif 61 + 62 + nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL); 63 + if (!nc) { 64 + printk(KERN_ERR "orion_nand: failed to allocate device structure.\n"); 65 + ret = -ENOMEM; 66 + goto no_res; 67 + } 68 + mtd = (struct mtd_info *)(nc + 1); 69 + 70 + io_base = ioremap(pdev->resource[0].start, 71 + pdev->resource[0].end - pdev->resource[0].start + 1); 72 + if (!io_base) { 73 + printk(KERN_ERR "orion_nand: ioremap failed\n"); 74 + ret = -EIO; 75 + goto no_res; 76 + } 77 + 78 + board = pdev->dev.platform_data; 79 + 80 + mtd->priv = nc; 81 + mtd->owner = THIS_MODULE; 82 + 83 + nc->priv = board; 84 + nc->IO_ADDR_R = nc->IO_ADDR_W = io_base; 85 + nc->cmd_ctrl = orion_nand_cmd_ctrl; 86 + nc->ecc.mode = NAND_ECC_SOFT; 87 + 88 + if (board->width == 16) 89 + nc->options |= NAND_BUSWIDTH_16; 90 + 91 + platform_set_drvdata(pdev, mtd); 92 + 93 + if (nand_scan(mtd, 1)) { 94 + ret = -ENXIO; 95 + goto no_dev; 96 + } 97 + 98 + #ifdef CONFIG_MTD_PARTITIONS 99 + #ifdef CONFIG_MTD_CMDLINE_PARTS 100 + mtd->name = "orion_nand"; 101 + num_part = parse_mtd_partitions(mtd, part_probes, &partitions, 0); 102 + #endif 103 + /* If cmdline partitions have been passed, let them be used */ 104 + if (num_part <= 0) { 105 + num_part = board->nr_parts; 106 + partitions = board->parts; 107 + } 108 + 109 + if (partitions && num_part > 0) 110 + ret = add_mtd_partitions(mtd, partitions, num_part); 111 + else 112 + ret = add_mtd_device(mtd); 113 + #else 114 + ret = add_mtd_device(mtd); 115 + #endif 116 + 117 + if (ret) { 118 + nand_release(mtd); 119 + goto no_dev; 120 + } 121 + 122 + return 0; 123 + 124 + no_dev: 125 + platform_set_drvdata(pdev, NULL); 126 + iounmap(io_base); 127 + no_res: 128 + kfree(nc); 129 + 130 + return ret; 131 + } 132 + 133 + static int __devexit orion_nand_remove(struct platform_device *pdev) 134 + { 135 + struct mtd_info *mtd = 
platform_get_drvdata(pdev); 136 + struct nand_chip *nc = mtd->priv; 137 + 138 + nand_release(mtd); 139 + 140 + iounmap(nc->IO_ADDR_W); 141 + 142 + kfree(nc); 143 + 144 + return 0; 145 + } 146 + 147 + static struct platform_driver orion_nand_driver = { 148 + .probe = orion_nand_probe, 149 + .remove = orion_nand_remove, 150 + .driver = { 151 + .name = "orion_nand", 152 + .owner = THIS_MODULE, 153 + }, 154 + }; 155 + 156 + static int __init orion_nand_init(void) 157 + { 158 + return platform_driver_register(&orion_nand_driver); 159 + } 160 + 161 + static void __exit orion_nand_exit(void) 162 + { 163 + platform_driver_unregister(&orion_nand_driver); 164 + } 165 + 166 + module_init(orion_nand_init); 167 + module_exit(orion_nand_exit); 168 + 169 + MODULE_LICENSE("GPL"); 170 + MODULE_AUTHOR("Tzachi Perelstein"); 171 + MODULE_DESCRIPTION("NAND glue for Orion platforms");
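Board support plugs into orion_nand through platform data; the driver dereferences cle, ale, width, parts and nr_parts. A hypothetical board fragment (names and values are illustrative only):

	static struct mtd_partition my_nand_parts[] = {
		{ .name = "u-boot", .offset = 0,     .size = SZ_1M },
		{ .name = "rootfs", .offset = SZ_1M, .size = MTDPART_SIZ_FULL },
	};

	static struct orion_nand_data my_nand_data = {
		.cle      = 0,	/* address bit wired to CLE */
		.ale      = 1,	/* address bit wired to ALE */
		.width    = 8,	/* 16 would set NAND_BUSWIDTH_16 */
		.parts    = my_nand_parts,
		.nr_parts = ARRAY_SIZE(my_nand_parts),
	};

Command and address latching then costs no GPIOs: orion_nand_cmd_ctrl() simply writes the byte to the data window offset by 1 << cle or 1 << ale, doubled on a 16-bit bus.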
+243
drivers/mtd/nand/pasemi_nand.c
··· 1 + /* 2 + * Copyright (C) 2006-2007 PA Semi, Inc 3 + * 4 + * Author: Egor Martovetsky <egor@pasemi.com> 5 + * Maintained by: Olof Johansson <olof@lixom.net> 6 + * 7 + * Driver for the PWRficient onchip NAND flash interface 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License version 2 as 11 + * published by the Free Software Foundation. 12 + * 13 + * This program is distributed in the hope that it will be useful, 14 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 + * GNU General Public License for more details. 17 + * 18 + * You should have received a copy of the GNU General Public License 19 + * along with this program; if not, write to the Free Software 20 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 + */ 22 + 23 + #undef DEBUG 24 + 25 + #include <linux/slab.h> 26 + #include <linux/init.h> 27 + #include <linux/module.h> 28 + #include <linux/mtd/mtd.h> 29 + #include <linux/mtd/nand.h> 30 + #include <linux/mtd/nand_ecc.h> 31 + #include <linux/of_platform.h> 32 + #include <linux/platform_device.h> 33 + #include <linux/pci.h> 34 + 35 + #include <asm/io.h> 36 + 37 + #define LBICTRL_LPCCTL_NR 0x00004000 38 + #define CLE_PIN_CTL 15 39 + #define ALE_PIN_CTL 14 40 + 41 + static unsigned int lpcctl; 42 + static struct mtd_info *pasemi_nand_mtd; 43 + static const char driver_name[] = "pasemi-nand"; 44 + 45 + static void pasemi_read_buf(struct mtd_info *mtd, u_char *buf, int len) 46 + { 47 + struct nand_chip *chip = mtd->priv; 48 + 49 + while (len > 0x800) { 50 + memcpy_fromio(buf, chip->IO_ADDR_R, 0x800); 51 + buf += 0x800; 52 + len -= 0x800; 53 + } 54 + memcpy_fromio(buf, chip->IO_ADDR_R, len); 55 + } 56 + 57 + static void pasemi_write_buf(struct mtd_info *mtd, const u_char *buf, int len) 58 + { 59 + struct nand_chip *chip = mtd->priv; 60 + 61 + while (len > 0x800) { 62 + memcpy_toio(chip->IO_ADDR_R, buf, 0x800); 63 + buf += 0x800; 64 + len -= 0x800; 65 + } 66 + memcpy_toio(chip->IO_ADDR_R, buf, len); 67 + } 68 + 69 + static void pasemi_hwcontrol(struct mtd_info *mtd, int cmd, 70 + unsigned int ctrl) 71 + { 72 + struct nand_chip *chip = mtd->priv; 73 + 74 + if (cmd == NAND_CMD_NONE) 75 + return; 76 + 77 + if (ctrl & NAND_CLE) 78 + out_8(chip->IO_ADDR_W + (1 << CLE_PIN_CTL), cmd); 79 + else 80 + out_8(chip->IO_ADDR_W + (1 << ALE_PIN_CTL), cmd); 81 + 82 + /* Push out posted writes */ 83 + eieio(); 84 + inl(lpcctl); 85 + } 86 + 87 + int pasemi_device_ready(struct mtd_info *mtd) 88 + { 89 + return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR); 90 + } 91 + 92 + static int __devinit pasemi_nand_probe(struct of_device *ofdev, 93 + const struct of_device_id *match) 94 + { 95 + struct pci_dev *pdev; 96 + struct device_node *np = ofdev->node; 97 + struct resource res; 98 + struct nand_chip *chip; 99 + int err = 0; 100 + 101 + err = of_address_to_resource(np, 0, &res); 102 + 103 + if (err) 104 + return -EINVAL; 105 + 106 + /* We only support one device at the moment */ 107 + if (pasemi_nand_mtd) 108 + return -ENODEV; 109 + 110 + pr_debug("pasemi_nand at %lx-%lx\n", res.start, res.end); 111 + 112 + /* Allocate memory for MTD device structure and private data */ 113 + pasemi_nand_mtd = kzalloc(sizeof(struct mtd_info) + 114 + sizeof(struct nand_chip), GFP_KERNEL); 115 + if (!pasemi_nand_mtd) { 116 + printk(KERN_WARNING 117 + "Unable to allocate PASEMI NAND MTD device structure\n"); 118 + err = -ENOMEM; 119 + 
goto out; 120 + } 121 + 122 + /* Get pointer to private data */ 123 + chip = (struct nand_chip *)&pasemi_nand_mtd[1]; 124 + 125 + /* Link the private data with the MTD structure */ 126 + pasemi_nand_mtd->priv = chip; 127 + pasemi_nand_mtd->owner = THIS_MODULE; 128 + 129 + chip->IO_ADDR_R = of_iomap(np, 0); 130 + chip->IO_ADDR_W = chip->IO_ADDR_R; 131 + 132 + if (!chip->IO_ADDR_R) { 133 + err = -EIO; 134 + goto out_mtd; 135 + } 136 + 137 + pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa008, NULL); 138 + if (!pdev) { 139 + err = -ENODEV; 140 + goto out_ior; 141 + } 142 + 143 + lpcctl = pci_resource_start(pdev, 0); 144 + 145 + if (!request_region(lpcctl, 4, driver_name)) { 146 + err = -EBUSY; 147 + goto out_ior; 148 + } 149 + 150 + chip->cmd_ctrl = pasemi_hwcontrol; 151 + chip->dev_ready = pasemi_device_ready; 152 + chip->read_buf = pasemi_read_buf; 153 + chip->write_buf = pasemi_write_buf; 154 + chip->chip_delay = 0; 155 + chip->ecc.mode = NAND_ECC_SOFT; 156 + 157 + /* Enable the following for a flash based bad block table */ 158 + chip->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR; 159 + 160 + /* Scan to find existance of the device */ 161 + if (nand_scan(pasemi_nand_mtd, 1)) { 162 + err = -ENXIO; 163 + goto out_lpc; 164 + } 165 + 166 + if (add_mtd_device(pasemi_nand_mtd)) { 167 + printk(KERN_ERR "pasemi_nand: Unable to register MTD device\n"); 168 + err = -ENODEV; 169 + goto out_lpc; 170 + } 171 + 172 + printk(KERN_INFO "PA Semi NAND flash at %08lx, control at I/O %x\n", 173 + res.start, lpcctl); 174 + 175 + return 0; 176 + 177 + out_lpc: 178 + release_region(lpcctl, 4); 179 + out_ior: 180 + iounmap(chip->IO_ADDR_R); 181 + out_mtd: 182 + kfree(pasemi_nand_mtd); 183 + out: 184 + return err; 185 + } 186 + 187 + static int __devexit pasemi_nand_remove(struct of_device *ofdev) 188 + { 189 + struct nand_chip *chip; 190 + 191 + if (!pasemi_nand_mtd) 192 + return 0; 193 + 194 + chip = pasemi_nand_mtd->priv; 195 + 196 + /* Release resources, unregister device */ 197 + nand_release(pasemi_nand_mtd); 198 + 199 + release_region(lpcctl, 4); 200 + 201 + iounmap(chip->IO_ADDR_R); 202 + 203 + /* Free the MTD device structure */ 204 + kfree(pasemi_nand_mtd); 205 + 206 + pasemi_nand_mtd = NULL; 207 + 208 + return 0; 209 + } 210 + 211 + static struct of_device_id pasemi_nand_match[] = 212 + { 213 + { 214 + .compatible = "pasemi,localbus-nand", 215 + }, 216 + {}, 217 + }; 218 + 219 + MODULE_DEVICE_TABLE(of, pasemi_nand_match); 220 + 221 + static struct of_platform_driver pasemi_nand_driver = 222 + { 223 + .name = (char*)driver_name, 224 + .match_table = pasemi_nand_match, 225 + .probe = pasemi_nand_probe, 226 + .remove = pasemi_nand_remove, 227 + }; 228 + 229 + static int __init pasemi_nand_init(void) 230 + { 231 + return of_register_platform_driver(&pasemi_nand_driver); 232 + } 233 + module_init(pasemi_nand_init); 234 + 235 + static void __exit pasemi_nand_exit(void) 236 + { 237 + of_unregister_platform_driver(&pasemi_nand_driver); 238 + } 239 + module_exit(pasemi_nand_exit); 240 + 241 + MODULE_LICENSE("GPL"); 242 + MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>"); 243 + MODULE_DESCRIPTION("NAND flash interface driver for PA Semi PWRficient");
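The pasemi control path is worth restating: CLE and ALE are wired to localbus address bits 15 and 14, so a command or address cycle is just a byte write to the right offset, followed by a read to flush the posted write (this fragment is lifted from pasemi_hwcontrol() above):

	if (ctrl & NAND_CLE)
		out_8(chip->IO_ADDR_W + (1 << CLE_PIN_CTL), cmd); /* bit 15 */
	else
		out_8(chip->IO_ADDR_W + (1 << ALE_PIN_CTL), cmd); /* bit 14 */

	eieio();	/* order the MMIO write ... */
	inl(lpcctl);	/* ... and flush it with a register read */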
+2
drivers/mtd/nand/plat_nand.c
··· 110 110 static int __devexit plat_nand_remove(struct platform_device *pdev) 111 111 { 112 112 struct plat_nand_data *data = platform_get_drvdata(pdev); 113 + #ifdef CONFIG_MTD_PARTITIONS 113 114 struct platform_nand_data *pdata = pdev->dev.platform_data; 115 + #endif 114 116 115 117 nand_release(&data->mtd); 116 118 #ifdef CONFIG_MTD_PARTITIONS
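The added #ifdef in plat_nand_remove() only silences an unused-variable warning when CONFIG_MTD_PARTITIONS is off, since pdata is referenced solely inside the existing partitions block. An alternative that avoids the extra preprocessor lines, offered as a suggestion rather than what the patch does, is the kernel's __maybe_unused annotation:

	struct platform_nand_data *pdata __maybe_unused =
				pdev->dev.platform_data;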
+33 -15
drivers/mtd/nand/s3c2410.c
··· 120 120 int sel_bit; 121 121 int mtd_count; 122 122 123 + unsigned long save_nfconf; 124 + 123 125 enum s3c_cpu_type cpu_type; 124 126 }; 125 127 ··· 366 364 ((diff2 ^ (diff2 >> 1)) & 0x55) == 0x55) { 367 365 /* calculate the bit position of the error */ 368 366 369 - bit = (diff2 >> 2) & 1; 370 - bit |= (diff2 >> 3) & 2; 371 - bit |= (diff2 >> 4) & 4; 367 + bit = ((diff2 >> 3) & 1) | 368 + ((diff2 >> 4) & 2) | 369 + ((diff2 >> 5) & 4); 372 370 373 371 /* calculate the byte position of the error */ 374 372 375 - byte = (diff1 << 1) & 0x80; 376 - byte |= (diff1 << 2) & 0x40; 377 - byte |= (diff1 << 3) & 0x20; 378 - byte |= (diff1 << 4) & 0x10; 379 - 380 - byte |= (diff0 >> 3) & 0x08; 381 - byte |= (diff0 >> 2) & 0x04; 382 - byte |= (diff0 >> 1) & 0x02; 383 - byte |= (diff0 >> 0) & 0x01; 384 - 385 - byte |= (diff2 << 8) & 0x100; 373 + byte = ((diff2 << 7) & 0x100) | 374 + ((diff1 << 0) & 0x80) | 375 + ((diff1 << 1) & 0x40) | 376 + ((diff1 << 2) & 0x20) | 377 + ((diff1 << 3) & 0x10) | 378 + ((diff0 >> 4) & 0x08) | 379 + ((diff0 >> 3) & 0x04) | 380 + ((diff0 >> 2) & 0x02) | 381 + ((diff0 >> 1) & 0x01); 386 382 387 383 dev_dbg(info->device, "correcting error bit %d, byte %d\n", 388 384 bit, byte); ··· 399 399 if ((diff0 & ~(1<<fls(diff0))) == 0) 400 400 return 1; 401 401 402 - return 0; 402 + return -1; 403 403 } 404 404 405 405 /* ECC functions ··· 810 810 struct s3c2410_nand_info *info = platform_get_drvdata(dev); 811 811 812 812 if (info) { 813 + info->save_nfconf = readl(info->regs + S3C2410_NFCONF); 814 + 815 + /* For the moment, we must ensure nFCE is high during 816 + * the time we are suspended. This really should be 817 + * handled by suspending the MTDs we are using, but 818 + * that is currently not the case. */ 819 + 820 + writel(info->save_nfconf | info->sel_bit, 821 + info->regs + S3C2410_NFCONF); 822 + 813 823 if (!allow_clk_stop(info)) 814 824 clk_disable(info->clk); 815 825 } ··· 830 820 static int s3c24xx_nand_resume(struct platform_device *dev) 831 821 { 832 822 struct s3c2410_nand_info *info = platform_get_drvdata(dev); 823 + unsigned long nfconf; 833 824 834 825 if (info) { 835 826 clk_enable(info->clk); 836 827 s3c2410_nand_inithw(info, dev); 828 + 829 + /* Restore the state of the nFCE line. */ 830 + 831 + nfconf = readl(info->regs + S3C2410_NFCONF); 832 + nfconf &= ~info->sel_bit; 833 + nfconf |= info->save_nfconf & info->sel_bit; 834 + writel(nfconf, info->regs + S3C2410_NFCONF); 837 835 838 836 if (allow_clk_stop(info)) 839 837 clk_disable(info->clk);
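Two things happen in the s3c2410 hunk: the bit/byte extraction for a single-bit ECC error is re-derived (the old shifts were each off by one position), and the final fall-through now returns -1 so a genuinely uncorrectable result is reported as an error rather than as success. Once (byte, bit) are decoded, the repair is the conventional single bit flip; the driver performs the equivalent of this right after the dev_dbg() (shown as a sketch, assuming dat is the data buffer argument):

	dat[byte] ^= (1 << bit);
	return 1;	/* one corrected bitflip */

The suspend/resume half of the hunk saves NFCONF and forces nFCE high across suspend, then on resume restores only the saved chip-select bit so the freshly reinitialised configuration is not clobbered.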
+74
drivers/mtd/ofpart.c
··· 1 + /* 2 + * Flash partitions described by the OF (or flattened) device tree 3 + * 4 + * Copyright (C) 2006 MontaVista Software Inc. 5 + * Author: Vitaly Wool <vwool@ru.mvista.com> 6 + * 7 + * Revised to handle newer style flash binding by: 8 + * Copyright (C) 2007 David Gibson, IBM Corporation. 9 + * 10 + * This program is free software; you can redistribute it and/or modify it 11 + * under the terms of the GNU General Public License as published by the 12 + * Free Software Foundation; either version 2 of the License, or (at your 13 + * option) any later version. 14 + */ 15 + 16 + #include <linux/module.h> 17 + #include <linux/init.h> 18 + #include <linux/of.h> 19 + #include <linux/mtd/mtd.h> 20 + #include <linux/mtd/partitions.h> 21 + 22 + int __devinit of_mtd_parse_partitions(struct device *dev, 23 + struct mtd_info *mtd, 24 + struct device_node *node, 25 + struct mtd_partition **pparts) 26 + { 27 + const char *partname; 28 + struct device_node *pp; 29 + int nr_parts, i; 30 + 31 + /* First count the subnodes */ 32 + pp = NULL; 33 + nr_parts = 0; 34 + while ((pp = of_get_next_child(node, pp))) 35 + nr_parts++; 36 + 37 + if (nr_parts == 0) 38 + return 0; 39 + 40 + *pparts = kzalloc(nr_parts * sizeof(**pparts), GFP_KERNEL); 41 + if (!*pparts) 42 + return -ENOMEM; 43 + 44 + pp = NULL; 45 + i = 0; 46 + while ((pp = of_get_next_child(node, pp))) { 47 + const u32 *reg; 48 + int len; 49 + 50 + reg = of_get_property(pp, "reg", &len); 51 + if (!reg || (len != 2 * sizeof(u32))) { 52 + of_node_put(pp); 53 + dev_err(dev, "Invalid 'reg' on %s\n", node->full_name); 54 + kfree(*pparts); 55 + *pparts = NULL; 56 + return -EINVAL; 57 + } 58 + (*pparts)[i].offset = reg[0]; 59 + (*pparts)[i].size = reg[1]; 60 + 61 + partname = of_get_property(pp, "label", &len); 62 + if (!partname) 63 + partname = of_get_property(pp, "name", &len); 64 + (*pparts)[i].name = (char *)partname; 65 + 66 + if (of_get_property(pp, "read-only", &len)) 67 + (*pparts)[i].mask_flags = MTD_WRITEABLE; 68 + 69 + i++; 70 + } 71 + 72 + return nr_parts; 73 + } 74 + EXPORT_SYMBOL(of_mtd_parse_partitions);
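Callers of of_mtd_parse_partitions() are expected to fall back to whole-device registration when the flash node has no partition children; the fsl_elbc_nand.c probe earlier in this merge does exactly that. Condensed usage:

	struct mtd_partition *parts;
	int nr = of_mtd_parse_partitions(dev, mtd, node, &parts);

	if (nr > 0)
		add_mtd_partitions(mtd, parts, nr);
	else if (nr == 0)
		add_mtd_device(mtd);	/* node describes no partitions */
	else
		return nr;		/* bad reg/label property */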
+158 -41
drivers/mtd/onenand/onenand_base.c
··· 18 18 #include <linux/module.h> 19 19 #include <linux/init.h> 20 20 #include <linux/sched.h> 21 + #include <linux/delay.h> 21 22 #include <linux/interrupt.h> 22 23 #include <linux/jiffies.h> 23 24 #include <linux/mtd/mtd.h> ··· 171 170 } 172 171 173 172 /** 173 + * onenand_get_density - [DEFAULT] Get OneNAND density 174 + * @param dev_id OneNAND device ID 175 + * 176 + * Get OneNAND density from device ID 177 + */ 178 + static inline int onenand_get_density(int dev_id) 179 + { 180 + int density = dev_id >> ONENAND_DEVICE_DENSITY_SHIFT; 181 + return (density & ONENAND_DEVICE_DENSITY_MASK); 182 + } 183 + 184 + /** 174 185 * onenand_command - [DEFAULT] Send command to OneNAND device 175 186 * @param mtd MTD device structure 176 187 * @param cmd the command to be sent ··· 195 182 static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t len) 196 183 { 197 184 struct onenand_chip *this = mtd->priv; 198 - int value, readcmd = 0, block_cmd = 0; 199 - int block, page; 185 + int value, block, page; 200 186 201 187 /* Address translation */ 202 188 switch (cmd) { ··· 210 198 case ONENAND_CMD_ERASE: 211 199 case ONENAND_CMD_BUFFERRAM: 212 200 case ONENAND_CMD_OTP_ACCESS: 213 - block_cmd = 1; 214 201 block = (int) (addr >> this->erase_shift); 215 202 page = -1; 216 203 break; ··· 251 240 value = onenand_block_address(this, block); 252 241 this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1); 253 242 254 - if (block_cmd) { 255 - /* Select DataRAM for DDP */ 256 - value = onenand_bufferram_address(this, block); 257 - this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2); 258 - } 243 + /* Select DataRAM for DDP */ 244 + value = onenand_bufferram_address(this, block); 245 + this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2); 259 246 } 260 247 261 248 if (page != -1) { ··· 265 256 case ONENAND_CMD_READ: 266 257 case ONENAND_CMD_READOOB: 267 258 dataram = ONENAND_SET_NEXT_BUFFERRAM(this); 268 - readcmd = 1; 269 259 break; 270 260 271 261 default: ··· 281 273 /* Write 'BSA, BSC' of DataRAM */ 282 274 value = onenand_buffer_address(dataram, sectors, count); 283 275 this->write_word(value, this->base + ONENAND_REG_START_BUFFER); 284 - 285 - if (readcmd) { 286 - /* Select DataRAM for DDP */ 287 - value = onenand_bufferram_address(this, block); 288 - this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2); 289 - } 290 276 } 291 277 292 278 /* Interrupt clear */ ··· 857 855 this->command(mtd, ONENAND_CMD_READ, from, writesize); 858 856 ret = this->wait(mtd, FL_READING); 859 857 onenand_update_bufferram(mtd, from, !ret); 858 + if (ret == -EBADMSG) 859 + ret = 0; 860 860 } 861 861 } 862 862 ··· 917 913 /* Now wait for load */ 918 914 ret = this->wait(mtd, FL_READING); 919 915 onenand_update_bufferram(mtd, from, !ret); 916 + if (ret == -EBADMSG) 917 + ret = 0; 920 918 } 921 919 922 920 /* ··· 929 923 ops->retlen = read; 930 924 ops->oobretlen = oobread; 931 925 932 - if (mtd->ecc_stats.failed - stats.failed) 933 - return -EBADMSG; 934 - 935 926 if (ret) 936 927 return ret; 928 + 929 + if (mtd->ecc_stats.failed - stats.failed) 930 + return -EBADMSG; 937 931 938 932 return mtd->ecc_stats.corrected - stats.corrected ? 
-EUCLEAN : 0; 939 933 } ··· 950 944 struct mtd_oob_ops *ops) 951 945 { 952 946 struct onenand_chip *this = mtd->priv; 947 + struct mtd_ecc_stats stats; 953 948 int read = 0, thislen, column, oobsize; 954 949 size_t len = ops->ooblen; 955 950 mtd_oob_mode_t mode = ops->mode; ··· 984 977 return -EINVAL; 985 978 } 986 979 980 + stats = mtd->ecc_stats; 981 + 987 982 while (read < len) { 988 983 cond_resched(); 989 984 ··· 997 988 onenand_update_bufferram(mtd, from, 0); 998 989 999 990 ret = this->wait(mtd, FL_READING); 1000 - /* First copy data and check return value for ECC handling */ 991 + if (ret && ret != -EBADMSG) { 992 + printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret); 993 + break; 994 + } 1001 995 1002 996 if (mode == MTD_OOB_AUTO) 1003 997 onenand_transfer_auto_oob(mtd, buf, column, thislen); 1004 998 else 1005 999 this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen); 1006 - 1007 - if (ret) { 1008 - printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret); 1009 - break; 1010 - } 1011 1000 1012 1001 read += thislen; 1013 1002 ··· 1023 1016 } 1024 1017 1025 1018 ops->oobretlen = read; 1026 - return ret; 1019 + 1020 + if (ret) 1021 + return ret; 1022 + 1023 + if (mtd->ecc_stats.failed - stats.failed) 1024 + return -EBADMSG; 1025 + 1026 + return 0; 1027 1027 } 1028 1028 1029 1029 /** ··· 1120 1106 interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT); 1121 1107 ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS); 1122 1108 1109 + /* Initial bad block case: 0x2400 or 0x0400 */ 1123 1110 if (ctrl & ONENAND_CTRL_ERROR) { 1124 1111 printk(KERN_DEBUG "onenand_bbt_wait: controller error = 0x%04x\n", ctrl); 1125 - /* Initial bad block case */ 1126 - if (ctrl & ONENAND_CTRL_LOAD) 1127 - return ONENAND_BBT_READ_ERROR; 1128 - return ONENAND_BBT_READ_FATAL_ERROR; 1112 + return ONENAND_BBT_READ_ERROR; 1129 1113 } 1130 1114 1131 1115 if (interrupt & ONENAND_INT_READ) { ··· 1218 1206 static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to) 1219 1207 { 1220 1208 struct onenand_chip *this = mtd->priv; 1221 - char oobbuf[64]; 1209 + u_char *oob_buf = this->oob_buf; 1222 1210 int status, i; 1223 1211 1224 1212 this->command(mtd, ONENAND_CMD_READOOB, to, mtd->oobsize); ··· 1227 1215 if (status) 1228 1216 return status; 1229 1217 1230 - this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize); 1218 + this->read_bufferram(mtd, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize); 1231 1219 for (i = 0; i < mtd->oobsize; i++) 1232 - if (buf[i] != 0xFF && buf[i] != oobbuf[i]) 1220 + if (buf[i] != 0xFF && buf[i] != oob_buf[i]) 1233 1221 return -EBADMSG; 1234 1222 1235 1223 return 0; ··· 1284 1272 #endif 1285 1273 1286 1274 #define NOTALIGNED(x) ((x & (this->subpagesize - 1)) != 0) 1275 + 1276 + static void onenand_panic_wait(struct mtd_info *mtd) 1277 + { 1278 + struct onenand_chip *this = mtd->priv; 1279 + unsigned int interrupt; 1280 + int i; 1281 + 1282 + for (i = 0; i < 2000; i++) { 1283 + interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT); 1284 + if (interrupt & ONENAND_INT_MASTER) 1285 + break; 1286 + udelay(10); 1287 + } 1288 + } 1289 + 1290 + /** 1291 + * onenand_panic_write - [MTD Interface] write buffer to FLASH in a panic context 1292 + * @param mtd MTD device structure 1293 + * @param to offset to write to 1294 + * @param len number of bytes to write 1295 + * @param retlen pointer to variable to store the number of written bytes 1296 + * @param buf the data to write 1297 + * 1298 + * Write with 
ECC 1299 + */ 1300 + static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len, 1301 + size_t *retlen, const u_char *buf) 1302 + { 1303 + struct onenand_chip *this = mtd->priv; 1304 + int column, subpage; 1305 + int written = 0; 1306 + int ret = 0; 1307 + 1308 + if (this->state == FL_PM_SUSPENDED) 1309 + return -EBUSY; 1310 + 1311 + /* Wait for any existing operation to clear */ 1312 + onenand_panic_wait(mtd); 1313 + 1314 + DEBUG(MTD_DEBUG_LEVEL3, "onenand_panic_write: to = 0x%08x, len = %i\n", 1315 + (unsigned int) to, (int) len); 1316 + 1317 + /* Initialize retlen, in case of early exit */ 1318 + *retlen = 0; 1319 + 1320 + /* Do not allow writes past end of device */ 1321 + if (unlikely((to + len) > mtd->size)) { 1322 + printk(KERN_ERR "onenand_panic_write: Attempt write to past end of device\n"); 1323 + return -EINVAL; 1324 + } 1325 + 1326 + /* Reject writes, which are not page aligned */ 1327 + if (unlikely(NOTALIGNED(to)) || unlikely(NOTALIGNED(len))) { 1328 + printk(KERN_ERR "onenand_panic_write: Attempt to write not page aligned data\n"); 1329 + return -EINVAL; 1330 + } 1331 + 1332 + column = to & (mtd->writesize - 1); 1333 + 1334 + /* Loop until all data write */ 1335 + while (written < len) { 1336 + int thislen = min_t(int, mtd->writesize - column, len - written); 1337 + u_char *wbuf = (u_char *) buf; 1338 + 1339 + this->command(mtd, ONENAND_CMD_BUFFERRAM, to, thislen); 1340 + 1341 + /* Partial page write */ 1342 + subpage = thislen < mtd->writesize; 1343 + if (subpage) { 1344 + memset(this->page_buf, 0xff, mtd->writesize); 1345 + memcpy(this->page_buf + column, buf, thislen); 1346 + wbuf = this->page_buf; 1347 + } 1348 + 1349 + this->write_bufferram(mtd, ONENAND_DATARAM, wbuf, 0, mtd->writesize); 1350 + this->write_bufferram(mtd, ONENAND_SPARERAM, ffchars, 0, mtd->oobsize); 1351 + 1352 + this->command(mtd, ONENAND_CMD_PROG, to, mtd->writesize); 1353 + 1354 + onenand_panic_wait(mtd); 1355 + 1356 + /* In partial page write we don't update bufferram */ 1357 + onenand_update_bufferram(mtd, to, !ret && !subpage); 1358 + if (ONENAND_IS_2PLANE(this)) { 1359 + ONENAND_SET_BUFFERRAM1(this); 1360 + onenand_update_bufferram(mtd, to + this->writesize, !ret && !subpage); 1361 + } 1362 + 1363 + if (ret) { 1364 + printk(KERN_ERR "onenand_panic_write: write failed %d\n", ret); 1365 + break; 1366 + } 1367 + 1368 + written += thislen; 1369 + 1370 + if (written == len) 1371 + break; 1372 + 1373 + column = 0; 1374 + to += thislen; 1375 + buf += thislen; 1376 + } 1377 + 1378 + *retlen = written; 1379 + return ret; 1380 + } 1287 1381 1288 1382 /** 1289 1383 * onenand_fill_auto_oob - [Internal] oob auto-placement transfer ··· 1537 1419 } 1538 1420 1539 1421 /* Only check verify write turn on */ 1540 - ret = onenand_verify(mtd, (u_char *) wbuf, to, thislen); 1422 + ret = onenand_verify(mtd, buf, to, thislen); 1541 1423 if (ret) { 1542 1424 printk(KERN_ERR "onenand_write_ops_nolock: verify failed %d\n", ret); 1543 1425 break; ··· 1552 1434 to += thislen; 1553 1435 buf += thislen; 1554 1436 } 1555 - 1556 - /* Deselect and wake up anyone waiting on the device */ 1557 - onenand_release_device(mtd); 1558 1437 1559 1438 ops->retlen = written; 1560 1439 ··· 2263 2148 2264 2149 *retlen = 0; 2265 2150 2266 - density = this->device_id >> ONENAND_DEVICE_DENSITY_SHIFT; 2151 + density = onenand_get_density(this->device_id); 2267 2152 if (density < ONENAND_DEVICE_DENSITY_512Mb) 2268 2153 otp_pages = 20; 2269 2154 else ··· 2414 2299 static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t 
from, 2415 2300 size_t len) 2416 2301 { 2417 - unsigned char oob_buf[64]; 2302 + struct onenand_chip *this = mtd->priv; 2303 + u_char *oob_buf = this->oob_buf; 2418 2304 size_t retlen; 2419 2305 int ret; 2420 2306 ··· 2455 2339 unsigned int density, process; 2456 2340 2457 2341 /* Lock scheme depends on density and process */ 2458 - density = this->device_id >> ONENAND_DEVICE_DENSITY_SHIFT; 2342 + density = onenand_get_density(this->device_id); 2459 2343 process = this->version_id >> ONENAND_VERSION_PROCESS_SHIFT; 2460 2344 2461 2345 /* Lock scheme */ ··· 2504 2388 vcc = device & ONENAND_DEVICE_VCC_MASK; 2505 2389 demuxed = device & ONENAND_DEVICE_IS_DEMUX; 2506 2390 ddp = device & ONENAND_DEVICE_IS_DDP; 2507 - density = device >> ONENAND_DEVICE_DENSITY_SHIFT; 2391 + density = onenand_get_density(device); 2508 2392 printk(KERN_INFO "%sOneNAND%s %dMB %sV 16-bit (0x%02x)\n", 2509 2393 demuxed ? "" : "Muxed ", 2510 2394 ddp ? "(DDP)" : "", ··· 2596 2480 this->device_id = dev_id; 2597 2481 this->version_id = ver_id; 2598 2482 2599 - density = dev_id >> ONENAND_DEVICE_DENSITY_SHIFT; 2483 + density = onenand_get_density(dev_id); 2600 2484 this->chipsize = (16 << density) << 20; 2601 2485 /* Set density mask. it is used for DDP */ 2602 2486 if (ONENAND_IS_DDP(this)) ··· 2780 2664 mtd->write = onenand_write; 2781 2665 mtd->read_oob = onenand_read_oob; 2782 2666 mtd->write_oob = onenand_write_oob; 2667 + mtd->panic_write = onenand_panic_write; 2783 2668 #ifdef CONFIG_MTD_ONENAND_OTP 2784 2669 mtd->get_fact_prot_info = onenand_get_fact_prot_info; 2785 2670 mtd->read_fact_prot_reg = onenand_read_fact_prot_reg;
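With mtd->panic_write wired up above, a panic-context consumer can write without sleeping, because onenand_panic_write() busy-waits on the interrupt register instead of using the regular wait path. A hedged sketch of such a caller (the wrapper name is invented; only the panic_write prototype comes from this patch, and the NULL check reflects the assumption that drivers without support leave the pointer unset):

	static int panic_log_write(struct mtd_info *mtd, loff_t ofs,
				   size_t len, const u_char *buf)
	{
		size_t retlen;

		if (!mtd->panic_write)		/* no panic-safe write path */
			return -EOPNOTSUPP;

		return mtd->panic_write(mtd, ofs, len, &retlen, buf);
	}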
+20 -5
drivers/mtd/redboot.c
··· 59 59 static char nullstring[] = "unallocated"; 60 60 #endif 61 61 62 + if ( directory < 0 ) { 63 + offset = master->size + directory * master->erasesize; 64 + while (master->block_isbad && 65 + master->block_isbad(master, offset)) { 66 + if (!offset) { 67 + nogood: 68 + printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n"); 69 + return -EIO; 70 + } 71 + offset -= master->erasesize; 72 + } 73 + } else { 74 + offset = directory * master->erasesize; 75 + while (master->block_isbad && 76 + master->block_isbad(master, offset)) { 77 + offset += master->erasesize; 78 + if (offset == master->size) 79 + goto nogood; 80 + } 81 + } 62 82 buf = vmalloc(master->erasesize); 63 83 64 84 if (!buf) 65 85 return -ENOMEM; 66 - 67 - if ( directory < 0 ) 68 - offset = master->size + directory*master->erasesize; 69 - else 70 - offset = directory*master->erasesize; 71 86 72 87 printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n", 73 88 master->name, offset);
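The negative 'directory' convention counts eraseblocks back from the end of the device, so the starting offset is plain arithmetic: on a 16 MiB flash with 64 KiB eraseblocks and directory = -1, scanning starts at 0x1000000 - 0x10000 = 0xFF0000, and the loops above then step away from that block whenever block_isbad() reports it bad. A sketch of just the offset computation (the helper name is invented):

	static unsigned long redboot_dir_offset(struct mtd_info *master,
						int directory)
	{
		/* negative values index eraseblocks from the device end */
		if (directory < 0)
			return master->size + directory * master->erasesize;
		return directory * master->erasesize;
	}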
+504 -198
drivers/mtd/ubi/build.c
··· 21 21 */ 22 22 23 23 /* 24 - * This file includes UBI initialization and building of UBI devices. At the 25 - * moment UBI devices may only be added while UBI is initialized, but dynamic 26 - * device add/remove functionality is planned. Also, at the moment we only 27 - * attach UBI devices by scanning, which will become a bottleneck when flashes 28 - * reach certain large size. Then one may improve UBI and add other methods. 24 + * This file includes UBI initialization and building of UBI devices. 25 + * 26 + * When UBI is initialized, it attaches all the MTD devices specified as the 27 + * module load parameters or the kernel boot parameters. If MTD devices were 28 + * not specified, UBI does not attach any MTD device, but it is possible to do 29 + * this later using the "UBI control device". 30 + * 31 + * At the moment we only attach UBI devices by scanning, which will become a 32 + * bottleneck when flashes reach a certain large size. Then one may improve UBI 33 + * and add other methods, although it does not seem to be easy to do. 29 34 */ 30 35 31 36 #include <linux/err.h> ··· 38 33 #include <linux/moduleparam.h> 39 34 #include <linux/stringify.h> 40 35 #include <linux/stat.h> 36 + #include <linux/miscdevice.h> 41 37 #include <linux/log2.h> 38 + #include <linux/kthread.h> 42 39 #include "ubi.h" 43 40 44 41 /* Maximum length of the 'mtd=' parameter */ ··· 50 43 * struct mtd_dev_param - MTD device parameter description data structure. 51 44 * @name: MTD device name or number string 52 45 * @vid_hdr_offs: VID header offset 53 - * @data_offs: data offset 54 46 */ 55 47 struct mtd_dev_param 56 48 { 57 49 char name[MTD_PARAM_LEN_MAX]; 58 50 int vid_hdr_offs; 59 - int data_offs; 60 51 }; 61 52 62 53 /* Number of elements set in the @mtd_dev_param array */ ··· 63 58 /* MTD devices specification parameters */ 64 59 static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES]; 65 60 66 - /* Number of UBI devices in system */ 67 - int ubi_devices_cnt; 68 - 69 - /* All UBI devices in system */ 70 - struct ubi_device *ubi_devices[UBI_MAX_DEVICES]; 71 - 72 61 /* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */ 73 62 struct class *ubi_class; 63 + 64 + /* Slab cache for wear-leveling entries */ 65 + struct kmem_cache *ubi_wl_entry_slab; 66 + 67 + /* UBI control character device */ 68 + static struct miscdevice ubi_ctrl_cdev = { 69 + .minor = MISC_DYNAMIC_MINOR, 70 + .name = "ubi_ctrl", 71 + .fops = &ubi_ctrl_cdev_operations, 72 + }; 73 + 74 + /* All UBI devices in system */ 75 + static struct ubi_device *ubi_devices[UBI_MAX_DEVICES]; 76 + 77 + /* Serializes UBI device creations and removals */ 78 + DEFINE_MUTEX(ubi_devices_mutex); 79 + 80 + /* Protects @ubi_devices and @ubi->ref_count */ 81 + static DEFINE_SPINLOCK(ubi_devices_lock); 74 82 75 83 /* "Show" method for files in '/<sysfs>/class/ubi/' */ 76 84 static ssize_t ubi_version_show(struct class *class, char *buf) ··· 119 101 __ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL); 120 102 static struct device_attribute dev_bgt_enabled = 121 103 __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL); 104 + static struct device_attribute dev_mtd_num = 105 + __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL); 106 + 107 + /** 108 + * ubi_get_device - get UBI device. 109 + * @ubi_num: UBI device number 110 + * 111 + * This function returns the UBI device description object for UBI device number 112 + * @ubi_num, or %NULL if the device does not exist. This function increases the 113 + * device reference count to prevent removal of the device.
In other words, the 114 + * device cannot be removed if its reference count is not zero. 115 + */ 116 + struct ubi_device *ubi_get_device(int ubi_num) 117 + { 118 + struct ubi_device *ubi; 119 + 120 + spin_lock(&ubi_devices_lock); 121 + ubi = ubi_devices[ubi_num]; 122 + if (ubi) { 123 + ubi_assert(ubi->ref_count >= 0); 124 + ubi->ref_count += 1; 125 + get_device(&ubi->dev); 126 + } 127 + spin_unlock(&ubi_devices_lock); 128 + 129 + return ubi; 130 + } 131 + 132 + /** 133 + * ubi_put_device - drop an UBI device reference. 134 + * @ubi: UBI device description object 135 + */ 136 + void ubi_put_device(struct ubi_device *ubi) 137 + { 138 + spin_lock(&ubi_devices_lock); 139 + ubi->ref_count -= 1; 140 + put_device(&ubi->dev); 141 + spin_unlock(&ubi_devices_lock); 142 + } 143 + 144 + /** 145 + * ubi_get_by_major - get UBI device description object by character device 146 + * major number. 147 + * @major: major number 148 + * 149 + * This function is similar to 'ubi_get_device()', but it searches the device 150 + * by its major number. 151 + */ 152 + struct ubi_device *ubi_get_by_major(int major) 153 + { 154 + int i; 155 + struct ubi_device *ubi; 156 + 157 + spin_lock(&ubi_devices_lock); 158 + for (i = 0; i < UBI_MAX_DEVICES; i++) { 159 + ubi = ubi_devices[i]; 160 + if (ubi && MAJOR(ubi->cdev.dev) == major) { 161 + ubi_assert(ubi->ref_count >= 0); 162 + ubi->ref_count += 1; 163 + get_device(&ubi->dev); 164 + spin_unlock(&ubi_devices_lock); 165 + return ubi; 166 + } 167 + } 168 + spin_unlock(&ubi_devices_lock); 169 + 170 + return NULL; 171 + } 172 + 173 + /** 174 + * ubi_major2num - get UBI device number by character device major number. 175 + * @major: major number 176 + * 177 + * This function searches UBI device number object by its major number. If UBI 178 + * device was not found, this function returns -ENODEV, otherwise the UBI device 179 + * number is returned. 180 + */ 181 + int ubi_major2num(int major) 182 + { 183 + int i, ubi_num = -ENODEV; 184 + 185 + spin_lock(&ubi_devices_lock); 186 + for (i = 0; i < UBI_MAX_DEVICES; i++) { 187 + struct ubi_device *ubi = ubi_devices[i]; 188 + 189 + if (ubi && MAJOR(ubi->cdev.dev) == major) { 190 + ubi_num = ubi->ubi_num; 191 + break; 192 + } 193 + } 194 + spin_unlock(&ubi_devices_lock); 195 + 196 + return ubi_num; 197 + } 122 198 123 199 /* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */ 124 200 static ssize_t dev_attribute_show(struct device *dev, 125 201 struct device_attribute *attr, char *buf) 126 202 { 127 - const struct ubi_device *ubi; 203 + ssize_t ret; 204 + struct ubi_device *ubi; 128 205 206 + /* 207 + * The below code looks weird, but it actually makes sense. We get the 208 + * UBI device reference from the contained 'struct ubi_device'. But it 209 + * is unclear if the device was removed or not yet. Indeed, if the 210 + * device was removed before we increased its reference count, 211 + * 'ubi_get_device()' will return -ENODEV and we fail. 212 + * 213 + * Remember, 'struct ubi_device' is freed in the release function, so 214 + * we still can use 'ubi->ubi_num'. 
215 + */ 129 216 ubi = container_of(dev, struct ubi_device, dev); 130 - if (attr == &dev_eraseblock_size) 131 - return sprintf(buf, "%d\n", ubi->leb_size); 132 - else if (attr == &dev_avail_eraseblocks) 133 - return sprintf(buf, "%d\n", ubi->avail_pebs); 134 - else if (attr == &dev_total_eraseblocks) 135 - return sprintf(buf, "%d\n", ubi->good_peb_count); 136 - else if (attr == &dev_volumes_count) 137 - return sprintf(buf, "%d\n", ubi->vol_count); 138 - else if (attr == &dev_max_ec) 139 - return sprintf(buf, "%d\n", ubi->max_ec); 140 - else if (attr == &dev_reserved_for_bad) 141 - return sprintf(buf, "%d\n", ubi->beb_rsvd_pebs); 142 - else if (attr == &dev_bad_peb_count) 143 - return sprintf(buf, "%d\n", ubi->bad_peb_count); 144 - else if (attr == &dev_max_vol_count) 145 - return sprintf(buf, "%d\n", ubi->vtbl_slots); 146 - else if (attr == &dev_min_io_size) 147 - return sprintf(buf, "%d\n", ubi->min_io_size); 148 - else if (attr == &dev_bgt_enabled) 149 - return sprintf(buf, "%d\n", ubi->thread_enabled); 150 - else 151 - BUG(); 217 + ubi = ubi_get_device(ubi->ubi_num); 218 + if (!ubi) 219 + return -ENODEV; 152 220 153 - return 0; 221 + if (attr == &dev_eraseblock_size) 222 + ret = sprintf(buf, "%d\n", ubi->leb_size); 223 + else if (attr == &dev_avail_eraseblocks) 224 + ret = sprintf(buf, "%d\n", ubi->avail_pebs); 225 + else if (attr == &dev_total_eraseblocks) 226 + ret = sprintf(buf, "%d\n", ubi->good_peb_count); 227 + else if (attr == &dev_volumes_count) 228 + ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT); 229 + else if (attr == &dev_max_ec) 230 + ret = sprintf(buf, "%d\n", ubi->max_ec); 231 + else if (attr == &dev_reserved_for_bad) 232 + ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs); 233 + else if (attr == &dev_bad_peb_count) 234 + ret = sprintf(buf, "%d\n", ubi->bad_peb_count); 235 + else if (attr == &dev_max_vol_count) 236 + ret = sprintf(buf, "%d\n", ubi->vtbl_slots); 237 + else if (attr == &dev_min_io_size) 238 + ret = sprintf(buf, "%d\n", ubi->min_io_size); 239 + else if (attr == &dev_bgt_enabled) 240 + ret = sprintf(buf, "%d\n", ubi->thread_enabled); 241 + else if (attr == &dev_mtd_num) 242 + ret = sprintf(buf, "%d\n", ubi->mtd->index); 243 + else 244 + ret = -EINVAL; 245 + 246 + ubi_put_device(ubi); 247 + return ret; 154 248 } 155 249 156 250 /* Fake "release" method for UBI devices */ ··· 280 150 int err; 281 151 282 152 ubi->dev.release = dev_release; 283 - ubi->dev.devt = MKDEV(ubi->major, 0); 153 + ubi->dev.devt = ubi->cdev.dev; 284 154 ubi->dev.class = ubi_class; 285 155 sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num); 286 156 err = device_register(&ubi->dev); 287 157 if (err) 288 - goto out; 158 + return err; 289 159 290 160 err = device_create_file(&ubi->dev, &dev_eraseblock_size); 291 161 if (err) 292 - goto out_unregister; 162 + return err; 293 163 err = device_create_file(&ubi->dev, &dev_avail_eraseblocks); 294 164 if (err) 295 - goto out_eraseblock_size; 165 + return err; 296 166 err = device_create_file(&ubi->dev, &dev_total_eraseblocks); 297 167 if (err) 298 - goto out_avail_eraseblocks; 168 + return err; 299 169 err = device_create_file(&ubi->dev, &dev_volumes_count); 300 170 if (err) 301 - goto out_total_eraseblocks; 171 + return err; 302 172 err = device_create_file(&ubi->dev, &dev_max_ec); 303 173 if (err) 304 - goto out_volumes_count; 174 + return err; 305 175 err = device_create_file(&ubi->dev, &dev_reserved_for_bad); 306 176 if (err) 307 - goto out_volumes_max_ec; 177 + return err; 308 178 err = device_create_file(&ubi->dev, 
&dev_bad_peb_count); 309 179 if (err) 310 - goto out_reserved_for_bad; 180 + return err; 311 181 err = device_create_file(&ubi->dev, &dev_max_vol_count); 312 182 if (err) 313 - goto out_bad_peb_count; 183 + return err; 314 184 err = device_create_file(&ubi->dev, &dev_min_io_size); 315 185 if (err) 316 - goto out_max_vol_count; 186 + return err; 317 187 err = device_create_file(&ubi->dev, &dev_bgt_enabled); 318 188 if (err) 319 - goto out_min_io_size; 320 - 321 - return 0; 322 - 323 - out_min_io_size: 324 - device_remove_file(&ubi->dev, &dev_min_io_size); 325 - out_max_vol_count: 326 - device_remove_file(&ubi->dev, &dev_max_vol_count); 327 - out_bad_peb_count: 328 - device_remove_file(&ubi->dev, &dev_bad_peb_count); 329 - out_reserved_for_bad: 330 - device_remove_file(&ubi->dev, &dev_reserved_for_bad); 331 - out_volumes_max_ec: 332 - device_remove_file(&ubi->dev, &dev_max_ec); 333 - out_volumes_count: 334 - device_remove_file(&ubi->dev, &dev_volumes_count); 335 - out_total_eraseblocks: 336 - device_remove_file(&ubi->dev, &dev_total_eraseblocks); 337 - out_avail_eraseblocks: 338 - device_remove_file(&ubi->dev, &dev_avail_eraseblocks); 339 - out_eraseblock_size: 340 - device_remove_file(&ubi->dev, &dev_eraseblock_size); 341 - out_unregister: 342 - device_unregister(&ubi->dev); 343 - out: 344 - ubi_err("failed to initialize sysfs for %s", ubi->ubi_name); 189 + return err; 190 + err = device_create_file(&ubi->dev, &dev_mtd_num); 345 191 return err; 346 192 } 347 193 ··· 327 221 */ 328 222 static void ubi_sysfs_close(struct ubi_device *ubi) 329 223 { 224 + device_remove_file(&ubi->dev, &dev_mtd_num); 330 225 device_remove_file(&ubi->dev, &dev_bgt_enabled); 331 226 device_remove_file(&ubi->dev, &dev_min_io_size); 332 227 device_remove_file(&ubi->dev, &dev_max_vol_count); ··· 351 244 352 245 for (i = 0; i < ubi->vtbl_slots; i++) 353 246 if (ubi->volumes[i]) 354 - ubi_free_volume(ubi, i); 247 + ubi_free_volume(ubi, ubi->volumes[i]); 355 248 } 356 249 357 250 /** ··· 365 258 { 366 259 int i, err; 367 260 dev_t dev; 368 - 369 - mutex_init(&ubi->vtbl_mutex); 370 - spin_lock_init(&ubi->volumes_lock); 371 261 372 262 sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num); 373 263 ··· 382 278 return err; 383 279 } 384 280 281 + ubi_assert(MINOR(dev) == 0); 385 282 cdev_init(&ubi->cdev, &ubi_cdev_operations); 386 - ubi->major = MAJOR(dev); 387 - dbg_msg("%s major is %u", ubi->ubi_name, ubi->major); 283 + dbg_msg("%s major is %u", ubi->ubi_name, MAJOR(dev)); 388 284 ubi->cdev.owner = THIS_MODULE; 389 285 390 - dev = MKDEV(ubi->major, 0); 391 286 err = cdev_add(&ubi->cdev, dev, 1); 392 287 if (err) { 393 - ubi_err("cannot add character device %s", ubi->ubi_name); 288 + ubi_err("cannot add character device"); 394 289 goto out_unreg; 395 290 } 396 291 397 292 err = ubi_sysfs_init(ubi); 398 293 if (err) 399 - goto out_cdev; 294 + goto out_sysfs; 400 295 401 296 for (i = 0; i < ubi->vtbl_slots; i++) 402 297 if (ubi->volumes[i]) { 403 - err = ubi_add_volume(ubi, i); 404 - if (err) 298 + err = ubi_add_volume(ubi, ubi->volumes[i]); 299 + if (err) { 300 + ubi_err("cannot add volume %d", i); 405 301 goto out_volumes; 302 + } 406 303 } 407 304 408 305 return 0; 409 306 410 307 out_volumes: 411 308 kill_volumes(ubi); 309 + out_sysfs: 412 310 ubi_sysfs_close(ubi); 413 - out_cdev: 414 311 cdev_del(&ubi->cdev); 415 312 out_unreg: 416 - unregister_chrdev_region(MKDEV(ubi->major, 0), 417 - ubi->vtbl_slots + 1); 313 + unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1); 314 + ubi_err("cannot initialize UBI %s, 
error %d", ubi->ubi_name, err); 418 315 return err; 419 316 } 420 317 ··· 428 323 kill_volumes(ubi); 429 324 ubi_sysfs_close(ubi); 430 325 cdev_del(&ubi->cdev); 431 - unregister_chrdev_region(MKDEV(ubi->major, 0), ubi->vtbl_slots + 1); 326 + unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1); 432 327 } 433 328 434 329 /** ··· 489 384 * assumed: 490 385 * o EC header is always at offset zero - this cannot be changed; 491 386 * o VID header starts just after the EC header at the closest address 492 - * aligned to @io->@hdrs_min_io_size; 387 + * aligned to @io->hdrs_min_io_size; 493 388 * o data starts just after the VID header at the closest address aligned to 494 - * @io->@min_io_size 389 + * @io->min_io_size 495 390 * 496 391 * This function returns zero in case of success and a negative error code in 497 392 * case of failure. ··· 512 407 return -EINVAL; 513 408 } 514 409 410 + if (ubi->vid_hdr_offset < 0) 411 + return -EINVAL; 412 + 515 413 /* 516 414 * Note, in this implementation we support MTD devices with 0x7FFFFFFF 517 415 * physical eraseblocks maximum. ··· 532 424 533 425 /* Make sure minimal I/O unit is power of 2 */ 534 426 if (!is_power_of_2(ubi->min_io_size)) { 535 - ubi_err("bad min. I/O unit"); 427 + ubi_err("min. I/O unit (%d) is not power of 2", 428 + ubi->min_io_size); 536 429 return -EINVAL; 537 430 } 538 431 ··· 562 453 } 563 454 564 455 /* Similar for the data offset */ 565 - if (ubi->leb_start == 0) { 566 - ubi->leb_start = ubi->vid_hdr_offset + ubi->vid_hdr_alsize; 567 - ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size); 568 - } 456 + ubi->leb_start = ubi->vid_hdr_offset + UBI_EC_HDR_SIZE; 457 + ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size); 569 458 570 459 dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset); 571 460 dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset); ··· 621 514 } 622 515 623 516 /** 624 - * attach_mtd_dev - attach an MTD device. 625 - * @mtd_dev: MTD device name or number string 626 - * @vid_hdr_offset: VID header offset 627 - * @data_offset: data offset 517 + * autoresize - re-size the volume which has the "auto-resize" flag set. 518 + * @ubi: UBI device description object 519 + * @vol_id: ID of the volume to re-size 628 520 * 629 - * This function attaches an MTD device to UBI. It first treats @mtd_dev as the 630 - * MTD device name, and tries to open it by this name. If it is unable to open, 631 - * it tries to convert @mtd_dev to an integer and open the MTD device by its 632 - * number. Returns zero in case of success and a negative error code in case of 633 - * failure. 521 + * This function re-sizes the volume marked by the @UBI_VTBL_AUTORESIZE_FLG in 522 + * the volume table to the largest possible size. See comments in ubi-header.h 523 + * for more description of the flag. Returns zero in case of success and a 524 + * negative error code in case of failure. 
634 525 */ 635 - static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset, 636 - int data_offset) 526 + static int autoresize(struct ubi_device *ubi, int vol_id) 637 527 { 638 - struct ubi_device *ubi; 639 - struct mtd_info *mtd; 640 - int i, err; 528 + struct ubi_volume_desc desc; 529 + struct ubi_volume *vol = ubi->volumes[vol_id]; 530 + int err, old_reserved_pebs = vol->reserved_pebs; 641 531 642 - mtd = get_mtd_device_nm(mtd_dev); 643 - if (IS_ERR(mtd)) { 644 - int mtd_num; 645 - char *endp; 532 + /* 533 + * Clear the auto-resize flag in the volume in-memory copy of the 534 + * volume table, and 'ubi_resize_volume()' will propagate this change 535 + * to the flash. 536 + */ 537 + ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG; 646 538 647 - if (PTR_ERR(mtd) != -ENODEV) 648 - return PTR_ERR(mtd); 539 + if (ubi->avail_pebs == 0) { 540 + struct ubi_vtbl_record vtbl_rec; 649 541 650 542 /* 651 - * Probably this is not MTD device name but MTD device number - 652 - * check this out. 543 + * No available PEBs to re-size the volume, clear the flag on 544 + * flash and exit. 653 545 */ 654 - mtd_num = simple_strtoul(mtd_dev, &endp, 0); 655 - if (*endp != '\0' || mtd_dev == endp) { 656 - ubi_err("incorrect MTD device: \"%s\"", mtd_dev); 657 - return -ENODEV; 658 - } 659 - 660 - mtd = get_mtd_device(NULL, mtd_num); 661 - if (IS_ERR(mtd)) 662 - return PTR_ERR(mtd); 546 + memcpy(&vtbl_rec, &ubi->vtbl[vol_id], 547 + sizeof(struct ubi_vtbl_record)); 548 + err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); 549 + if (err) 550 + ubi_err("cannot clean auto-resize flag for volume %d", 551 + vol_id); 552 + } else { 553 + desc.vol = vol; 554 + err = ubi_resize_volume(&desc, 555 + old_reserved_pebs + ubi->avail_pebs); 556 + if (err) 557 + ubi_err("cannot auto-resize volume %d", vol_id); 663 558 } 664 559 665 - /* Check if we already have the same MTD device attached */ 666 - for (i = 0; i < ubi_devices_cnt; i++) 667 - if (ubi_devices[i]->mtd->index == mtd->index) { 668 - ubi_err("mtd%d is already attached to ubi%d", 560 + if (err) 561 + return err; 562 + 563 + ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id, 564 + vol->name, old_reserved_pebs, vol->reserved_pebs); 565 + return 0; 566 + } 567 + 568 + /** 569 + * ubi_attach_mtd_dev - attach an MTD device. 570 + * @mtd: MTD device description object 571 + * @ubi_num: number to assign to the new UBI device 572 + * @vid_hdr_offset: VID header offset 573 + * 574 + * This function attaches MTD device @mtd to UBI and assigns @ubi_num to the 575 + * newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in 576 + * which case this function finds a vacant device number and assigns it 577 + * automatically. Returns the new UBI device number in case of success and a 578 + * negative error code in case of failure. 579 + * 580 + * Note, invocations of this function have to be serialized by the 581 + * @ubi_devices_mutex. 582 + */ 583 + int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) 584 + { 585 + struct ubi_device *ubi; 586 + int i, err; 587 + 588 + /* 589 + * Check if we already have the same MTD device attached. 590 + * 591 + * Note, this function assumes that UBI device creations and deletions 592 + * are serialized, so it does not take the &ubi_devices_lock. 
593 + */ 594 + for (i = 0; i < UBI_MAX_DEVICES; i++) { 595 + ubi = ubi_devices[i]; 596 + if (ubi && mtd->index == ubi->mtd->index) { 597 + dbg_err("mtd%d is already attached to ubi%d", 669 598 mtd->index, i); 670 - err = -EINVAL; 671 - goto out_mtd; 599 + return -EEXIST; 672 600 } 673 - 674 - ubi = ubi_devices[ubi_devices_cnt] = kzalloc(sizeof(struct ubi_device), 675 - GFP_KERNEL); 676 - if (!ubi) { 677 - err = -ENOMEM; 678 - goto out_mtd; 679 601 } 680 602 681 - ubi->ubi_num = ubi_devices_cnt; 603 + /* 604 + * Make sure this MTD device is not emulated on top of an UBI volume 605 + * already. Well, generally this recursion works fine, but there are 606 + * different problems like the UBI module takes a reference to itself 607 + * by attaching (and thus, opening) the emulated MTD device. This 608 + * results in inability to unload the module. And in general it makes 609 + * no sense to attach emulated MTD devices, so we prohibit this. 610 + */ 611 + if (mtd->type == MTD_UBIVOLUME) { 612 + ubi_err("refuse attaching mtd%d - it is already emulated on " 613 + "top of UBI", mtd->index); 614 + return -EINVAL; 615 + } 616 + 617 + if (ubi_num == UBI_DEV_NUM_AUTO) { 618 + /* Search for an empty slot in the @ubi_devices array */ 619 + for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) 620 + if (!ubi_devices[ubi_num]) 621 + break; 622 + if (ubi_num == UBI_MAX_DEVICES) { 623 + dbg_err("only %d UBI devices may be created", UBI_MAX_DEVICES); 624 + return -ENFILE; 625 + } 626 + } else { 627 + if (ubi_num >= UBI_MAX_DEVICES) 628 + return -EINVAL; 629 + 630 + /* Make sure ubi_num is not busy */ 631 + if (ubi_devices[ubi_num]) { 632 + dbg_err("ubi%d already exists", ubi_num); 633 + return -EEXIST; 634 + } 635 + } 636 + 637 + ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL); 638 + if (!ubi) 639 + return -ENOMEM; 640 + 682 641 ubi->mtd = mtd; 683 - 684 - dbg_msg("attaching mtd%d to ubi%d: VID header offset %d data offset %d", 685 - ubi->mtd->index, ubi_devices_cnt, vid_hdr_offset, data_offset); 686 - 642 + ubi->ubi_num = ubi_num; 687 643 ubi->vid_hdr_offset = vid_hdr_offset; 688 - ubi->leb_start = data_offset; 644 + ubi->autoresize_vol_id = -1; 645 + 646 + mutex_init(&ubi->buf_mutex); 647 + mutex_init(&ubi->ckvol_mutex); 648 + mutex_init(&ubi->volumes_mutex); 649 + spin_lock_init(&ubi->volumes_lock); 650 + 651 + dbg_msg("attaching mtd%d to ubi%d: VID header offset %d", 652 + mtd->index, ubi_num, vid_hdr_offset); 653 + 689 654 err = io_init(ubi); 690 655 if (err) 691 656 goto out_free; 692 657 693 - mutex_init(&ubi->buf_mutex); 694 658 ubi->peb_buf1 = vmalloc(ubi->peb_size); 695 659 if (!ubi->peb_buf1) 696 660 goto out_free; ··· 783 605 goto out_free; 784 606 } 785 607 608 + if (ubi->autoresize_vol_id != -1) { 609 + err = autoresize(ubi, ubi->autoresize_vol_id); 610 + if (err) 611 + goto out_detach; 612 + } 613 + 786 614 err = uif_init(ubi); 787 615 if (err) 788 616 goto out_detach; 789 617 790 - ubi_msg("attached mtd%d to ubi%d", ubi->mtd->index, ubi_devices_cnt); 791 - ubi_msg("MTD device name: \"%s\"", ubi->mtd->name); 618 + ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name); 619 + if (IS_ERR(ubi->bgt_thread)) { 620 + err = PTR_ERR(ubi->bgt_thread); 621 + ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name, 622 + err); 623 + goto out_uif; 624 + } 625 + 626 + ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num); 627 + ubi_msg("MTD device name: \"%s\"", mtd->name); 792 628 ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20); 793 629 ubi_msg("physical eraseblock size: %d 
bytes (%d KiB)", 794 630 ubi->peb_size, ubi->peb_size >> 10); ··· 830 638 wake_up_process(ubi->bgt_thread); 831 639 } 832 640 833 - ubi_devices_cnt += 1; 834 - return 0; 641 + ubi_devices[ubi_num] = ubi; 642 + return ubi_num; 835 643 644 + out_uif: 645 + uif_close(ubi); 836 646 out_detach: 837 647 ubi_eba_close(ubi); 838 648 ubi_wl_close(ubi); ··· 846 652 vfree(ubi->dbg_peb_buf); 847 653 #endif 848 654 kfree(ubi); 849 - out_mtd: 850 - put_mtd_device(mtd); 851 - ubi_devices[ubi_devices_cnt] = NULL; 852 655 return err; 853 656 } 854 657 855 658 /** 856 - * detach_mtd_dev - detach an MTD device. 857 - * @ubi: UBI device description object 659 + * ubi_detach_mtd_dev - detach an MTD device. 660 + * @ubi_num: UBI device number to detach from 661 + * @anyway: detach MTD even if device reference count is not zero 662 + * 663 + * This function destroys UBI device number @ubi_num and detaches the 664 + * underlying MTD device. Returns zero in case of success and %-EBUSY if the 665 + * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not 666 + * exist. 667 + * 668 + * Note, invocations of this function have to be serialized by the 669 + * @ubi_devices_mutex. 858 670 */ 859 - static void detach_mtd_dev(struct ubi_device *ubi) 671 + int ubi_detach_mtd_dev(int ubi_num, int anyway) 860 672 { 861 - int ubi_num = ubi->ubi_num, mtd_num = ubi->mtd->index; 673 + struct ubi_device *ubi; 862 674 675 + if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) 676 + return -EINVAL; 677 + 678 + spin_lock(&ubi_devices_lock); 679 + ubi = ubi_devices[ubi_num]; 680 + if (!ubi) { 681 + spin_unlock(&ubi_devices_lock); 682 + return -EINVAL; 683 + } 684 + 685 + if (ubi->ref_count) { 686 + if (!anyway) { 687 + spin_unlock(&ubi_devices_lock); 688 + return -EBUSY; 689 + } 690 + /* This may only happen if there is a bug */ 691 + ubi_err("%s reference count %d, destroy anyway", 692 + ubi->ubi_name, ubi->ref_count); 693 + } 694 + ubi_devices[ubi_num] = NULL; 695 + spin_unlock(&ubi_devices_lock); 696 + 697 + ubi_assert(ubi_num == ubi->ubi_num); 863 698 dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num); 699 + 700 + /* 701 + * Before freeing anything, we have to stop the background thread to 702 + * prevent it from doing anything on this device while we are freeing. 703 + */ 704 + if (ubi->bgt_thread) 705 + kthread_stop(ubi->bgt_thread); 706 + 864 707 uif_close(ubi); 865 708 ubi_eba_close(ubi); 866 709 ubi_wl_close(ubi); ··· 908 677 #ifdef CONFIG_MTD_UBI_DEBUG 909 678 vfree(ubi->dbg_peb_buf); 910 679 #endif 911 - kfree(ubi_devices[ubi_num]); 912 - ubi_devices[ubi_num] = NULL; 913 - ubi_devices_cnt -= 1; 914 - ubi_assert(ubi_devices_cnt >= 0); 915 - ubi_msg("mtd%d is detached from ubi%d", mtd_num, ubi_num); 680 + ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num); 681 + kfree(ubi); 682 + return 0; 683 + } 684 + 685 + /** 686 + * open_mtd_device - open an MTD device by its name or number. 687 + * @mtd_dev: name or number of the device 688 + * 689 + * This function tries to open an MTD device described by the @mtd_dev string, 690 + * which is first treated as an ASCII number; if that fails, it is treated as 691 + * an MTD device name. Returns the MTD device description object in case of 692 + * success and a negative error code in case of failure. 
693 + */ 694 + static struct mtd_info * __init open_mtd_device(const char *mtd_dev) 695 + { 696 + struct mtd_info *mtd; 697 + int mtd_num; 698 + char *endp; 699 + 700 + mtd_num = simple_strtoul(mtd_dev, &endp, 0); 701 + if (*endp != '\0' || mtd_dev == endp) { 702 + /* 703 + * This does not look like an ASCII integer, probably this is 704 + * MTD device name. 705 + */ 706 + mtd = get_mtd_device_nm(mtd_dev); 707 + } else 708 + mtd = get_mtd_device(NULL, mtd_num); 709 + 710 + return mtd; 916 711 } 917 712 918 713 static int __init ubi_init(void) ··· 950 693 BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64); 951 694 952 695 if (mtd_devs > UBI_MAX_DEVICES) { 953 - printk("UBI error: too many MTD devices, maximum is %d\n", 954 - UBI_MAX_DEVICES); 696 + printk(KERN_ERR "UBI error: too many MTD devices, " 697 + "maximum is %d\n", UBI_MAX_DEVICES); 955 698 return -EINVAL; 956 699 } 957 700 701 + /* Create base sysfs directory and sysfs files */ 958 702 ubi_class = class_create(THIS_MODULE, UBI_NAME_STR); 959 - if (IS_ERR(ubi_class)) 960 - return PTR_ERR(ubi_class); 703 + if (IS_ERR(ubi_class)) { 704 + err = PTR_ERR(ubi_class); 705 + printk(KERN_ERR "UBI error: cannot create UBI class\n"); 706 + goto out; 707 + } 961 708 962 709 err = class_create_file(ubi_class, &ubi_version); 963 - if (err) 710 + if (err) { 711 + printk(KERN_ERR "UBI error: cannot create sysfs file\n"); 964 712 goto out_class; 713 + } 714 + 715 + err = misc_register(&ubi_ctrl_cdev); 716 + if (err) { 717 + printk(KERN_ERR "UBI error: cannot register device\n"); 718 + goto out_version; 719 + } 720 + 721 + ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab", 722 + sizeof(struct ubi_wl_entry), 723 + 0, 0, NULL); 724 + if (!ubi_wl_entry_slab) 725 + goto out_dev_unreg; 965 726 966 727 /* Attach MTD devices */ 967 728 for (i = 0; i < mtd_devs; i++) { 968 729 struct mtd_dev_param *p = &mtd_dev_param[i]; 730 + struct mtd_info *mtd; 969 731 970 732 cond_resched(); 971 - err = attach_mtd_dev(p->name, p->vid_hdr_offs, p->data_offs); 972 - if (err) 733 + 734 + mtd = open_mtd_device(p->name); 735 + if (IS_ERR(mtd)) { 736 + err = PTR_ERR(mtd); 973 737 goto out_detach; 738 + } 739 + 740 + mutex_lock(&ubi_devices_mutex); 741 + err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 742 + p->vid_hdr_offs); 743 + mutex_unlock(&ubi_devices_mutex); 744 + if (err < 0) { 745 + put_mtd_device(mtd); 746 + printk(KERN_ERR "UBI error: cannot attach %s\n", 747 + p->name); 748 + goto out_detach; 749 + } 974 750 } 975 751 976 752 return 0; 977 753 978 754 out_detach: 979 755 for (k = 0; k < i; k++) 980 - detach_mtd_dev(ubi_devices[k]); 756 + if (ubi_devices[k]) { 757 + mutex_lock(&ubi_devices_mutex); 758 + ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1); 759 + mutex_unlock(&ubi_devices_mutex); 760 + } 761 + kmem_cache_destroy(ubi_wl_entry_slab); 762 + out_dev_unreg: 763 + misc_deregister(&ubi_ctrl_cdev); 764 + out_version: 981 765 class_remove_file(ubi_class, &ubi_version); 982 766 out_class: 983 767 class_destroy(ubi_class); 768 + out: 769 + printk(KERN_ERR "UBI error: cannot initialize UBI, error %d\n", err); 984 770 return err; 985 771 } 986 772 module_init(ubi_init); 987 773 988 774 static void __exit ubi_exit(void) 989 775 { 990 - int i, n = ubi_devices_cnt; 776 + int i; 991 777 992 - for (i = 0; i < n; i++) 993 - detach_mtd_dev(ubi_devices[i]); 778 + for (i = 0; i < UBI_MAX_DEVICES; i++) 779 + if (ubi_devices[i]) { 780 + mutex_lock(&ubi_devices_mutex); 781 + ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1); 782 + mutex_unlock(&ubi_devices_mutex); 783 + } 784 + 
kmem_cache_destroy(ubi_wl_entry_slab); 785 + misc_deregister(&ubi_ctrl_cdev); 994 786 class_remove_file(ubi_class, &ubi_version); 995 787 class_destroy(ubi_class); 996 788 } ··· 1060 754 1061 755 result = simple_strtoul(str, &endp, 0); 1062 756 if (str == endp || result < 0) { 1063 - printk("UBI error: incorrect bytes count: \"%s\"\n", str); 757 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n", 758 + str); 1064 759 return -EINVAL; 1065 760 } 1066 761 ··· 1071 764 case 'M': 1072 765 result *= 1024; 1073 766 case 'K': 1074 - case 'k': 1075 767 result *= 1024; 1076 - if (endp[1] == 'i' && (endp[2] == '\0' || 1077 - endp[2] == 'B' || endp[2] == 'b')) 768 + if (endp[1] == 'i' && endp[2] == 'B') 1078 769 endp += 2; 1079 770 case '\0': 1080 771 break; 1081 772 default: 1082 - printk("UBI error: incorrect bytes count: \"%s\"\n", str); 773 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n", 774 + str); 1083 775 return -EINVAL; 1084 776 } 1085 777 ··· 1099 793 struct mtd_dev_param *p; 1100 794 char buf[MTD_PARAM_LEN_MAX]; 1101 795 char *pbuf = &buf[0]; 1102 - char *tokens[3] = {NULL, NULL, NULL}; 796 + char *tokens[2] = {NULL, NULL}; 797 + 798 + if (!val) 799 + return -EINVAL; 1103 800 1104 801 if (mtd_devs == UBI_MAX_DEVICES) { 1105 - printk("UBI error: too many parameters, max. is %d\n", 802 + printk(KERN_ERR "UBI error: too many parameters, max. is %d\n", 1106 803 UBI_MAX_DEVICES); 1107 804 return -EINVAL; 1108 805 } 1109 806 1110 807 len = strnlen(val, MTD_PARAM_LEN_MAX); 1111 808 if (len == MTD_PARAM_LEN_MAX) { 1112 - printk("UBI error: parameter \"%s\" is too long, max. is %d\n", 1113 - val, MTD_PARAM_LEN_MAX); 809 + printk(KERN_ERR "UBI error: parameter \"%s\" is too long, " 810 + "max. is %d\n", val, MTD_PARAM_LEN_MAX); 1114 811 return -EINVAL; 1115 812 } 1116 813 1117 814 if (len == 0) { 1118 - printk("UBI warning: empty 'mtd=' parameter - ignored\n"); 815 + printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - " 816 + "ignored\n"); 1119 817 return 0; 1120 818 } 1121 819 ··· 1129 819 if (buf[len - 1] == '\n') 1130 820 buf[len - 1] = '\0'; 1131 821 1132 - for (i = 0; i < 3; i++) 822 + for (i = 0; i < 2; i++) 1133 823 tokens[i] = strsep(&pbuf, ","); 1134 824 1135 825 if (pbuf) { 1136 - printk("UBI error: too many arguments at \"%s\"\n", val); 826 + printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n", 827 + val); 1137 828 return -EINVAL; 1138 829 } 1139 830 ··· 1143 832 1144 833 if (tokens[1]) 1145 834 p->vid_hdr_offs = bytes_str_to_int(tokens[1]); 1146 - if (tokens[2]) 1147 - p->data_offs = bytes_str_to_int(tokens[2]); 1148 835 1149 836 if (p->vid_hdr_offs < 0) 1150 837 return p->vid_hdr_offs; 1151 - if (p->data_offs < 0) 1152 - return p->data_offs; 1153 838 1154 839 mtd_devs += 1; 1155 840 return 0; ··· 1153 846 1154 847 module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000); 1155 848 MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: " 1156 - "mtd=<name|num>[,<vid_hdr_offs>,<data_offs>]. " 849 + "mtd=<name|num>[,<vid_hdr_offs>].\n" 1157 850 "Multiple \"mtd\" parameters may be specified.\n" 1158 - "MTD devices may be specified by their number or name. 
" 1159 - "Optional \"vid_hdr_offs\" and \"data_offs\" parameters " 1160 - "specify UBI VID header position and data starting " 1161 - "position to be used by UBI.\n" 1162 - "Example: mtd=content,1984,2048 mtd=4 - attach MTD device" 1163 - "with name content using VID header offset 1984 and data " 1164 - "start 2048, and MTD device number 4 using default " 1165 - "offsets"); 851 + "MTD devices may be specified by their number or name.\n" 852 + "Optional \"vid_hdr_offs\" parameter specifies UBI VID " 853 + "header position and data starting position to be used " 854 + "by UBI.\n" 855 + "Example: mtd=content,1984 mtd=4 - attach MTD device" 856 + "with name \"content\" using VID header offset 1984, and " 857 + "MTD device number 4 with default VID header offset."); 1166 858 1167 859 MODULE_VERSION(__stringify(UBI_VERSION)); 1168 860 MODULE_DESCRIPTION("UBI - Unsorted Block Images");
+181 -63
drivers/mtd/ubi/cdev.c
··· 28 28 * 29 29 * Major and minor numbers are assigned dynamically to both UBI and volume 30 30 * character devices. 31 + * 32 + * Well, there is the third kind of character devices - the UBI control 33 + * character device, which allows to manipulate by UBI devices - create and 34 + * delete them. In other words, it is used for attaching and detaching MTD 35 + * devices. 31 36 */ 32 37 33 38 #include <linux/module.h> ··· 43 38 #include <asm/uaccess.h> 44 39 #include <asm/div64.h> 45 40 #include "ubi.h" 46 - 47 - /* 48 - * Maximum sequence numbers of UBI and volume character device IOCTLs (direct 49 - * logical eraseblock erase is a debug-only feature). 50 - */ 51 - #define UBI_CDEV_IOC_MAX_SEQ 2 52 - #ifndef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO 53 - #define VOL_CDEV_IOC_MAX_SEQ 1 54 - #else 55 - #define VOL_CDEV_IOC_MAX_SEQ 2 56 - #endif 57 - 58 - /** 59 - * major_to_device - get UBI device object by character device major number. 60 - * @major: major number 61 - * 62 - * This function returns a pointer to the UBI device object. 63 - */ 64 - static struct ubi_device *major_to_device(int major) 65 - { 66 - int i; 67 - 68 - for (i = 0; i < ubi_devices_cnt; i++) 69 - if (ubi_devices[i] && ubi_devices[i]->major == major) 70 - return ubi_devices[i]; 71 - BUG(); 72 - return NULL; 73 - } 74 41 75 42 /** 76 43 * get_exclusive - get exclusive access to an UBI volume. ··· 101 124 static int vol_cdev_open(struct inode *inode, struct file *file) 102 125 { 103 126 struct ubi_volume_desc *desc; 104 - const struct ubi_device *ubi = major_to_device(imajor(inode)); 105 - int vol_id = iminor(inode) - 1; 106 - int mode; 127 + int vol_id = iminor(inode) - 1, mode, ubi_num; 128 + 129 + ubi_num = ubi_major2num(imajor(inode)); 130 + if (ubi_num < 0) 131 + return ubi_num; 107 132 108 133 if (file->f_mode & FMODE_WRITE) 109 134 mode = UBI_READWRITE; ··· 114 135 115 136 dbg_msg("open volume %d, mode %d", vol_id, mode); 116 137 117 - desc = ubi_open_volume(ubi->ubi_num, vol_id, mode); 138 + desc = ubi_open_volume(ubi_num, vol_id, mode); 118 139 if (IS_ERR(desc)) 119 140 return PTR_ERR(desc); 120 141 ··· 132 153 if (vol->updating) { 133 154 ubi_warn("update of volume %d not finished, volume is damaged", 134 155 vol->vol_id); 156 + ubi_assert(!vol->changing_leb); 135 157 vol->updating = 0; 158 + vfree(vol->upd_buf); 159 + } else if (vol->changing_leb) { 160 + dbg_msg("only %lld of %lld bytes received for atomic LEB change" 161 + " for volume %d:%d, cancel", vol->upd_received, 162 + vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id); 163 + vol->changing_leb = 0; 136 164 vfree(vol->upd_buf); 137 165 } 138 166 ··· 191 205 struct ubi_volume_desc *desc = file->private_data; 192 206 struct ubi_volume *vol = desc->vol; 193 207 struct ubi_device *ubi = vol->ubi; 194 - int err, lnum, off, len, vol_id = desc->vol->vol_id, tbuf_size; 208 + int err, lnum, off, len, tbuf_size; 195 209 size_t count_save = count; 196 210 void *tbuf; 197 211 uint64_t tmp; 198 212 199 213 dbg_msg("read %zd bytes from offset %lld of volume %d", 200 - count, *offp, vol_id); 214 + count, *offp, vol->vol_id); 201 215 202 216 if (vol->updating) { 203 217 dbg_err("updating"); ··· 211 225 return 0; 212 226 213 227 if (vol->corrupted) 214 - dbg_msg("read from corrupted volume %d", vol_id); 228 + dbg_msg("read from corrupted volume %d", vol->vol_id); 215 229 216 230 if (*offp + count > vol->used_bytes) 217 231 count_save = count = vol->used_bytes - *offp; ··· 235 249 if (off + len >= vol->usable_leb_size) 236 250 len = vol->usable_leb_size - off; 237 251 238 - 
err = ubi_eba_read_leb(ubi, vol_id, lnum, tbuf, off, len, 0); 252 + err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0); 239 253 if (err) 240 254 break; 241 255 ··· 275 289 struct ubi_volume_desc *desc = file->private_data; 276 290 struct ubi_volume *vol = desc->vol; 277 291 struct ubi_device *ubi = vol->ubi; 278 - int lnum, off, len, tbuf_size, vol_id = vol->vol_id, err = 0; 292 + int lnum, off, len, tbuf_size, err = 0; 279 293 size_t count_save = count; 280 294 char *tbuf; 281 295 uint64_t tmp; 282 296 283 297 dbg_msg("requested: write %zd bytes to offset %lld of volume %u", 284 - count, *offp, desc->vol->vol_id); 298 + count, *offp, vol->vol_id); 285 299 286 300 if (vol->vol_type == UBI_STATIC_VOLUME) 287 301 return -EROFS; ··· 325 339 break; 326 340 } 327 341 328 - err = ubi_eba_write_leb(ubi, vol_id, lnum, tbuf, off, len, 342 + err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len, 329 343 UBI_UNKNOWN); 330 344 if (err) 331 345 break; ··· 358 372 struct ubi_volume *vol = desc->vol; 359 373 struct ubi_device *ubi = vol->ubi; 360 374 361 - if (!vol->updating) 375 + if (!vol->updating && !vol->changing_leb) 362 376 return vol_cdev_direct_write(file, buf, count, offp); 363 377 364 - err = ubi_more_update_data(ubi, vol->vol_id, buf, count); 378 + if (vol->updating) 379 + err = ubi_more_update_data(ubi, vol, buf, count); 380 + else 381 + err = ubi_more_leb_change_data(ubi, vol, buf, count); 382 + 365 383 if (err < 0) { 366 - ubi_err("cannot write %zd bytes of update data", count); 384 + ubi_err("cannot accept more %zd bytes of data, error %d", 385 + count, err); 367 386 return err; 368 387 } 369 388 370 389 if (err) { 371 390 /* 372 - * Update is finished, @err contains number of actually written 373 - * bytes now. 391 + * The operation is finished, @err contains number of actually 392 + * written bytes. 
374 393 */ 375 394 count = err; 395 + 396 + if (vol->changing_leb) { 397 + revoke_exclusive(desc, UBI_READWRITE); 398 + return count; 399 + } 376 400 377 401 err = ubi_check_volume(ubi, vol->vol_id); 378 402 if (err < 0) ··· 398 402 revoke_exclusive(desc, UBI_READWRITE); 399 403 } 400 404 401 - *offp += count; 402 405 return count; 403 406 } 404 407 ··· 442 447 if (err < 0) 443 448 break; 444 449 445 - err = ubi_start_update(ubi, vol->vol_id, bytes); 450 + err = ubi_start_update(ubi, vol, bytes); 446 451 if (bytes == 0) 447 452 revoke_exclusive(desc, UBI_READWRITE); 453 + break; 454 + } 448 455 449 - file->f_pos = 0; 456 + /* Atomic logical eraseblock change command */ 457 + case UBI_IOCEBCH: 458 + { 459 + struct ubi_leb_change_req req; 460 + 461 + err = copy_from_user(&req, argp, 462 + sizeof(struct ubi_leb_change_req)); 463 + if (err) { 464 + err = -EFAULT; 465 + break; 466 + } 467 + 468 + if (desc->mode == UBI_READONLY || 469 + vol->vol_type == UBI_STATIC_VOLUME) { 470 + err = -EROFS; 471 + break; 472 + } 473 + 474 + /* Validate the request */ 475 + err = -EINVAL; 476 + if (req.lnum < 0 || req.lnum >= vol->reserved_pebs || 477 + req.bytes < 0 || req.lnum >= vol->usable_leb_size) 478 + break; 479 + if (req.dtype != UBI_LONGTERM && req.dtype != UBI_SHORTTERM && 480 + req.dtype != UBI_UNKNOWN) 481 + break; 482 + 483 + err = get_exclusive(desc); 484 + if (err < 0) 485 + break; 486 + 487 + err = ubi_start_leb_change(ubi, vol, &req); 488 + if (req.bytes == 0) 489 + revoke_exclusive(desc, UBI_READWRITE); 450 490 break; 451 491 } 452 492 ··· 497 467 break; 498 468 } 499 469 500 - if (desc->mode == UBI_READONLY) { 470 + if (desc->mode == UBI_READONLY || 471 + vol->vol_type == UBI_STATIC_VOLUME) { 501 472 err = -EROFS; 502 473 break; 503 474 } ··· 508 477 break; 509 478 } 510 479 511 - if (vol->vol_type != UBI_DYNAMIC_VOLUME) { 512 - err = -EROFS; 513 - break; 514 - } 515 - 516 480 dbg_msg("erase LEB %d:%d", vol->vol_id, lnum); 517 - err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum); 481 + err = ubi_eba_unmap_leb(ubi, vol, lnum); 518 482 if (err) 519 483 break; 520 484 ··· 606 580 if (!capable(CAP_SYS_RESOURCE)) 607 581 return -EPERM; 608 582 609 - ubi = major_to_device(imajor(inode)); 610 - if (IS_ERR(ubi)) 611 - return PTR_ERR(ubi); 583 + ubi = ubi_get_by_major(imajor(inode)); 584 + if (!ubi) 585 + return -ENODEV; 612 586 613 587 switch (cmd) { 614 588 /* Create volume command */ ··· 617 591 struct ubi_mkvol_req req; 618 592 619 593 dbg_msg("create volume"); 620 - err = copy_from_user(&req, argp, 621 - sizeof(struct ubi_mkvol_req)); 594 + err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req)); 622 595 if (err) { 623 596 err = -EFAULT; 624 597 break; ··· 629 604 630 605 req.name[req.name_len] = '\0'; 631 606 607 + mutex_lock(&ubi->volumes_mutex); 632 608 err = ubi_create_volume(ubi, &req); 609 + mutex_unlock(&ubi->volumes_mutex); 633 610 if (err) 634 611 break; 635 612 ··· 660 633 break; 661 634 } 662 635 636 + mutex_lock(&ubi->volumes_mutex); 663 637 err = ubi_remove_volume(desc); 664 - if (err) 665 - ubi_close_volume(desc); 638 + mutex_unlock(&ubi->volumes_mutex); 666 639 640 + /* 641 + * The volume is deleted (unless an error occurred), and the 642 + * 'struct ubi_volume' object will be freed when 643 + * 'ubi_close_volume()' will call 'put_device()'. 
644 + */ 645 + ubi_close_volume(desc); 667 646 break; 668 647 } 669 648 ··· 681 648 struct ubi_rsvol_req req; 682 649 683 650 dbg_msg("re-size volume"); 684 - err = copy_from_user(&req, argp, 685 - sizeof(struct ubi_rsvol_req)); 651 + err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req)); 686 652 if (err) { 687 653 err = -EFAULT; 688 654 break; ··· 701 669 pebs = !!do_div(tmp, desc->vol->usable_leb_size); 702 670 pebs += tmp; 703 671 672 + mutex_lock(&ubi->volumes_mutex); 704 673 err = ubi_resize_volume(desc, pebs); 674 + mutex_unlock(&ubi->volumes_mutex); 705 675 ubi_close_volume(desc); 676 + break; 677 + } 678 + 679 + default: 680 + err = -ENOTTY; 681 + break; 682 + } 683 + 684 + ubi_put_device(ubi); 685 + return err; 686 + } 687 + 688 + static int ctrl_cdev_ioctl(struct inode *inode, struct file *file, 689 + unsigned int cmd, unsigned long arg) 690 + { 691 + int err = 0; 692 + void __user *argp = (void __user *)arg; 693 + 694 + if (!capable(CAP_SYS_RESOURCE)) 695 + return -EPERM; 696 + 697 + switch (cmd) { 698 + /* Attach an MTD device command */ 699 + case UBI_IOCATT: 700 + { 701 + struct ubi_attach_req req; 702 + struct mtd_info *mtd; 703 + 704 + dbg_msg("attach MTD device"); 705 + err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req)); 706 + if (err) { 707 + err = -EFAULT; 708 + break; 709 + } 710 + 711 + if (req.mtd_num < 0 || 712 + (req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) { 713 + err = -EINVAL; 714 + break; 715 + } 716 + 717 + mtd = get_mtd_device(NULL, req.mtd_num); 718 + if (IS_ERR(mtd)) { 719 + err = PTR_ERR(mtd); 720 + break; 721 + } 722 + 723 + /* 724 + * Note, further request verification is done by 725 + * 'ubi_attach_mtd_dev()'. 726 + */ 727 + mutex_lock(&ubi_devices_mutex); 728 + err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset); 729 + mutex_unlock(&ubi_devices_mutex); 730 + if (err < 0) 731 + put_mtd_device(mtd); 732 + else 733 + /* @err contains UBI device number */ 734 + err = put_user(err, (__user int32_t *)argp); 735 + 736 + break; 737 + } 738 + 739 + /* Detach an MTD device command */ 740 + case UBI_IOCDET: 741 + { 742 + int ubi_num; 743 + 744 + dbg_msg("detach MTD device"); 745 + err = get_user(ubi_num, (__user int32_t *)argp); 746 + if (err) { 747 + err = -EFAULT; 748 + break; 749 + } 750 + 751 + mutex_lock(&ubi_devices_mutex); 752 + err = ubi_detach_mtd_dev(ubi_num, 0); 753 + mutex_unlock(&ubi_devices_mutex); 706 754 break; 707 755 } 708 756 ··· 793 681 794 682 return err; 795 683 } 684 + 685 + /* UBI control character device operations */ 686 + struct file_operations ubi_ctrl_cdev_operations = { 687 + .ioctl = ctrl_cdev_ioctl, 688 + .owner = THIS_MODULE, 689 + }; 796 690 797 691 /* UBI character device operations */ 798 692 struct file_operations ubi_cdev_operations = {
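From user space the new control node is driven with plain ioctl()s. A hedged example of attaching mtd0 (it assumes the UBI ioctl ABI lives in <mtd/ubi-user.h> and that struct ubi_attach_req begins with the ubi_num field, which is what the put_user() through argp in the handler above implies; the misc registration in build.c makes the node appear as /dev/ubi_ctrl):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <mtd/ubi-user.h>	/* assumed location of the UBI ioctl ABI */

	int attach_mtd0(void)
	{
		struct ubi_attach_req req = {
			.ubi_num = UBI_DEV_NUM_AUTO,
			.mtd_num = 0,
			.vid_hdr_offset = 0,	/* use the driver default */
		};
		int fd, ret;

		fd = open("/dev/ubi_ctrl", O_RDWR);
		if (fd < 0)
			return -1;
		ret = ioctl(fd, UBI_IOCATT, &req);
		close(fd);
		if (ret < 0)
			return -1;
		/* the handler wrote the assigned device number back into req */
		return req.ubi_num;
	}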
+7 -14
drivers/mtd/ubi/debug.h
··· 39 39 40 40 #ifdef CONFIG_MTD_UBI_DEBUG_MSG 41 41 /* Generic debugging message */ 42 - #define dbg_msg(fmt, ...) \ 43 - printk(KERN_DEBUG "UBI DBG: %s: " fmt "\n", __FUNCTION__, ##__VA_ARGS__) 42 + #define dbg_msg(fmt, ...) \ 43 + printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \ 44 + current->pid, __FUNCTION__, ##__VA_ARGS__) 44 45 45 46 #define ubi_dbg_dump_stack() dump_stack() 46 47 ··· 77 76 78 77 #ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA 79 78 /* Messages from the eraseblock association unit */ 80 - #define dbg_eba(fmt, ...) \ 81 - printk(KERN_DEBUG "UBI DBG eba: %s: " fmt "\n", __FUNCTION__, \ 82 - ##__VA_ARGS__) 79 + #define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 83 80 #else 84 81 #define dbg_eba(fmt, ...) ({}) 85 82 #endif 86 83 87 84 #ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL 88 85 /* Messages from the wear-leveling unit */ 89 - #define dbg_wl(fmt, ...) \ 90 - printk(KERN_DEBUG "UBI DBG wl: %s: " fmt "\n", __FUNCTION__, \ 91 - ##__VA_ARGS__) 86 + #define dbg_wl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 92 87 #else 93 88 #define dbg_wl(fmt, ...) ({}) 94 89 #endif 95 90 96 91 #ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO 97 92 /* Messages from the input/output unit */ 98 - #define dbg_io(fmt, ...) \ 99 - printk(KERN_DEBUG "UBI DBG io: %s: " fmt "\n", __FUNCTION__, \ 100 - ##__VA_ARGS__) 93 + #define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 101 94 #else 102 95 #define dbg_io(fmt, ...) ({}) 103 96 #endif 104 97 105 98 #ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD 106 99 /* Initialization and build messages */ 107 - #define dbg_bld(fmt, ...) \ 108 - printk(KERN_DEBUG "UBI DBG bld: %s: " fmt "\n", __FUNCTION__, \ 109 - ##__VA_ARGS__) 100 + #define dbg_bld(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 110 101 #else 111 102 #define dbg_bld(fmt, ...) ({}) 112 103 #endif
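The debug.h change collapses the per-unit macros into thin wrappers around dbg_msg, which now also prints the PID, so the message format lives in exactly one place. A standalone sketch of the same variadic-macro delegation (fprintf instead of printk, getpid() standing in for current->pid; ##__VA_ARGS__ is the GNU extension the kernel relies on):

	#include <stdio.h>
	#include <unistd.h>

	/* Generic debugging message, tagged with pid and function name. */
	#define dbg_msg(fmt, ...) \
		fprintf(stderr, "DBG (pid %d): %s: " fmt "\n", \
			(int)getpid(), __func__, ##__VA_ARGS__)

	/* Per-unit macros just delegate, exactly as the patch does. */
	#define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
	#define dbg_wl(fmt, ...)  dbg_msg(fmt, ##__VA_ARGS__)

	int main(void)
	{
		dbg_eba("erase LEB %d:%d", 0, 42);
		dbg_wl("schedule PEB %d for scrubbing", 7);
		return 0;
	}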
+175 -150
drivers/mtd/ubi/eba.c
··· 31 31 * logical eraseblock it is locked for reading or writing. The per-logical 32 32 * eraseblock locking is implemented by means of the lock tree. The lock tree 33 33 * is an RB-tree which refers all the currently locked logical eraseblocks. The 34 - * lock tree elements are &struct ltree_entry objects. They are indexed by 34 + * lock tree elements are &struct ubi_ltree_entry objects. They are indexed by 35 35 * (@vol_id, @lnum) pairs. 36 36 * 37 37 * EBA also maintains the global sequence counter which is incremented each ··· 48 48 49 49 /* Number of physical eraseblocks reserved for atomic LEB change operation */ 50 50 #define EBA_RESERVED_PEBS 1 51 - 52 - /** 53 - * struct ltree_entry - an entry in the lock tree. 54 - * @rb: links RB-tree nodes 55 - * @vol_id: volume ID of the locked logical eraseblock 56 - * @lnum: locked logical eraseblock number 57 - * @users: how many tasks are using this logical eraseblock or wait for it 58 - * @mutex: read/write mutex to implement read/write access serialization to 59 - * the (@vol_id, @lnum) logical eraseblock 60 - * 61 - * When a logical eraseblock is being locked - corresponding &struct ltree_entry 62 - * object is inserted to the lock tree (@ubi->ltree). 63 - */ 64 - struct ltree_entry { 65 - struct rb_node rb; 66 - int vol_id; 67 - int lnum; 68 - int users; 69 - struct rw_semaphore mutex; 70 - }; 71 - 72 - /* Slab cache for lock-tree entries */ 73 - static struct kmem_cache *ltree_slab; 74 51 75 52 /** 76 53 * next_sqnum - get next sequence number. ··· 78 101 */ 79 102 static int ubi_get_compat(const struct ubi_device *ubi, int vol_id) 80 103 { 81 - if (vol_id == UBI_LAYOUT_VOL_ID) 104 + if (vol_id == UBI_LAYOUT_VOLUME_ID) 82 105 return UBI_LAYOUT_VOLUME_COMPAT; 83 106 return 0; 84 107 } ··· 89 112 * @vol_id: volume ID 90 113 * @lnum: logical eraseblock number 91 114 * 92 - * This function returns a pointer to the corresponding &struct ltree_entry 115 + * This function returns a pointer to the corresponding &struct ubi_ltree_entry 93 116 * object if the logical eraseblock is locked and %NULL if it is not. 94 117 * @ubi->ltree_lock has to be locked. 95 118 */ 96 - static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id, 97 - int lnum) 119 + static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id, 120 + int lnum) 98 121 { 99 122 struct rb_node *p; 100 123 101 124 p = ubi->ltree.rb_node; 102 125 while (p) { 103 - struct ltree_entry *le; 126 + struct ubi_ltree_entry *le; 104 127 105 - le = rb_entry(p, struct ltree_entry, rb); 128 + le = rb_entry(p, struct ubi_ltree_entry, rb); 106 129 107 130 if (vol_id < le->vol_id) 108 131 p = p->rb_left; ··· 132 155 * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation 133 156 * failed. 
134 157 */
135 - static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
136 - int lnum)
158 + static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
159 + int vol_id, int lnum)
137 160 {
138 - struct ltree_entry *le, *le1, *le_free;
161 + struct ubi_ltree_entry *le, *le1, *le_free;
139 162
140 - le = kmem_cache_alloc(ltree_slab, GFP_NOFS);
163 + le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
141 164 if (!le)
142 165 return ERR_PTR(-ENOMEM);
143 166
167 + le->users = 0;
168 + init_rwsem(&le->mutex);
144 169 le->vol_id = vol_id;
145 170 le->lnum = lnum;
146 171
···
168 189 p = &ubi->ltree.rb_node;
169 190 while (*p) {
170 191 parent = *p;
171 - le1 = rb_entry(parent, struct ltree_entry, rb);
192 + le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
172 193
173 194 if (vol_id < le1->vol_id)
174 195 p = &(*p)->rb_left;
···
190 211 spin_unlock(&ubi->ltree_lock);
191 212
192 213 if (le_free)
193 - kmem_cache_free(ltree_slab, le_free);
214 + kfree(le_free);
194 215
195 216 return le;
196 217 }
···
206 227 */
207 228 static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
208 229 {
209 - struct ltree_entry *le;
230 + struct ubi_ltree_entry *le;
210 231
211 232 le = ltree_add_entry(ubi, vol_id, lnum);
212 233 if (IS_ERR(le))
···
224 245 static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
225 246 {
226 247 int free = 0;
227 - struct ltree_entry *le;
248 + struct ubi_ltree_entry *le;
228 249
229 250 spin_lock(&ubi->ltree_lock);
230 251 le = ltree_lookup(ubi, vol_id, lnum);
···
238 259
239 260 up_read(&le->mutex);
240 261 if (free)
241 - kmem_cache_free(ltree_slab, le);
262 + kfree(le);
242 263 }
243 264
244 265 /**
···
252 273 */
253 274 static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
254 275 {
255 - struct ltree_entry *le;
276 + struct ubi_ltree_entry *le;
256 277
257 278 le = ltree_add_entry(ubi, vol_id, lnum);
258 279 if (IS_ERR(le))
259 280 return PTR_ERR(le);
260 281 down_write(&le->mutex);
261 282 return 0;
283 + }
284 +
285 + /**
286 + * leb_write_trylock - try to lock logical eraseblock for writing.
287 + * @ubi: UBI device description object
288 + * @vol_id: volume ID
289 + * @lnum: logical eraseblock number
290 + *
291 + * This function locks a logical eraseblock for writing if there is no
292 + * contention and does nothing if there is contention. Returns %0 in case of
293 + * success, %1 in case of contention, and a negative error code in case of
294 + * failure.
295 + */ 296 + static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum) 297 + { 298 + int free; 299 + struct ubi_ltree_entry *le; 300 + 301 + le = ltree_add_entry(ubi, vol_id, lnum); 302 + if (IS_ERR(le)) 303 + return PTR_ERR(le); 304 + if (down_write_trylock(&le->mutex)) 305 + return 0; 306 + 307 + /* Contention, cancel */ 308 + spin_lock(&ubi->ltree_lock); 309 + le->users -= 1; 310 + ubi_assert(le->users >= 0); 311 + if (le->users == 0) { 312 + rb_erase(&le->rb, &ubi->ltree); 313 + free = 1; 314 + } else 315 + free = 0; 316 + spin_unlock(&ubi->ltree_lock); 317 + if (free) 318 + kfree(le); 319 + 320 + return 1; 262 321 } 263 322 264 323 /** ··· 308 291 static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum) 309 292 { 310 293 int free; 311 - struct ltree_entry *le; 294 + struct ubi_ltree_entry *le; 312 295 313 296 spin_lock(&ubi->ltree_lock); 314 297 le = ltree_lookup(ubi, vol_id, lnum); ··· 323 306 324 307 up_write(&le->mutex); 325 308 if (free) 326 - kmem_cache_free(ltree_slab, le); 309 + kfree(le); 327 310 } 328 311 329 312 /** 330 313 * ubi_eba_unmap_leb - un-map logical eraseblock. 331 314 * @ubi: UBI device description object 332 - * @vol_id: volume ID 315 + * @vol: volume description object 333 316 * @lnum: logical eraseblock number 334 317 * 335 318 * This function un-maps logical eraseblock @lnum and schedules corresponding 336 319 * physical eraseblock for erasure. Returns zero in case of success and a 337 320 * negative error code in case of failure. 338 321 */ 339 - int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum) 322 + int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, 323 + int lnum) 340 324 { 341 - int idx = vol_id2idx(ubi, vol_id), err, pnum; 342 - struct ubi_volume *vol = ubi->volumes[idx]; 325 + int err, pnum, vol_id = vol->vol_id; 343 326 344 327 if (ubi->ro_mode) 345 328 return -EROFS; ··· 366 349 /** 367 350 * ubi_eba_read_leb - read data. 368 351 * @ubi: UBI device description object 369 - * @vol_id: volume ID 352 + * @vol: volume description object 370 353 * @lnum: logical eraseblock number 371 354 * @buf: buffer to store the read data 372 355 * @offset: offset from where to read ··· 382 365 * returned for any volume type if an ECC error was detected by the MTD device 383 366 * driver. Other negative error cored may be returned in case of other errors. 384 367 */ 385 - int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf, 386 - int offset, int len, int check) 368 + int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, 369 + void *buf, int offset, int len, int check) 387 370 { 388 - int err, pnum, scrub = 0, idx = vol_id2idx(ubi, vol_id); 371 + int err, pnum, scrub = 0, vol_id = vol->vol_id; 389 372 struct ubi_vid_hdr *vid_hdr; 390 - struct ubi_volume *vol = ubi->volumes[idx]; 391 373 uint32_t uninitialized_var(crc); 392 374 393 375 err = leb_read_lock(ubi, vol_id, lnum); ··· 594 578 /** 595 579 * ubi_eba_write_leb - write data to dynamic volume. 596 580 * @ubi: UBI device description object 597 - * @vol_id: volume ID 581 + * @vol: volume description object 598 582 * @lnum: logical eraseblock number 599 583 * @buf: the data to write 600 584 * @offset: offset within the logical eraseblock where to write ··· 602 586 * @dtype: data type 603 587 * 604 588 * This function writes data to logical eraseblock @lnum of a dynamic volume 605 - * @vol_id. Returns zero in case of success and a negative error code in case 589 + * @vol. 
Returns zero in case of success and a negative error code in case 606 590 * of failure. In case of error, it is possible that something was still 607 591 * written to the flash media, but may be some garbage. 608 592 */ 609 - int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum, 593 + int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, 610 594 const void *buf, int offset, int len, int dtype) 611 595 { 612 - int idx = vol_id2idx(ubi, vol_id), err, pnum, tries = 0; 613 - struct ubi_volume *vol = ubi->volumes[idx]; 596 + int err, pnum, tries = 0, vol_id = vol->vol_id; 614 597 struct ubi_vid_hdr *vid_hdr; 615 598 616 599 if (ubi->ro_mode) ··· 628 613 if (err) { 629 614 ubi_warn("failed to write data to PEB %d", pnum); 630 615 if (err == -EIO && ubi->bad_allowed) 631 - err = recover_peb(ubi, pnum, vol_id, lnum, buf, offset, len); 616 + err = recover_peb(ubi, pnum, vol_id, lnum, buf, 617 + offset, len); 632 618 if (err) 633 619 ubi_ro_mode(ubi); 634 620 } ··· 672 656 goto write_error; 673 657 } 674 658 675 - err = ubi_io_write_data(ubi, buf, pnum, offset, len); 676 - if (err) { 677 - ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, " 678 - "PEB %d", len, offset, vol_id, lnum, pnum); 679 - goto write_error; 659 + if (len) { 660 + err = ubi_io_write_data(ubi, buf, pnum, offset, len); 661 + if (err) { 662 + ubi_warn("failed to write %d bytes at offset %d of " 663 + "LEB %d:%d, PEB %d", len, offset, vol_id, 664 + lnum, pnum); 665 + goto write_error; 666 + } 680 667 } 681 668 682 669 vol->eba_tbl[lnum] = pnum; ··· 717 698 /** 718 699 * ubi_eba_write_leb_st - write data to static volume. 719 700 * @ubi: UBI device description object 720 - * @vol_id: volume ID 701 + * @vol: volume description object 721 702 * @lnum: logical eraseblock number 722 703 * @buf: data to write 723 704 * @len: how many bytes to write ··· 725 706 * @used_ebs: how many logical eraseblocks will this volume contain 726 707 * 727 708 * This function writes data to logical eraseblock @lnum of static volume 728 - * @vol_id. The @used_ebs argument should contain total number of logical 709 + * @vol. The @used_ebs argument should contain total number of logical 729 710 * eraseblock in this static volume. 730 711 * 731 712 * When writing to the last logical eraseblock, the @len argument doesn't have ··· 737 718 * volumes. This function returns zero in case of success and a negative error 738 719 * code in case of failure. 739 720 */ 740 - int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum, 741 - const void *buf, int len, int dtype, int used_ebs) 721 + int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol, 722 + int lnum, const void *buf, int len, int dtype, 723 + int used_ebs) 742 724 { 743 - int err, pnum, tries = 0, data_size = len; 744 - int idx = vol_id2idx(ubi, vol_id); 745 - struct ubi_volume *vol = ubi->volumes[idx]; 725 + int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id; 746 726 struct ubi_vid_hdr *vid_hdr; 747 727 uint32_t crc; 748 728 ··· 837 819 /* 838 820 * ubi_eba_atomic_leb_change - change logical eraseblock atomically. 839 821 * @ubi: UBI device description object 840 - * @vol_id: volume ID 822 + * @vol: volume description object 841 823 * @lnum: logical eraseblock number 842 824 * @buf: data to write 843 825 * @len: how many bytes to write ··· 852 834 * UBI reserves one LEB for the "atomic LEB change" operation, so only one 853 835 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex. 
854 836 */ 855 - int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum, 856 - const void *buf, int len, int dtype) 837 + int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, 838 + int lnum, const void *buf, int len, int dtype) 857 839 { 858 - int err, pnum, tries = 0, idx = vol_id2idx(ubi, vol_id); 859 - struct ubi_volume *vol = ubi->volumes[idx]; 840 + int err, pnum, tries = 0, vol_id = vol->vol_id; 860 841 struct ubi_vid_hdr *vid_hdr; 861 842 uint32_t crc; 862 843 863 844 if (ubi->ro_mode) 864 845 return -EROFS; 846 + 847 + if (len == 0) { 848 + /* 849 + * Special case when data length is zero. In this case the LEB 850 + * has to be unmapped and mapped somewhere else. 851 + */ 852 + err = ubi_eba_unmap_leb(ubi, vol, lnum); 853 + if (err) 854 + return err; 855 + return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype); 856 + } 865 857 866 858 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 867 859 if (!vid_hdr) ··· 956 928 } 957 929 958 930 /** 959 - * ltree_entry_ctor - lock tree entries slab cache constructor. 960 - * @obj: the lock-tree entry to construct 961 - * @cache: the lock tree entry slab cache 962 - * @flags: constructor flags 963 - */ 964 - static void ltree_entry_ctor(struct kmem_cache *cache, void *obj) 965 - { 966 - struct ltree_entry *le = obj; 967 - 968 - le->users = 0; 969 - init_rwsem(&le->mutex); 970 - } 971 - 972 - /** 973 931 * ubi_eba_copy_leb - copy logical eraseblock. 974 932 * @ubi: UBI device description object 975 933 * @from: physical eraseblock number from where to copy ··· 964 950 * 965 951 * This function copies logical eraseblock from physical eraseblock @from to 966 952 * physical eraseblock @to. The @vid_hdr buffer may be changed by this 967 - * function. Returns zero in case of success, %UBI_IO_BITFLIPS if the operation 968 - * was canceled because bit-flips were detected at the target PEB, and a 969 - * negative error code in case of failure. 953 + * function. Returns: 954 + * o %0 in case of success; 955 + * o %1 if the operation was canceled and should be tried later (e.g., 956 + * because a bit-flip was detected at the target PEB); 957 + * o %2 if the volume is being deleted and this LEB should not be moved. 970 958 */ 971 959 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 972 960 struct ubi_vid_hdr *vid_hdr) 973 961 { 974 - int err, vol_id, lnum, data_size, aldata_size, pnum, idx; 962 + int err, vol_id, lnum, data_size, aldata_size, idx; 975 963 struct ubi_volume *vol; 976 964 uint32_t crc; 977 965 ··· 989 973 data_size = aldata_size = 990 974 ubi->leb_size - be32_to_cpu(vid_hdr->data_pad); 991 975 992 - /* 993 - * We do not want anybody to write to this logical eraseblock while we 994 - * are moving it, so we lock it. 995 - */ 996 - err = leb_write_lock(ubi, vol_id, lnum); 997 - if (err) 998 - return err; 999 - 1000 - mutex_lock(&ubi->buf_mutex); 1001 - 1002 - /* 1003 - * But the logical eraseblock might have been put by this time. 1004 - * Cancel if it is true. 1005 - */ 1006 976 idx = vol_id2idx(ubi, vol_id); 1007 - 1008 - /* 1009 - * We may race with volume deletion/re-size, so we have to hold 1010 - * @ubi->volumes_lock. 1011 - */ 1012 977 spin_lock(&ubi->volumes_lock); 978 + /* 979 + * Note, we may race with volume deletion, which means that the volume 980 + * this logical eraseblock belongs to might be being deleted. Since the 981 + * volume deletion unmaps all the volume's logical eraseblocks, it will 982 + * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish. 
983 + */
1013 984 vol = ubi->volumes[idx];
1014 985 if (!vol) {
1015 - dbg_eba("volume %d was removed meanwhile", vol_id);
986 + /* No need to do further work, cancel */
987 + dbg_eba("volume %d is being removed, cancel", vol_id);
1016 988 spin_unlock(&ubi->volumes_lock);
1017 - goto out_unlock;
1018 - }
1019 -
1020 - pnum = vol->eba_tbl[lnum];
1021 - if (pnum != from) {
1022 - dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to "
1023 - "PEB %d, cancel", vol_id, lnum, from, pnum);
1024 - spin_unlock(&ubi->volumes_lock);
1025 - goto out_unlock;
989 + return 2;
1026 990 }
1027 991 spin_unlock(&ubi->volumes_lock);
1028 992
1029 - /* OK, now the LEB is locked and we can safely start moving it */
993 + /*
994 + * We do not want anybody to write to this logical eraseblock while we
995 + * are moving it, so lock it.
996 + *
997 + * Note, we are using non-waiting locking here, because we cannot sleep
998 + * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
999 + * unmapping the LEB which is mapped to the PEB we are going to move
1000 + * (@from). This task locks the LEB and goes to sleep in the
1001 + * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
1002 + * holding @ubi->move_mutex and go to sleep on the LEB lock. So, if the
1003 + * LEB is already locked, we just do not move it and return %1.
1004 + */
1005 + err = leb_write_trylock(ubi, vol_id, lnum);
1006 + if (err) {
1007 + dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum);
1008 + return err;
1009 + }
1030 1010
1011 + /*
1012 + * The LEB might have been put meanwhile, and the task which put it is
1013 + * probably waiting on @ubi->move_mutex. No need to continue the work,
1014 + * cancel it.
1015 + */
1016 + if (vol->eba_tbl[lnum] != from) {
1017 + dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to "
1018 + "PEB %d, cancel", vol_id, lnum, from,
1019 + vol->eba_tbl[lnum]);
1020 + err = 1;
1021 + goto out_unlock_leb;
1022 + }
1023 +
1024 + /*
1025 + * OK, now the LEB is locked and we can safely start moving it. Since
1026 + * this function utilizes the @ubi->peb_buf1 buffer which is shared
1027 + * with some other functions, we lock the buffer by taking the
1028 + * @ubi->buf_mutex.
1029 + */
1030 + mutex_lock(&ubi->buf_mutex);
1031 1031 dbg_eba("read %d bytes of data", aldata_size);
1032 1032 err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
1033 1033 if (err && err != UBI_IO_BITFLIPS) {
1034 1034 ubi_warn("error %d while reading data from PEB %d",
1035 1035 err, from);
1036 - goto out_unlock;
1036 + goto out_unlock_buf;
1037 1037 }
1038 1038
1039 1039 /*
···
1085 1053
1086 1054 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
1087 1055 if (err)
1088 - goto out_unlock;
1056 + goto out_unlock_buf;
1089 1057
1090 1058 cond_resched();
···
1094 1062 if (err) {
1095 1063 if (err != UBI_IO_BITFLIPS)
1096 1064 ubi_warn("cannot read VID header back from PEB %d", to);
1097 - goto out_unlock;
1065 + else
1066 + err = 1;
1067 + goto out_unlock_buf;
1098 1068 }
1099 1069
1100 1070 if (data_size > 0) {
1101 1071 err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
1102 1072 if (err)
1103 - goto out_unlock;
1073 + goto out_unlock_buf;
1104 1074
1105 1075 cond_resched();
···
1116 1082 if (err != UBI_IO_BITFLIPS)
1117 1083 ubi_warn("cannot read data back from PEB %d",
1118 1084 to);
1119 - goto out_unlock;
1085 + else
1086 + err = 1;
1087 + goto out_unlock_buf;
1120 1088 }
1121 1089
1122 1090 cond_resched();
···
1126 1090 if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
1127 1091 ubi_warn("read data back from PEB %d - it is different",
1128 1092 to);
1129 - goto out_unlock;
1093 + goto out_unlock_buf;
1130 1094 }
1131 1095 }
1132 1096
1133 1097 ubi_assert(vol->eba_tbl[lnum] == from);
1134 1098 vol->eba_tbl[lnum] = to;
1135 1099
1136 - out_unlock:
1100 + out_unlock_buf:
1137 1101 mutex_unlock(&ubi->buf_mutex);
1102 + out_unlock_leb:
1138 1103 leb_write_unlock(ubi, vol_id, lnum);
1139 1104 return err;
1140 1105 }
···
1161 1124 spin_lock_init(&ubi->ltree_lock);
1162 1125 mutex_init(&ubi->alc_mutex);
1163 1126 ubi->ltree = RB_ROOT;
1164 -
1165 - if (ubi_devices_cnt == 0) {
1166 - ltree_slab = kmem_cache_create("ubi_ltree_slab",
1167 - sizeof(struct ltree_entry), 0,
1168 - 0, &ltree_entry_ctor);
1169 - if (!ltree_slab)
1170 - return -ENOMEM;
1171 - }
1172 1127
1173 1128 ubi->global_sqnum = si->max_sqnum + 1;
1174 1129 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
···
1197 1168 }
1198 1169 }
1199 1170
1171 + if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
1172 + ubi_err("not enough physical eraseblocks (%d, need %d)",
1173 + ubi->avail_pebs, EBA_RESERVED_PEBS);
1174 + err = -ENOSPC;
1175 + goto out_free;
1176 + }
1177 + ubi->avail_pebs -= EBA_RESERVED_PEBS;
1178 + ubi->rsvd_pebs += EBA_RESERVED_PEBS;
1179 +
1200 1180 if (ubi->bad_allowed) {
1201 1181 ubi_calculate_reserved(ubi);
···
1222 1184 ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
1223 1185 }
1224 1186
1225 - if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
1226 - ubi_err("no enough physical eraseblocks (%d, need %d)",
1227 - ubi->avail_pebs, EBA_RESERVED_PEBS);
1228 - err = -ENOSPC;
1229 - goto out_free;
1230 - }
1231 - ubi->avail_pebs -= EBA_RESERVED_PEBS;
1232 - ubi->rsvd_pebs += EBA_RESERVED_PEBS;
1233 -
1234 1187 dbg_eba("EBA unit is initialized");
1235 1188 return 0;
···
1231 1202 continue;
1232 1203 kfree(ubi->volumes[i]->eba_tbl);
1233 1204 }
1234 - if (ubi_devices_cnt == 0)
1235 - kmem_cache_destroy(ltree_slab);
1236 1205 return err;
1237 1206 }
···
1249 1222 continue;
1250 1223 kfree(ubi->volumes[i]->eba_tbl);
1251 1224 }
1252 - if (ubi_devices_cnt == 1)
1253 - kmem_cache_destroy(ltree_slab);
1254 1225 }
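The switch from leb_write_lock() to leb_write_trylock() in ubi_eba_copy_leb() is the classic way to break a two-lock deadlock: the mover already holds move_mutex, so it must not block on the LEB lock that an unmapping task may hold while itself waiting for move_mutex. A userspace sketch of the same back-off pattern using pthreads (names are illustrative, not the kernel API):

	#include <stdio.h>
	#include <pthread.h>

	static pthread_mutex_t move_mutex = PTHREAD_MUTEX_INITIALIZER;
	static pthread_rwlock_t leb_lock = PTHREAD_RWLOCK_INITIALIZER;

	/* The mover holds move_mutex; it may only *try* the LEB lock. */
	static int try_move_leb(void)
	{
		pthread_mutex_lock(&move_mutex);
		if (pthread_rwlock_trywrlock(&leb_lock) != 0) {
			/* Contention: back off and retry later. Sleeping here
			 * could deadlock against an unmapper that holds
			 * leb_lock and waits for move_mutex. */
			pthread_mutex_unlock(&move_mutex);
			return 1; /* "cancelled, try again" */
		}
		/* ... copy the eraseblock contents here ... */
		pthread_rwlock_unlock(&leb_lock);
		pthread_mutex_unlock(&move_mutex);
		return 0;
	}

	int main(void)
	{
		printf("move result: %d\n", try_move_leb());
		return 0;
	}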
+4 -5
drivers/mtd/ubi/gluebi.c
··· 129 129 if (to_read > total_read) 130 130 to_read = total_read; 131 131 132 - err = ubi_eba_read_leb(ubi, vol->vol_id, lnum, buf, offs, 133 - to_read, 0); 132 + err = ubi_eba_read_leb(ubi, vol, lnum, buf, offs, to_read, 0); 134 133 if (err) 135 134 break; 136 135 ··· 186 187 if (to_write > total_written) 187 188 to_write = total_written; 188 189 189 - err = ubi_eba_write_leb(ubi, vol->vol_id, lnum, buf, offs, 190 - to_write, UBI_UNKNOWN); 190 + err = ubi_eba_write_leb(ubi, vol, lnum, buf, offs, to_write, 191 + UBI_UNKNOWN); 191 192 if (err) 192 193 break; 193 194 ··· 236 237 return -EROFS; 237 238 238 239 for (i = 0; i < count; i++) { 239 - err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum + i); 240 + err = ubi_eba_unmap_leb(ubi, vol, lnum + i); 240 241 if (err) 241 242 goto out_err; 242 243 }
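The gluebi hunks only change which object is passed down; the surrounding loops keep translating a linear MTD offset into (logical eraseblock, offset) pairs and walking the request chunk by chunk. That address split in isolation (standalone sketch with a made-up LEB size):

	#include <stdio.h>

	#define LEB_SIZE 129024	/* example usable LEB size in bytes, assumed */

	/* Walk [from, from + total) in per-LEB chunks, as the gluebi
	 * read/write loops do. */
	static void walk(long long from, int total)
	{
		int lnum = from / LEB_SIZE;	/* first logical eraseblock */
		int offs = from % LEB_SIZE;	/* offset inside it */

		while (total > 0) {
			int chunk = LEB_SIZE - offs; /* room left in this LEB */

			if (chunk > total)
				chunk = total;
			printf("LEB %d, offset %d, %d bytes\n", lnum, offs, chunk);
			total -= chunk;
			offs = 0;
			lnum += 1;
		}
	}

	int main(void)
	{
		walk(200000, 300000);
		return 0;
	}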
+10
drivers/mtd/ubi/io.c
··· 173 173 ubi_err("error %d while reading %d bytes from PEB %d:%d, " 174 174 "read %zd bytes", err, len, pnum, offset, read); 175 175 ubi_dbg_dump_stack(); 176 + 177 + /* 178 + * The driver should never return -EBADMSG if it failed to read 179 + * all the requested data. But some buggy drivers might do 180 + * this, so we change it to -EIO. 181 + */ 182 + if (read != len && err == -EBADMSG) { 183 + ubi_assert(0); 184 + err = -EIO; 185 + } 176 186 } else { 177 187 ubi_assert(len == read); 178 188
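The io.c hunk defends against drivers that return -EBADMSG (an ECC error) together with a short read, which would otherwise propagate the wrong error code for incomplete data. The normalization rule is easy to test in isolation (plain C, userspace errno constants standing in for the kernel's):

	#include <stdio.h>
	#include <errno.h>

	/* Mirror the io.c rule: -EBADMSG is only meaningful for a full read;
	 * a short read combined with -EBADMSG is a driver bug, so report -EIO. */
	static int normalize_read_err(int err, int read, int len)
	{
		if (err == -EBADMSG && read != len)
			return -EIO;
		return err;
	}

	int main(void)
	{
		printf("%d\n", normalize_read_err(-EBADMSG, 512, 512)); /* stays -EBADMSG */
		printf("%d\n", normalize_read_err(-EBADMSG, 100, 512)); /* becomes -EIO */
		return 0;
	}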
+116 -61
drivers/mtd/ubi/kapi.c
··· 30 30 * @ubi_num: UBI device number 31 31 * @di: the information is stored here 32 32 * 33 - * This function returns %0 in case of success and a %-ENODEV if there is no 34 - * such UBI device. 33 + * This function returns %0 in case of success, %-EINVAL if the UBI device 34 + * number is invalid, and %-ENODEV if there is no such UBI device. 35 35 */ 36 36 int ubi_get_device_info(int ubi_num, struct ubi_device_info *di) 37 37 { 38 - const struct ubi_device *ubi; 38 + struct ubi_device *ubi; 39 39 40 - if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || 41 - !ubi_devices[ubi_num]) 40 + if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) 41 + return -EINVAL; 42 + 43 + ubi = ubi_get_device(ubi_num); 44 + if (!ubi) 42 45 return -ENODEV; 43 46 44 - ubi = ubi_devices[ubi_num]; 45 47 di->ubi_num = ubi->ubi_num; 46 48 di->leb_size = ubi->leb_size; 47 49 di->min_io_size = ubi->min_io_size; 48 50 di->ro_mode = ubi->ro_mode; 49 - di->cdev = MKDEV(ubi->major, 0); 51 + di->cdev = ubi->cdev.dev; 52 + 53 + ubi_put_device(ubi); 50 54 return 0; 51 55 } 52 56 EXPORT_SYMBOL_GPL(ubi_get_device_info); ··· 77 73 vi->usable_leb_size = vol->usable_leb_size; 78 74 vi->name_len = vol->name_len; 79 75 vi->name = vol->name; 80 - vi->cdev = MKDEV(ubi->major, vi->vol_id + 1); 76 + vi->cdev = vol->cdev.dev; 81 77 } 82 78 EXPORT_SYMBOL_GPL(ubi_get_volume_info); 83 79 ··· 108 104 109 105 dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode); 110 106 111 - err = -ENODEV; 112 - if (ubi_num < 0) 113 - return ERR_PTR(err); 107 + if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) 108 + return ERR_PTR(-EINVAL); 114 109 115 - ubi = ubi_devices[ubi_num]; 116 - 117 - if (!try_module_get(THIS_MODULE)) 118 - return ERR_PTR(err); 119 - 120 - if (ubi_num >= UBI_MAX_DEVICES || !ubi) 121 - goto out_put; 122 - 123 - err = -EINVAL; 124 - if (vol_id < 0 || vol_id >= ubi->vtbl_slots) 125 - goto out_put; 126 110 if (mode != UBI_READONLY && mode != UBI_READWRITE && 127 111 mode != UBI_EXCLUSIVE) 128 - goto out_put; 112 + return ERR_PTR(-EINVAL); 113 + 114 + /* 115 + * First of all, we have to get the UBI device to prevent its removal. 116 + */ 117 + ubi = ubi_get_device(ubi_num); 118 + if (!ubi) 119 + return ERR_PTR(-ENODEV); 120 + 121 + if (vol_id < 0 || vol_id >= ubi->vtbl_slots) { 122 + err = -EINVAL; 123 + goto out_put_ubi; 124 + } 129 125 130 126 desc = kmalloc(sizeof(struct ubi_volume_desc), GFP_KERNEL); 131 127 if (!desc) { 132 128 err = -ENOMEM; 133 - goto out_put; 129 + goto out_put_ubi; 134 130 } 131 + 132 + err = -ENODEV; 133 + if (!try_module_get(THIS_MODULE)) 134 + goto out_free; 135 135 136 136 spin_lock(&ubi->volumes_lock); 137 137 vol = ubi->volumes[vol_id]; 138 - if (!vol) { 139 - err = -ENODEV; 138 + if (!vol) 140 139 goto out_unlock; 141 - } 142 140 143 141 err = -EBUSY; 144 142 switch (mode) { ··· 162 156 vol->exclusive = 1; 163 157 break; 164 158 } 159 + get_device(&vol->dev); 160 + vol->ref_count += 1; 165 161 spin_unlock(&ubi->volumes_lock); 166 162 167 163 desc->vol = vol; 168 164 desc->mode = mode; 169 165 170 - /* 171 - * To prevent simultaneous checks of the same volume we use @vtbl_mutex, 172 - * although it is not the purpose it was introduced for. 
173 - */ 174 - mutex_lock(&ubi->vtbl_mutex); 166 + mutex_lock(&ubi->ckvol_mutex); 175 167 if (!vol->checked) { 176 168 /* This is the first open - check the volume */ 177 169 err = ubi_check_volume(ubi, vol_id); 178 170 if (err < 0) { 179 - mutex_unlock(&ubi->vtbl_mutex); 171 + mutex_unlock(&ubi->ckvol_mutex); 180 172 ubi_close_volume(desc); 181 173 return ERR_PTR(err); 182 174 } ··· 185 181 } 186 182 vol->checked = 1; 187 183 } 188 - mutex_unlock(&ubi->vtbl_mutex); 184 + mutex_unlock(&ubi->ckvol_mutex); 185 + 189 186 return desc; 190 187 191 188 out_unlock: 192 189 spin_unlock(&ubi->volumes_lock); 193 - kfree(desc); 194 - out_put: 195 190 module_put(THIS_MODULE); 191 + out_free: 192 + kfree(desc); 193 + out_put_ubi: 194 + ubi_put_device(ubi); 196 195 return ERR_PTR(err); 197 196 } 198 197 EXPORT_SYMBOL_GPL(ubi_open_volume); ··· 212 205 int mode) 213 206 { 214 207 int i, vol_id = -1, len; 215 - struct ubi_volume_desc *ret; 216 208 struct ubi_device *ubi; 209 + struct ubi_volume_desc *ret; 217 210 218 211 dbg_msg("open volume %s, mode %d", name, mode); 219 212 ··· 224 217 if (len > UBI_VOL_NAME_MAX) 225 218 return ERR_PTR(-EINVAL); 226 219 227 - ret = ERR_PTR(-ENODEV); 228 - if (!try_module_get(THIS_MODULE)) 229 - return ret; 220 + if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) 221 + return ERR_PTR(-EINVAL); 230 222 231 - if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || !ubi_devices[ubi_num]) 232 - goto out_put; 233 - 234 - ubi = ubi_devices[ubi_num]; 223 + ubi = ubi_get_device(ubi_num); 224 + if (!ubi) 225 + return ERR_PTR(-ENODEV); 235 226 236 227 spin_lock(&ubi->volumes_lock); 237 228 /* Walk all volumes of this UBI device */ ··· 243 238 } 244 239 spin_unlock(&ubi->volumes_lock); 245 240 246 - if (vol_id < 0) 247 - goto out_put; 241 + if (vol_id >= 0) 242 + ret = ubi_open_volume(ubi_num, vol_id, mode); 243 + else 244 + ret = ERR_PTR(-ENODEV); 248 245 249 - ret = ubi_open_volume(ubi_num, vol_id, mode); 250 - 251 - out_put: 252 - module_put(THIS_MODULE); 246 + /* 247 + * We should put the UBI device even in case of success, because 248 + * 'ubi_open_volume()' took a reference as well. 
249 + */
250 + ubi_put_device(ubi);
253 251 return ret;
254 252 }
255 253 EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
···
264 256 void ubi_close_volume(struct ubi_volume_desc *desc)
265 257 {
266 258 struct ubi_volume *vol = desc->vol;
259 + struct ubi_device *ubi = vol->ubi;
267 260
268 261 dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode);
269 262
270 - spin_lock(&vol->ubi->volumes_lock);
263 + spin_lock(&ubi->volumes_lock);
271 264 switch (desc->mode) {
272 265 case UBI_READONLY:
273 266 vol->readers -= 1;
···
279 270 case UBI_EXCLUSIVE:
280 271 vol->exclusive = 0;
281 272 }
282 - spin_unlock(&vol->ubi->volumes_lock);
273 + vol->ref_count -= 1;
274 + spin_unlock(&ubi->volumes_lock);
283 275
284 276 kfree(desc);
277 + put_device(&vol->dev);
278 + ubi_put_device(ubi);
285 279 module_put(THIS_MODULE);
286 280 }
287 281 EXPORT_SYMBOL_GPL(ubi_close_volume);
···
344 332 if (len == 0)
345 333 return 0;
346 334
347 - err = ubi_eba_read_leb(ubi, vol_id, lnum, buf, offset, len, check);
335 + err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check);
348 336 if (err && err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) {
349 337 ubi_warn("mark volume %d as corrupted", vol_id);
350 338 vol->corrupted = 1;
···
411 399 if (len == 0)
412 400 return 0;
413 401
414 - return ubi_eba_write_leb(ubi, vol_id, lnum, buf, offset, len, dtype);
402 + return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len, dtype);
415 403 }
416 404 EXPORT_SYMBOL_GPL(ubi_leb_write);
···
460 448 if (len == 0)
461 449 return 0;
462 450
463 - return ubi_eba_atomic_leb_change(ubi, vol_id, lnum, buf, len, dtype);
451 + return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len, dtype);
464 452 }
465 453 EXPORT_SYMBOL_GPL(ubi_leb_change);
···
480 468 {
481 469 struct ubi_volume *vol = desc->vol;
482 470 struct ubi_device *ubi = vol->ubi;
483 - int err, vol_id = vol->vol_id;
471 + int err;
484 472
485 - dbg_msg("erase LEB %d:%d", vol_id, lnum);
473 + dbg_msg("erase LEB %d:%d", vol->vol_id, lnum);
486 474
487 475 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
488 476 return -EROFS;
···
493 481 if (vol->upd_marker)
494 482 return -EBADF;
495 483
496 - err = ubi_eba_unmap_leb(ubi, vol_id, lnum);
484 + err = ubi_eba_unmap_leb(ubi, vol, lnum);
497 485 if (err)
498 486 return err;
···
541 529 {
542 530 struct ubi_volume *vol = desc->vol;
543 531 struct ubi_device *ubi = vol->ubi;
544 - int vol_id = vol->vol_id;
545 532
546 - dbg_msg("unmap LEB %d:%d", vol_id, lnum);
533 + dbg_msg("unmap LEB %d:%d", vol->vol_id, lnum);
547 534
548 535 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
549 536 return -EROFS;
···
553 542 if (vol->upd_marker)
554 543 return -EBADF;
555 544
556 - return ubi_eba_unmap_leb(ubi, vol_id, lnum);
545 + return ubi_eba_unmap_leb(ubi, vol, lnum);
557 546 }
558 547 EXPORT_SYMBOL_GPL(ubi_leb_unmap);
548 +
549 + /**
550 + * ubi_leb_map - map logical eraseblock to a physical eraseblock.
551 + * @desc: volume descriptor
552 + * @lnum: logical eraseblock number
553 + * @dtype: expected data type
554 + *
555 + * This function maps an un-mapped logical eraseblock @lnum to a physical
556 + * eraseblock. This means that after a successful invocation of this
557 + * function the logical eraseblock @lnum will be empty (contain only %0xFF
558 + * bytes) and be mapped to a physical eraseblock, even if an unclean reboot
559 + * happens.
560 + *
561 + * This function returns zero in case of success, %-EBADF if the volume is
562 + * damaged because of an interrupted update, %-EBADMSG if the logical
563 + * eraseblock is already mapped, and other negative error codes in case of
564 + * other failures.
565 + */
566 + int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
567 + {
568 + struct ubi_volume *vol = desc->vol;
569 + struct ubi_device *ubi = vol->ubi;
570 +
571 + dbg_msg("map LEB %d:%d", vol->vol_id, lnum);
572 +
573 + if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
574 + return -EROFS;
575 +
576 + if (lnum < 0 || lnum >= vol->reserved_pebs)
577 + return -EINVAL;
578 +
579 + if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
580 + dtype != UBI_UNKNOWN)
581 + return -EINVAL;
582 +
583 + if (vol->upd_marker)
584 + return -EBADF;
585 +
586 + if (vol->eba_tbl[lnum] >= 0)
587 + return -EBADMSG;
588 +
589 + return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
590 + }
591 + EXPORT_SYMBOL_GPL(ubi_leb_map);
559 592
560 593 /**
561 594 * ubi_is_mapped - check if logical eraseblock is mapped.
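kapi.c now brackets every open with ubi_get_device()/ubi_put_device() so an attached UBI device cannot disappear while a volume descriptor is held. The shape of that reference discipline, reduced to a standalone sketch (a toy refcount under a pthread mutex, not the kernel's device-model machinery):

	#include <stdio.h>
	#include <pthread.h>

	struct toy_device {
		int ref_count;
		int alive;	/* cleared when teardown starts */
		pthread_mutex_t lock;
	};

	/* Take a reference; fail if the device is already being torn down. */
	static struct toy_device *dev_get(struct toy_device *d)
	{
		pthread_mutex_lock(&d->lock);
		if (!d->alive) {
			pthread_mutex_unlock(&d->lock);
			return NULL;
		}
		d->ref_count += 1;
		pthread_mutex_unlock(&d->lock);
		return d;
	}

	static void dev_put(struct toy_device *d)
	{
		pthread_mutex_lock(&d->lock);
		d->ref_count -= 1; /* the last put would trigger release */
		pthread_mutex_unlock(&d->lock);
	}

	int main(void)
	{
		struct toy_device d = { .ref_count = 0, .alive = 1,
					.lock = PTHREAD_MUTEX_INITIALIZER };

		if (dev_get(&d)) {
			printf("refs: %d\n", d.ref_count); /* 1 */
			dev_put(&d);
		}
		return 0;
	}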
+1 -1
drivers/mtd/ubi/misc.c
··· 79 79 else 80 80 size = vol->usable_leb_size; 81 81 82 - err = ubi_eba_read_leb(ubi, vol_id, i, buf, 0, size, 1); 82 + err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1); 83 83 if (err) { 84 84 if (err == -EBADMSG) 85 85 err = 1;
+8 -4
drivers/mtd/ubi/scan.c
··· 286 286 * FIXME: but this is anyway obsolete and will be removed at 287 287 * some point. 288 288 */ 289 - 290 289 dbg_bld("using old crappy leb_ver stuff"); 290 + 291 + if (v1 == v2) { 292 + ubi_err("PEB %d and PEB %d have the same version %lld", 293 + seb->pnum, pnum, v1); 294 + return -EINVAL; 295 + } 291 296 292 297 abs = v1 - v2; 293 298 if (abs < 0) ··· 395 390 vfree(buf); 396 391 out_free_vidh: 397 392 ubi_free_vid_hdr(ubi, vh); 398 - ubi_assert(err < 0); 399 393 return err; 400 394 } 401 395 ··· 773 769 */ 774 770 static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum) 775 771 { 776 - long long ec; 772 + long long uninitialized_var(ec); 777 773 int err, bitflips = 0, vol_id, ec_corr = 0; 778 774 779 775 dbg_bld("scan PEB %d", pnum); ··· 858 854 } 859 855 860 856 vol_id = be32_to_cpu(vidh->vol_id); 861 - if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOL_ID) { 857 + if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) { 862 858 int lnum = be32_to_cpu(vidh->lnum); 863 859 864 860 /* Unsupported internal volume */
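The scan.c hunk adds the missing sanity check to the legacy leb_ver comparison: two copies of the same LEB must never carry the same version, so equality is now rejected as corruption instead of silently picking one copy. A reduced model of that decision (the wraparound handling hinted at by the abs-difference code above is deliberately left out):

	#include <stdio.h>

	/* Decide which of two copies of a LEB is newer by version number.
	 * Returns 1 if v1 is newer, 0 if v2 is newer, and -1 for the
	 * impossible v1 == v2 case, which the patch now treats as an error. */
	static int newer(long long v1, long long v2)
	{
		if (v1 == v2)
			return -1;	     /* same version: invalid media state */
		return v1 > v2 ? 1 : 0;      /* ignoring wraparound for brevity */
	}

	int main(void)
	{
		printf("%d\n", newer(10, 9)); /* 1 */
		printf("%d\n", newer(9, 10)); /* 0 */
		printf("%d\n", newer(7, 7));  /* -1 */
		return 0;
	}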
+124 -47
drivers/mtd/ubi/ubi.h
··· 94 94 UBI_IO_BITFLIPS
95 95 };
96 96
97 - extern int ubi_devices_cnt;
98 - extern struct ubi_device *ubi_devices[];
97 + /**
98 + * struct ubi_wl_entry - wear-leveling entry.
99 + * @rb: link in the corresponding RB-tree
100 + * @ec: erase counter
101 + * @pnum: physical eraseblock number
102 + *
103 + * This data structure is used in the WL unit. Each physical eraseblock has a
104 + * corresponding &struct ubi_wl_entry object which may be kept in different
105 + * RB-trees. See WL unit for details.
106 + */
107 + struct ubi_wl_entry {
108 + struct rb_node rb;
109 + int ec;
110 + int pnum;
111 + };
112 +
113 + /**
114 + * struct ubi_ltree_entry - an entry in the lock tree.
115 + * @rb: links RB-tree nodes
116 + * @vol_id: volume ID of the locked logical eraseblock
117 + * @lnum: locked logical eraseblock number
118 + * @users: how many tasks are using this logical eraseblock or wait for it
119 + * @mutex: read/write mutex to implement read/write access serialization to
120 + * the (@vol_id, @lnum) logical eraseblock
121 + *
122 + * This data structure is used in the EBA unit to implement per-LEB locking.
123 + * When a logical eraseblock is being locked, a corresponding
124 + * &struct ubi_ltree_entry object is inserted into the lock tree (@ubi->ltree).
125 + * See EBA unit for details.
126 + */
127 + struct ubi_ltree_entry {
128 + struct rb_node rb;
129 + int vol_id;
130 + int lnum;
131 + int users;
132 + struct rw_semaphore mutex;
133 + };
99 134
100 135 struct ubi_volume_desc;
101 136
···
140 105 * @cdev: character device object to create character device
141 106 * @ubi: reference to the UBI device description object
142 107 * @vol_id: volume ID
108 + * @ref_count: volume reference count
143 109 * @readers: number of users holding this volume in read-only mode
144 110 * @writers: number of users holding this volume in read-write mode
145 111 * @exclusive: whether somebody holds this volume in exclusive mode
146 - * @removed: if the volume was removed
147 - * @checked: if this static volume was checked
148 112 *
149 113 * @reserved_pebs: how many physical eraseblocks are reserved for this volume
150 114 * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
···
151 117 * @used_ebs: how many logical eraseblocks in this volume contain data
152 118 * @last_eb_bytes: how many bytes are stored in the last logical eraseblock
153 119 * @used_bytes: how many bytes of data this volume contains
154 - * @upd_marker: non-zero if the update marker is set for this volume
155 - * @corrupted: non-zero if the volume is corrupted (static volumes only)
156 120 * @alignment: volume alignment
157 121 * @data_pad: how many bytes are not used at the end of physical eraseblocks to
158 - * satisfy the requested alignment
122 + * satisfy the requested alignment
159 123 * @name_len: volume name length
160 124 * @name: volume name
161 125 *
162 - * @updating: whether the volume is being updated
163 126 * @upd_ebs: how many eraseblocks are expected to be updated
164 - * @upd_bytes: how many bytes are expected to be received
165 - * @upd_received: how many update bytes were already received
166 - * @upd_buf: update buffer which is used to collect update data
127 + * @ch_lnum: LEB number which is being changed by the atomic LEB change
128 + * operation
129 + * @ch_dtype: data persistency type which is being changed by the atomic LEB
130 + * change operation
131 + * @upd_bytes: how many bytes are expected to be received for volume update or
132 + * atomic LEB change
133 + * @upd_received: how many bytes were already received for volume update or
134 + * atomic LEB change
135 + * @upd_buf: update buffer which is used to collect update data or data for
136 + * atomic LEB change
167 137 *
168 138 * @eba_tbl: EBA table of this volume (LEB->PEB mapping)
139 + * @checked: %1 if this static volume was checked
140 + * @corrupted: %1 if the volume is corrupted (static volumes only)
141 + * @upd_marker: %1 if the update marker is set for this volume
142 + * @updating: %1 if the volume is being updated
143 + * @changing_leb: %1 if the atomic LEB change ioctl command is in progress
169 144 *
170 145 * @gluebi_desc: gluebi UBI volume descriptor
171 146 * @gluebi_refcount: reference count of the gluebi MTD device
···
193 150 struct cdev cdev;
194 151 struct ubi_device *ubi;
195 152 int vol_id;
153 + int ref_count;
196 154 int readers;
197 155 int writers;
198 156 int exclusive;
199 - int removed;
200 - int checked;
201 157
202 158 int reserved_pebs;
203 159 int vol_type;
···
204 162 int used_ebs;
205 163 int last_eb_bytes;
206 164 long long used_bytes;
207 - int upd_marker;
208 - int corrupted;
209 165 int alignment;
210 166 int data_pad;
211 167 int name_len;
212 168 char name[UBI_VOL_NAME_MAX+1];
213 169
214 - int updating;
215 170 int upd_ebs;
171 + int ch_lnum;
172 + int ch_dtype;
216 173 long long upd_bytes;
217 174 long long upd_received;
218 175 void *upd_buf;
219 176
220 177 int *eba_tbl;
178 + int checked:1;
179 + int corrupted:1;
180 + int upd_marker:1;
181 + int updating:1;
182 + int changing_leb:1;
221 183
222 184 #ifdef CONFIG_MTD_UBI_GLUEBI
223 - /* Gluebi-related stuff may be compiled out */
185 + /*
186 + * Gluebi-related stuff may be compiled out.
187 + * TODO: this should not be built into UBI but should be a separate
188 + * ubimtd driver which works on top of UBI and emulates MTD devices.
189 + */
224 190 struct ubi_volume_desc *gluebi_desc;
225 191 int gluebi_refcount;
226 192 struct mtd_info gluebi_mtd;
···
250 200
251 201 /**
252 202 * struct ubi_device - UBI device description structure
253 - * @dev: class device object to use the the Linux device model
203 + * @dev: UBI device object to use the Linux device model
254 204 * @cdev: character device object to create character device
255 205 * @ubi_num: UBI device number
256 206 * @ubi_name: UBI device name
257 - * @major: character device major number
258 207 * @vol_count: number of volumes in this UBI device
259 208 * @volumes: volumes of this UBI device
260 209 * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, beb_rsvd_pebs,
261 - * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count, @vol->readers,
262 - * @vol->writers, @vol->exclusive, @vol->removed, @vol->mapping and
263 - * @vol->eba_tbl.
210 + * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count,
211 + * @vol->readers, @vol->writers, @vol->exclusive,
212 + * @vol->ref_count, @vol->mapping and @vol->eba_tbl.
213 + * @ref_count: count of references on the UBI device
264 214 *
265 215 * @rsvd_pebs: count of reserved physical eraseblocks
266 216 * @avail_pebs: count of available physical eraseblocks
267 217 * @beb_rsvd_pebs: how many physical eraseblocks are reserved for bad PEB
268 - * handling
218 + * handling
269 219 * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling
270 220 *
221 + * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end
222 + * of UBI initialization
271 223 * @vtbl_slots: how many slots are available in the volume table
272 224 * @vtbl_size: size of the volume table in bytes
273 225 * @vtbl: in-RAM volume table copy
274 - * @vtbl_mutex: protects on-flash volume table
226 + * @volumes_mutex: protects on-flash volume table and serializes volume
227 + * changes, like creation, deletion, update, resize
275 228 *
276 229 * @max_ec: current highest erase counter value
277 230 * @mean_ec: current mean erase counter value
···
291 238 * @prot.pnum: protection tree indexed by physical eraseblock numbers
292 239 * @prot.aec: protection tree indexed by absolute erase counter value
293 240 * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from,
294 - * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works
295 - * fields
241 + * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works
242 + * fields
243 + * @move_mutex: serializes eraseblock moves
296 244 * @wl_scheduled: non-zero if the wear-leveling was scheduled
297 245 * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
298 - * physical eraseblock
246 + * physical eraseblock
299 247 * @abs_ec: absolute erase counter
300 248 * @move_from: physical eraseblock from where the data is being moved
301 249 * @move_to: physical eraseblock where the data is being moved to
302 - * @move_from_put: if the "from" PEB was put
303 250 * @move_to_put: if the "to" PEB was put
304 251 * @works: list of pending works
305 252 * @works_count: count of pending works
···
326 273 * @hdrs_min_io_size
327 274 * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset
328 275 * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or
329 - * not
276 + * not
330 277 * @mtd: MTD device descriptor
331 278 *
332 279 * @peb_buf1: a buffer of PEB size used for different purposes
333 280 * @peb_buf2: another buffer of PEB size used for different purposes
334 281 * @buf_mutex: protects @peb_buf1 and @peb_buf2
335 - * @dbg_peb_buf: buffer of PEB size used for debugging
282 + * @dbg_peb_buf: buffer of PEB size used for debugging
336 283 * @dbg_buf_mutex: protects @dbg_peb_buf
337 284 */
338 285 struct ubi_device {
···
340 287 struct device dev;
341 288 int ubi_num;
342 289 char ubi_name[sizeof(UBI_NAME_STR)+5];
343 - int major;
344 290 int vol_count;
345 291 struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT];
346 292 spinlock_t volumes_lock;
293 + int ref_count;
347 294
348 295 int rsvd_pebs;
349 296 int avail_pebs;
350 297 int beb_rsvd_pebs;
351 298 int beb_rsvd_level;
352 299
300 + int autoresize_vol_id;
353 301 int vtbl_slots;
354 302 int vtbl_size;
355 303 struct ubi_vtbl_record *vtbl;
356 - struct mutex vtbl_mutex;
304 + struct mutex volumes_mutex;
357 305
358 306 int max_ec;
307 + /* TODO: mean_ec is not updated run-time, fix */
359 308 int mean_ec;
360 309
361 310 /* EBA unit's stuff */
···
375 320 struct rb_root aec;
376 321 } prot;
377 322 spinlock_t wl_lock;
323 + struct mutex move_mutex;
324 + struct rw_semaphore
work_sem; 378 325 int wl_scheduled; 379 326 struct ubi_wl_entry **lookuptbl; 380 327 unsigned long long abs_ec; 381 328 struct ubi_wl_entry *move_from; 382 329 struct ubi_wl_entry *move_to; 383 - int move_from_put; 384 330 int move_to_put; 385 331 struct list_head works; 386 332 int works_count; ··· 411 355 void *peb_buf1; 412 356 void *peb_buf2; 413 357 struct mutex buf_mutex; 358 + struct mutex ckvol_mutex; 414 359 #ifdef CONFIG_MTD_UBI_DEBUG 415 360 void *dbg_peb_buf; 416 361 struct mutex dbg_buf_mutex; 417 362 #endif 418 363 }; 419 364 365 + extern struct kmem_cache *ubi_wl_entry_slab; 366 + extern struct file_operations ubi_ctrl_cdev_operations; 420 367 extern struct file_operations ubi_cdev_operations; 421 368 extern struct file_operations ubi_vol_cdev_operations; 422 369 extern struct class *ubi_class; 370 + extern struct mutex ubi_devices_mutex; 423 371 424 372 /* vtbl.c */ 425 373 int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, ··· 434 374 int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req); 435 375 int ubi_remove_volume(struct ubi_volume_desc *desc); 436 376 int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs); 437 - int ubi_add_volume(struct ubi_device *ubi, int vol_id); 438 - void ubi_free_volume(struct ubi_device *ubi, int vol_id); 377 + int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol); 378 + void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol); 439 379 440 380 /* upd.c */ 441 - int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes); 442 - int ubi_more_update_data(struct ubi_device *ubi, int vol_id, 381 + int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, 382 + long long bytes); 383 + int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol, 443 384 const void __user *buf, int count); 385 + int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, 386 + const struct ubi_leb_change_req *req); 387 + int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol, 388 + const void __user *buf, int count); 444 389 445 390 /* misc.c */ 446 391 int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length); ··· 464 399 #endif 465 400 466 401 /* eba.c */ 467 - int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum); 468 - int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf, 469 - int offset, int len, int check); 470 - int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum, 402 + int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, 403 + int lnum); 404 + int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, 405 + void *buf, int offset, int len, int check); 406 + int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, 471 407 const void *buf, int offset, int len, int dtype); 472 - int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum, 473 - const void *buf, int len, int dtype, 408 + int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol, 409 + int lnum, const void *buf, int len, int dtype, 474 410 int used_ebs); 475 - int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum, 476 - const void *buf, int len, int dtype); 411 + int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, 412 + int lnum, const void *buf, int len, int dtype); 477 413 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 478 414 struct 
ubi_vid_hdr *vid_hdr); 479 415 int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si); ··· 487 421 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum); 488 422 int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si); 489 423 void ubi_wl_close(struct ubi_device *ubi); 424 + int ubi_thread(void *u); 490 425 491 426 /* io.c */ 492 427 int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, ··· 505 438 struct ubi_vid_hdr *vid_hdr, int verbose); 506 439 int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, 507 440 struct ubi_vid_hdr *vid_hdr); 441 + 442 + /* build.c */ 443 + int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset); 444 + int ubi_detach_mtd_dev(int ubi_num, int anyway); 445 + struct ubi_device *ubi_get_device(int ubi_num); 446 + void ubi_put_device(struct ubi_device *ubi); 447 + struct ubi_device *ubi_get_by_major(int major); 448 + int ubi_major2num(int major); 508 449 509 450 /* 510 451 * ubi_rb_for_each_entry - walk an RB-tree. ··· 598 523 */ 599 524 static inline void ubi_ro_mode(struct ubi_device *ubi) 600 525 { 601 - ubi->ro_mode = 1; 602 - ubi_warn("switch to read-only mode"); 526 + if (!ubi->ro_mode) { 527 + ubi->ro_mode = 1; 528 + ubi_warn("switch to read-only mode"); 529 + } 603 530 } 604 531 605 532 /**
+137 -48
drivers/mtd/ubi/upd.c
··· 22 22 */ 23 23 24 24 /* 25 - * This file contains implementation of the volume update functionality. 25 + * This file contains implementation of the volume update and atomic LEB change 26 + * functionality. 26 27 * 27 28 * The update operation is based on the per-volume update marker which is 28 29 * stored in the volume table. The update marker is set before the update ··· 46 45 /** 47 46 * set_update_marker - set update marker. 48 47 * @ubi: UBI device description object 49 - * @vol_id: volume ID 48 + * @vol: volume description object 50 49 * 51 - * This function sets the update marker flag for volume @vol_id. Returns zero 50 + * This function sets the update marker flag for volume @vol. Returns zero 52 51 * in case of success and a negative error code in case of failure. 53 52 */ 54 - static int set_update_marker(struct ubi_device *ubi, int vol_id) 53 + static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol) 55 54 { 56 55 int err; 57 56 struct ubi_vtbl_record vtbl_rec; 58 - struct ubi_volume *vol = ubi->volumes[vol_id]; 59 57 60 - dbg_msg("set update marker for volume %d", vol_id); 58 + dbg_msg("set update marker for volume %d", vol->vol_id); 61 59 62 60 if (vol->upd_marker) { 63 - ubi_assert(ubi->vtbl[vol_id].upd_marker); 61 + ubi_assert(ubi->vtbl[vol->vol_id].upd_marker); 64 62 dbg_msg("already set"); 65 63 return 0; 66 64 } 67 65 68 - memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record)); 66 + memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id], 67 + sizeof(struct ubi_vtbl_record)); 69 68 vtbl_rec.upd_marker = 1; 70 69 71 - err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); 70 + mutex_lock(&ubi->volumes_mutex); 71 + err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec); 72 + mutex_unlock(&ubi->volumes_mutex); 72 73 vol->upd_marker = 1; 73 74 return err; 74 75 } ··· 78 75 /** 79 76 * clear_update_marker - clear update marker. 80 77 * @ubi: UBI device description object 81 - * @vol_id: volume ID 78 + * @vol: volume description object 82 79 * @bytes: new data size in bytes 83 80 * 84 - * This function clears the update marker for volume @vol_id, sets new volume 81 + * This function clears the update marker for volume @vol, sets new volume 85 82 * data size and clears the "corrupted" flag (static volumes only). Returns 86 83 * zero in case of success and a negative error code in case of failure. 87 84 */ 88 - static int clear_update_marker(struct ubi_device *ubi, int vol_id, long long bytes) 85 + static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol, 86 + long long bytes) 89 87 { 90 88 int err; 91 89 uint64_t tmp; 92 90 struct ubi_vtbl_record vtbl_rec; 93 - struct ubi_volume *vol = ubi->volumes[vol_id]; 94 91 95 - dbg_msg("clear update marker for volume %d", vol_id); 92 + dbg_msg("clear update marker for volume %d", vol->vol_id); 96 93 97 - memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record)); 94 + memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id], 95 + sizeof(struct ubi_vtbl_record)); 98 96 ubi_assert(vol->upd_marker && vtbl_rec.upd_marker); 99 97 vtbl_rec.upd_marker = 0; 100 98 ··· 110 106 vol->last_eb_bytes = vol->usable_leb_size; 111 107 } 112 108 113 - err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); 109 + mutex_lock(&ubi->volumes_mutex); 110 + err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec); 111 + mutex_unlock(&ubi->volumes_mutex); 114 112 vol->upd_marker = 0; 115 113 return err; 116 114 } ··· 120 114 /** 121 115 * ubi_start_update - start volume update. 
122 116 * @ubi: UBI device description object 123 - * @vol_id: volume ID 117 + * @vol: volume description object 124 118 * @bytes: update bytes 125 119 * 126 120 * This function starts volume update operation. If @bytes is zero, the volume 127 121 * is just wiped out. Returns zero in case of success and a negative error code 128 122 * in case of failure. 129 123 */ 130 - int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes) 124 + int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, 125 + long long bytes) 131 126 { 132 127 int i, err; 133 128 uint64_t tmp; 134 - struct ubi_volume *vol = ubi->volumes[vol_id]; 135 129 136 - dbg_msg("start update of volume %d, %llu bytes", vol_id, bytes); 130 + dbg_msg("start update of volume %d, %llu bytes", vol->vol_id, bytes); 131 + ubi_assert(!vol->updating && !vol->changing_leb); 137 132 vol->updating = 1; 138 133 139 - err = set_update_marker(ubi, vol_id); 134 + err = set_update_marker(ubi, vol); 140 135 if (err) 141 136 return err; 142 137 143 138 /* Before updating - wipe out the volume */ 144 139 for (i = 0; i < vol->reserved_pebs; i++) { 145 - err = ubi_eba_unmap_leb(ubi, vol_id, i); 140 + err = ubi_eba_unmap_leb(ubi, vol, i); 146 141 if (err) 147 142 return err; 148 143 } 149 144 150 145 if (bytes == 0) { 151 - err = clear_update_marker(ubi, vol_id, 0); 146 + err = clear_update_marker(ubi, vol, 0); 152 147 if (err) 153 148 return err; 154 149 err = ubi_wl_flush(ubi); ··· 170 163 } 171 164 172 165 /** 166 + * ubi_start_leb_change - start atomic LEB change. 167 + * @ubi: UBI device description object 168 + * @vol: volume description object 169 + * @req: operation request 170 + * 171 + * This function starts atomic LEB change operation. Returns zero in case of 172 + * success and a negative error code in case of failure. 173 + */ 174 + int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, 175 + const struct ubi_leb_change_req *req) 176 + { 177 + ubi_assert(!vol->updating && !vol->changing_leb); 178 + 179 + dbg_msg("start changing LEB %d:%d, %u bytes", 180 + vol->vol_id, req->lnum, req->bytes); 181 + if (req->bytes == 0) 182 + return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0, 183 + req->dtype); 184 + 185 + vol->upd_bytes = req->bytes; 186 + vol->upd_received = 0; 187 + vol->changing_leb = 1; 188 + vol->ch_lnum = req->lnum; 189 + vol->ch_dtype = req->dtype; 190 + 191 + vol->upd_buf = vmalloc(req->bytes); 192 + if (!vol->upd_buf) 193 + return -ENOMEM; 194 + 195 + return 0; 196 + } 197 + 198 + /** 173 199 * write_leb - write update data. 174 200 * @ubi: UBI device description object 175 - * @vol_id: volume ID 201 + * @vol: volume description object 176 202 * @lnum: logical eraseblock number 177 203 * @buf: data to write 178 204 * @len: data size ··· 231 191 * This function returns zero in case of success and a negative error code in 232 192 * case of failure. 
233 193 */
234 - static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
235 - int len, int used_ebs)
194 + static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
195 + void *buf, int len, int used_ebs)
236 196 {
237 - int err, l;
238 - struct ubi_volume *vol = ubi->volumes[vol_id];
197 + int err;
239 198
240 199 if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
241 - l = ALIGN(len, ubi->min_io_size);
242 - memset(buf + len, 0xFF, l - len);
200 + memset(buf + len, 0xFF, ALIGN(len, ubi->min_io_size) - len);
201 + len = ALIGN(len, ubi->min_io_size);
243 202
244 - l = ubi_calc_data_len(ubi, buf, l);
245 - if (l == 0) {
203 + len = ubi_calc_data_len(ubi, buf, len);
204 + if (len == 0) {
246 205 dbg_msg("all %d bytes contain 0xFF - skip", len);
247 206 return 0;
248 207 }
249 - if (len != l)
250 - dbg_msg("skip last %d bytes (0xFF)", len - l);
251 208
252 - err = ubi_eba_write_leb(ubi, vol_id, lnum, buf, 0, l,
253 - UBI_UNKNOWN);
209 + err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len, UBI_UNKNOWN);
254 210 } else {
255 211 /*
256 212 * When writing static volume, and this is the last logical
··· 258 222 * contain zeros, not random trash.
259 223 */
260 224 memset(buf + len, 0, vol->usable_leb_size - len);
261 - err = ubi_eba_write_leb_st(ubi, vol_id, lnum, buf, len,
225 + err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len,
262 226 UBI_UNKNOWN, used_ebs);
263 227 }
264 228
··· 272 236 * @count: how many bytes to write
273 237 *
274 238 * This function writes more data to the volume which is being updated. It may
275 - * be called arbitrary number of times until all of the update data arrive.
276 - * This function returns %0 in case of success, number of bytes written during
277 - * the last call if the whole volume update was successfully finished, and a
239 + * be called an arbitrary number of times until all the update data arrives.
240 + * This function returns %0 in case of success, number of bytes written during
241 + * the last call if the whole volume update has been successfully finished, and a
278 242 * negative error code in case of failure.
279 243 */
280 - int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
244 + int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
281 245 const void __user *buf, int count)
282 246 {
283 247 uint64_t tmp;
284 - struct ubi_volume *vol = ubi->volumes[vol_id];
285 248 int lnum, offs, err = 0, len, to_write = count;
286 249
287 250 dbg_msg("write %d of %lld bytes, %lld already passed",
··· 325 290 * is the last chunk, it's time to flush the buffer.
326 291 */
327 292 ubi_assert(flush_len <= vol->usable_leb_size);
328 - err = write_leb(ubi, vol_id, lnum, vol->upd_buf,
329 - flush_len, vol->upd_ebs);
293 + err = write_leb(ubi, vol, lnum, vol->upd_buf, flush_len,
294 + vol->upd_ebs);
330 295 if (err)
331 296 return err;
332 297 }
··· 353 318
354 319 if (len == vol->usable_leb_size ||
355 320 vol->upd_received + len == vol->upd_bytes) {
356 - err = write_leb(ubi, vol_id, lnum, vol->upd_buf, len,
357 - vol->upd_ebs);
321 + err = write_leb(ubi, vol, lnum, vol->upd_buf,
322 + len, vol->upd_ebs);
358 323 if (err)
359 324 break;
360 325 }
··· 368 333 ubi_assert(vol->upd_received <= vol->upd_bytes);
369 334 if (vol->upd_received == vol->upd_bytes) {
370 335 /* The update is finished, clear the update marker */
371 - err = clear_update_marker(ubi, vol_id, vol->upd_bytes);
336 + err = clear_update_marker(ubi, vol, vol->upd_bytes);
372 337 if (err)
373 338 return err;
374 339 err = ubi_wl_flush(ubi);
375 340 if (err == 0) {
341 + vol->updating = 0;
376 342 err = to_write;
377 343 vfree(vol->upd_buf);
378 - vol->updating = 0;
379 344 }
345 + }
346 +
347 + return err;
348 + }
349 +
350 + /**
351 + * ubi_more_leb_change_data - accept more data for atomic LEB change.
352 + * @vol: volume description object
353 + * @buf: write data (user-space memory buffer)
354 + * @count: how many bytes to write
355 + *
356 + * This function accepts more data for the volume which is under the
357 + * "atomic LEB change" operation. It may be called an arbitrary number of times
358 + * until all data arrives. This function returns %0 in case of success, number
359 + * of bytes written during the last call if the whole "atomic LEB change"
360 + * operation has been successfully finished, and a negative error code in case
361 + * of failure.
362 + */
363 + int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
364 + const void __user *buf, int count)
365 + {
366 + int err;
367 +
368 + dbg_msg("write %d of %lld bytes, %lld already passed",
369 + count, vol->upd_bytes, vol->upd_received);
370 +
371 + if (ubi->ro_mode)
372 + return -EROFS;
373 +
374 + if (vol->upd_received + count > vol->upd_bytes)
375 + count = vol->upd_bytes - vol->upd_received;
376 +
377 + err = copy_from_user(vol->upd_buf + vol->upd_received, buf, count);
378 + if (err)
379 + return -EFAULT;
380 +
381 + vol->upd_received += count;
382 +
383 + if (vol->upd_received == vol->upd_bytes) {
384 + int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size);
385 +
386 + memset(vol->upd_buf + vol->upd_bytes, 0xFF, len - vol->upd_bytes);
387 + len = ubi_calc_data_len(ubi, vol->upd_buf, len);
388 + err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum,
389 + vol->upd_buf, len, UBI_UNKNOWN);
390 + if (err)
391 + return err;
392 + }
393 +
394 + ubi_assert(vol->upd_received <= vol->upd_bytes);
395 + if (vol->upd_received == vol->upd_bytes) {
396 + vol->changing_leb = 0;
397 + err = count;
398 + vfree(vol->upd_buf);
380 399 }
381 400
382 401 return err;
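
The three-way return convention of ubi_more_update_data() is easy to misuse, so here is a hedged sketch of a hypothetical caller, modelled on what a volume character-device write handler might do (the function name and error handling are illustrative, not part of this series):

/* Hypothetical caller: feed one chunk of update data and interpret
 * the three-way return convention of ubi_more_update_data(). */
static int feed_update_chunk(struct ubi_device *ubi, struct ubi_volume *vol,
			     const void __user *buf, int count)
{
	int err = ubi_more_update_data(ubi, vol, buf, count);

	if (err < 0)
		return err;	/* I/O error or bad user buffer */
	if (err == 0)
		return count;	/* chunk accepted, update still running */
	/*
	 * err > 0: the update has just finished; err is the number of
	 * bytes consumed by this final call.
	 */
	return err;
}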
+116 -92
drivers/mtd/ubi/vmt.c
··· 63 63 * B. process 2 removes volume Y;
64 64 * C. process 1 starts reading the /<sysfs>/class/ubi/ubiX_Y/reserved_ebs file;
65 65 *
66 - * What we want to do in a situation like that is to return error when the file
67 - * is read. This is done by means of the 'removed' flag and the 'vol_lock' of
68 - * the UBI volume description object.
66 + * In this situation, this function will return %-ENODEV because it will find
67 + * out that the volume was removed from the @ubi->volumes array.
69 68 */
70 69 static ssize_t vol_attribute_show(struct device *dev,
71 70 struct device_attribute *attr, char *buf)
72 71 {
73 72 int ret;
74 73 struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
75 + struct ubi_device *ubi;
75 76
76 - spin_lock(&vol->ubi->volumes_lock);
77 - if (vol->removed) {
78 - spin_unlock(&vol->ubi->volumes_lock);
76 + ubi = ubi_get_device(vol->ubi->ubi_num);
77 + if (!ubi)
78 + return -ENODEV;
79 +
80 + spin_lock(&ubi->volumes_lock);
81 + if (!ubi->volumes[vol->vol_id]) {
82 + spin_unlock(&ubi->volumes_lock);
83 + ubi_put_device(ubi);
79 84 return -ENODEV;
80 85 }
86 + /* Take a reference to prevent volume removal */
87 + vol->ref_count += 1;
88 + spin_unlock(&ubi->volumes_lock);
89 +
81 90 if (attr == &attr_vol_reserved_ebs)
82 91 ret = sprintf(buf, "%d\n", vol->reserved_pebs);
83 92 else if (attr == &attr_vol_type) {
··· 103 94 ret = sprintf(buf, "%d\n", vol->corrupted);
104 95 else if (attr == &attr_vol_alignment)
105 96 ret = sprintf(buf, "%d\n", vol->alignment);
106 - else if (attr == &attr_vol_usable_eb_size) {
97 + else if (attr == &attr_vol_usable_eb_size)
107 98 ret = sprintf(buf, "%d\n", vol->usable_leb_size);
108 - } else if (attr == &attr_vol_data_bytes)
99 + else if (attr == &attr_vol_data_bytes)
109 100 ret = sprintf(buf, "%lld\n", vol->used_bytes);
110 101 else if (attr == &attr_vol_upd_marker)
111 102 ret = sprintf(buf, "%d\n", vol->upd_marker);
112 103 else
113 - BUG();
114 - spin_unlock(&vol->ubi->volumes_lock);
104 + /* This must be a bug */
105 + ret = -EINVAL;
106 +
107 + /* We've done the operation, drop volume and UBI device references */
108 + spin_lock(&ubi->volumes_lock);
109 + vol->ref_count -= 1;
110 + ubi_assert(vol->ref_count >= 0);
111 + spin_unlock(&ubi->volumes_lock);
112 + ubi_put_device(ubi);
115 113 return ret;
116 114 }
117 115
··· 126 110 static void vol_release(struct device *dev)
127 111 {
128 112 struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
129 - ubi_assert(vol->removed);
113 +
130 114 kfree(vol);
131 115 }
132 116
··· 168 152 if (err)
169 153 return err;
170 154 err = device_create_file(&vol->dev, &attr_vol_upd_marker);
171 - if (err)
172 - return err;
173 - return 0;
155 + return err;
174 156 }
175 157
176 158 /**
··· 194 180 * @req: volume creation request
195 181 *
196 182 * This function creates a volume described by @req. If @req->vol_id is
197 - * %UBI_VOL_NUM_AUTO, this function automatically assigne ID to the new volume
183 + * %UBI_VOL_NUM_AUTO, this function automatically assigns an ID to the new volume
198 184 * and saves it in @req->vol_id. Returns zero in case of success and a negative
199 - * error code in case of failure.
185 + * error code in case of failure. Note, the caller has to have the
186 + * @ubi->volumes_mutex locked.
200 187 */ 201 188 int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) 202 189 { 203 - int i, err, vol_id = req->vol_id; 190 + int i, err, vol_id = req->vol_id, dont_free = 0; 204 191 struct ubi_volume *vol; 205 192 struct ubi_vtbl_record vtbl_rec; 206 193 uint64_t bytes; 194 + dev_t dev; 207 195 208 196 if (ubi->ro_mode) 209 197 return -EROFS; ··· 215 199 return -ENOMEM; 216 200 217 201 spin_lock(&ubi->volumes_lock); 218 - 219 202 if (vol_id == UBI_VOL_NUM_AUTO) { 220 203 /* Find unused volume ID */ 221 204 dbg_msg("search for vacant volume ID"); ··· 267 252 } 268 253 ubi->avail_pebs -= vol->reserved_pebs; 269 254 ubi->rsvd_pebs += vol->reserved_pebs; 255 + spin_unlock(&ubi->volumes_lock); 270 256 271 257 vol->vol_id = vol_id; 272 258 vol->alignment = req->alignment; ··· 275 259 vol->vol_type = req->vol_type; 276 260 vol->name_len = req->name_len; 277 261 memcpy(vol->name, req->name, vol->name_len + 1); 278 - vol->exclusive = 1; 279 262 vol->ubi = ubi; 280 - ubi->volumes[vol_id] = vol; 281 - spin_unlock(&ubi->volumes_lock); 282 263 283 264 /* 284 265 * Finish all pending erases because there may be some LEBs belonging ··· 312 299 /* Register character device for the volume */ 313 300 cdev_init(&vol->cdev, &ubi_vol_cdev_operations); 314 301 vol->cdev.owner = THIS_MODULE; 315 - err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol_id + 1), 1); 302 + dev = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1); 303 + err = cdev_add(&vol->cdev, dev, 1); 316 304 if (err) { 317 - ubi_err("cannot add character device for volume %d", vol_id); 305 + ubi_err("cannot add character device"); 318 306 goto out_mapping; 319 307 } 320 308 ··· 325 311 326 312 vol->dev.release = vol_release; 327 313 vol->dev.parent = &ubi->dev; 328 - vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1); 314 + vol->dev.devt = dev; 329 315 vol->dev.class = ubi_class; 316 + 330 317 sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id); 331 318 err = device_register(&vol->dev); 332 - if (err) 319 + if (err) { 320 + ubi_err("cannot register device"); 333 321 goto out_gluebi; 322 + } 334 323 335 324 err = volume_sysfs_init(ubi, vol); 336 325 if (err) ··· 356 339 goto out_sysfs; 357 340 358 341 spin_lock(&ubi->volumes_lock); 342 + ubi->volumes[vol_id] = vol; 359 343 ubi->vol_count += 1; 360 - vol->exclusive = 0; 361 344 spin_unlock(&ubi->volumes_lock); 362 345 363 346 paranoid_check_volumes(ubi); 364 347 return 0; 365 348 349 + out_sysfs: 350 + /* 351 + * We have registered our device, we should not free the volume* 352 + * description object in this function in case of an error - it is 353 + * freed by the release function. 354 + * 355 + * Get device reference to prevent the release function from being 356 + * called just after sysfs has been closed. 357 + */ 358 + dont_free = 1; 359 + get_device(&vol->dev); 360 + volume_sysfs_close(vol); 366 361 out_gluebi: 367 - err = ubi_destroy_gluebi(vol); 362 + ubi_destroy_gluebi(vol); 368 363 out_cdev: 369 364 cdev_del(&vol->cdev); 370 365 out_mapping: ··· 385 356 spin_lock(&ubi->volumes_lock); 386 357 ubi->rsvd_pebs -= vol->reserved_pebs; 387 358 ubi->avail_pebs += vol->reserved_pebs; 388 - ubi->volumes[vol_id] = NULL; 389 359 out_unlock: 390 360 spin_unlock(&ubi->volumes_lock); 391 - kfree(vol); 392 - return err; 393 - 394 - /* 395 - * We are registered, so @vol is destroyed in the release function and 396 - * we have to de-initialize differently. 
397 - */ 398 - out_sysfs: 399 - err = ubi_destroy_gluebi(vol); 400 - cdev_del(&vol->cdev); 401 - kfree(vol->eba_tbl); 402 - spin_lock(&ubi->volumes_lock); 403 - ubi->rsvd_pebs -= vol->reserved_pebs; 404 - ubi->avail_pebs += vol->reserved_pebs; 405 - ubi->volumes[vol_id] = NULL; 406 - spin_unlock(&ubi->volumes_lock); 407 - volume_sysfs_close(vol); 361 + if (dont_free) 362 + put_device(&vol->dev); 363 + else 364 + kfree(vol); 365 + ubi_err("cannot create volume %d, error %d", vol_id, err); 408 366 return err; 409 367 } 410 368 ··· 401 385 * 402 386 * This function removes volume described by @desc. The volume has to be opened 403 387 * in "exclusive" mode. Returns zero in case of success and a negative error 404 - * code in case of failure. 388 + * code in case of failure. The caller has to have the @ubi->volumes_mutex 389 + * locked. 405 390 */ 406 391 int ubi_remove_volume(struct ubi_volume_desc *desc) 407 392 { ··· 417 400 if (ubi->ro_mode) 418 401 return -EROFS; 419 402 403 + spin_lock(&ubi->volumes_lock); 404 + if (vol->ref_count > 1) { 405 + /* 406 + * The volume is busy, probably someone is reading one of its 407 + * sysfs files. 408 + */ 409 + err = -EBUSY; 410 + goto out_unlock; 411 + } 412 + ubi->volumes[vol_id] = NULL; 413 + spin_unlock(&ubi->volumes_lock); 414 + 420 415 err = ubi_destroy_gluebi(vol); 421 416 if (err) 422 - return err; 417 + goto out_err; 423 418 424 419 err = ubi_change_vtbl_record(ubi, vol_id, NULL); 425 420 if (err) 426 - return err; 421 + goto out_err; 427 422 428 423 for (i = 0; i < vol->reserved_pebs; i++) { 429 - err = ubi_eba_unmap_leb(ubi, vol_id, i); 424 + err = ubi_eba_unmap_leb(ubi, vol, i); 430 425 if (err) 431 - return err; 426 + goto out_err; 432 427 } 433 - 434 - spin_lock(&ubi->volumes_lock); 435 - vol->removed = 1; 436 - ubi->volumes[vol_id] = NULL; 437 - spin_unlock(&ubi->volumes_lock); 438 428 439 429 kfree(vol->eba_tbl); 440 430 vol->eba_tbl = NULL; 441 431 cdev_del(&vol->cdev); 442 432 volume_sysfs_close(vol); 443 - kfree(desc); 444 433 445 434 spin_lock(&ubi->volumes_lock); 446 435 ubi->rsvd_pebs -= reserved_pebs; ··· 464 441 spin_unlock(&ubi->volumes_lock); 465 442 466 443 paranoid_check_volumes(ubi); 467 - module_put(THIS_MODULE); 468 444 return 0; 445 + 446 + out_err: 447 + ubi_err("cannot remove volume %d, error %d", vol_id, err); 448 + spin_lock(&ubi->volumes_lock); 449 + ubi->volumes[vol_id] = vol; 450 + out_unlock: 451 + spin_unlock(&ubi->volumes_lock); 452 + return err; 469 453 } 470 454 471 455 /** ··· 480 450 * @desc: volume descriptor 481 451 * @reserved_pebs: new size in physical eraseblocks 482 452 * 483 - * This function returns zero in case of success, and a negative error code in 484 - * case of failure. 453 + * This function re-sizes the volume and returns zero in case of success, and a 454 + * negative error code in case of failure. The caller has to have the 455 + * @ubi->volumes_mutex locked. 
485 456 */
486 457 int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
487 458 {
··· 497 466
498 467 dbg_msg("re-size volume %d from %d to %d PEBs",
499 468 vol_id, vol->reserved_pebs, reserved_pebs);
500 - ubi_assert(desc->mode == UBI_EXCLUSIVE);
501 - ubi_assert(vol == ubi->volumes[vol_id]);
502 469
503 470 if (vol->vol_type == UBI_STATIC_VOLUME &&
504 471 reserved_pebs < vol->used_ebs) {
··· 515 486
516 487 for (i = 0; i < reserved_pebs; i++)
517 488 new_mapping[i] = UBI_LEB_UNMAPPED;
489 +
490 + spin_lock(&ubi->volumes_lock);
491 + if (vol->ref_count > 1) {
492 + spin_unlock(&ubi->volumes_lock);
493 + err = -EBUSY;
494 + goto out_free;
495 + }
496 + spin_unlock(&ubi->volumes_lock);
518 497
519 498 /* Reserve physical eraseblocks */
520 499 pebs = reserved_pebs - vol->reserved_pebs;
··· 553 516
554 517 if (pebs < 0) {
555 518 for (i = 0; i < -pebs; i++) {
556 - err = ubi_eba_unmap_leb(ubi, vol_id, reserved_pebs + i);
519 + err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
557 520 if (err)
558 521 goto out_acc;
559 522 }
··· 602 565 /**
603 566 * ubi_add_volume - add volume.
604 567 * @ubi: UBI device description object
605 - * @vol_id: volume ID
568 + * @vol: volume description object
606 569 *
607 - * This function adds an existin volume and initializes all its data
608 - * structures. Returnes zero in case of success and a negative error code in
570 + * This function adds an existing volume and initializes all its data
571 + * structures. Returns zero in case of success and a negative error code in
609 572 * case of failure.
610 573 */
611 - int ubi_add_volume(struct ubi_device *ubi, int vol_id)
574 + int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
612 575 {
613 - int err;
614 - struct ubi_volume *vol = ubi->volumes[vol_id];
576 + int err, vol_id = vol->vol_id;
577 + dev_t dev;
615 578
616 579 dbg_msg("add volume %d", vol_id);
617 580 ubi_dbg_dump_vol_info(vol);
618 - ubi_assert(vol);
619 581
620 582 /* Register character device for the volume */
621 583 cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
622 584 vol->cdev.owner = THIS_MODULE;
623 - err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol->vol_id + 1), 1);
585 + dev = MKDEV(MAJOR(ubi->cdev.dev), vol->vol_id + 1);
586 + err = cdev_add(&vol->cdev, dev, 1);
624 587 if (err) {
625 - ubi_err("cannot add character device for volume %d", vol_id);
588 + ubi_err("cannot add character device for volume %d, error %d",
589 + vol_id, err);
626 590 return err;
627 591 }
628 592
··· 633 595
634 596 vol->dev.release = vol_release;
635 597 vol->dev.parent = &ubi->dev;
636 - vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1);
598 + vol->dev.devt = dev;
637 599 vol->dev.class = ubi_class;
638 600 sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
639 601 err = device_register(&vol->dev);
··· 661 623 /**
662 624 * ubi_free_volume - free volume.
663 625 * @ubi: UBI device description object
664 - * @vol_id: volume ID
626 + * @vol: volume description object
665 627 *
666 - * This function frees all resources for volume @vol_id but does not remove it.
628 + * This function frees all resources for volume @vol but does not remove it.
667 629 * Used only when the UBI device is detached.
668 630 */ 669 - void ubi_free_volume(struct ubi_device *ubi, int vol_id) 631 + void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol) 670 632 { 671 633 int err; 672 - struct ubi_volume *vol = ubi->volumes[vol_id]; 673 634 674 - dbg_msg("free volume %d", vol_id); 675 - ubi_assert(vol); 635 + dbg_msg("free volume %d", vol->vol_id); 676 636 677 - vol->removed = 1; 637 + ubi->volumes[vol->vol_id] = NULL; 678 638 err = ubi_destroy_gluebi(vol); 679 - ubi->volumes[vol_id] = NULL; 680 639 cdev_del(&vol->cdev); 681 640 volume_sysfs_close(vol); 682 641 } ··· 743 708 goto fail; 744 709 } 745 710 746 - if (vol->upd_marker != 0 && vol->upd_marker != 1) { 747 - ubi_err("bad upd_marker"); 748 - goto fail; 749 - } 750 - 751 711 if (vol->upd_marker && vol->corrupted) { 752 712 dbg_err("update marker and corrupted simultaneously"); 753 713 goto fail; ··· 777 747 778 748 n = (long long)vol->used_ebs * vol->usable_leb_size; 779 749 if (vol->vol_type == UBI_DYNAMIC_VOLUME) { 780 - if (vol->corrupted != 0) { 750 + if (vol->corrupted) { 781 751 ubi_err("corrupted dynamic volume"); 782 752 goto fail; 783 753 } ··· 794 764 goto fail; 795 765 } 796 766 } else { 797 - if (vol->corrupted != 0 && vol->corrupted != 1) { 798 - ubi_err("bad corrupted"); 799 - goto fail; 800 - } 801 767 if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) { 802 768 ubi_err("bad used_ebs"); 803 769 goto fail; ··· 846 820 { 847 821 int i; 848 822 849 - mutex_lock(&ubi->vtbl_mutex); 850 823 for (i = 0; i < ubi->vtbl_slots; i++) 851 824 paranoid_check_volume(ubi, i); 852 - mutex_unlock(&ubi->vtbl_mutex); 853 825 } 854 826 #endif
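
The sysfs handler at the top of this file shows the pinning discipline that replaces the old 'removed' flag: pin the volume under @volumes_lock, do the slow work unlocked, then unpin. Condensed into a hedged helper sketch (the wrapper name is hypothetical; names otherwise as in the code above):

/* Hedged sketch of the pinning pattern used by vol_attribute_show(). */
static int with_volume_pinned(struct ubi_device *ubi, struct ubi_volume *vol,
			      int (*work)(struct ubi_volume *vol))
{
	int ret;

	spin_lock(&ubi->volumes_lock);
	if (!ubi->volumes[vol->vol_id]) {
		/* The volume was removed while we were getting here */
		spin_unlock(&ubi->volumes_lock);
		return -ENODEV;
	}
	vol->ref_count += 1;	/* ubi_remove_volume() now fails with -EBUSY */
	spin_unlock(&ubi->volumes_lock);

	ret = work(vol);	/* slow path runs without the spinlock */

	spin_lock(&ubi->volumes_lock);
	vol->ref_count -= 1;
	spin_unlock(&ubi->volumes_lock);
	return ret;
}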
+27 -18
drivers/mtd/ubi/vtbl.c
··· 86 86 {
87 87 int i, err;
88 88 uint32_t crc;
89 + struct ubi_volume *layout_vol;
89 90
90 91 ubi_assert(idx >= 0 && idx < ubi->vtbl_slots);
92 + layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
91 93
92 94 if (!vtbl_rec)
93 95 vtbl_rec = &empty_vtbl_record;
··· 98 96 vtbl_rec->crc = cpu_to_be32(crc);
99 97 }
100 98
101 - mutex_lock(&ubi->vtbl_mutex);
102 99 memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record));
103 100 for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
104 - err = ubi_eba_unmap_leb(ubi, UBI_LAYOUT_VOL_ID, i);
105 - if (err) {
106 - mutex_unlock(&ubi->vtbl_mutex);
101 + err = ubi_eba_unmap_leb(ubi, layout_vol, i);
102 + if (err)
107 103 return err;
108 - }
109 - err = ubi_eba_write_leb(ubi, UBI_LAYOUT_VOL_ID, i, ubi->vtbl, 0,
104 +
105 + err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
110 106 ubi->vtbl_size, UBI_LONGTERM);
111 - if (err) {
112 - mutex_unlock(&ubi->vtbl_mutex);
107 + if (err)
113 108 return err;
114 - }
115 109 }
116 110
117 111 paranoid_vtbl_check(ubi);
118 - mutex_unlock(&ubi->vtbl_mutex);
119 - return ubi_wl_flush(ubi);
112 + return 0;
120 113 }
121 114
122 115 /**
123 - * vol_til_check - check if volume table is not corrupted and contains sensible
124 - * data.
125 - *
116 + * vtbl_check - check if volume table is not corrupted and contains sensible
117 + * data.
126 118 * @ubi: UBI device description object
127 119 * @vtbl: volume table
128 120 *
··· 269 273 * this volume table copy was found during scanning. It has to be wiped
270 274 * out.
271 275 */
272 - sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOL_ID);
276 + sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
273 277 if (sv)
274 278 old_seb = ubi_scan_find_seb(sv, copy);
275 279
··· 281 285 }
282 286
283 287 vid_hdr->vol_type = UBI_VID_DYNAMIC;
284 - vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOL_ID);
288 + vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID);
285 289 vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT;
286 290 vid_hdr->data_size = vid_hdr->used_ebs =
287 291 vid_hdr->data_pad = cpu_to_be32(0);
··· 514 518 vol->name[vol->name_len] = '\0';
515 519 vol->vol_id = i;
516 520
521 + if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
522 + /* Auto re-size flag may be set only for one volume */
523 + if (ubi->autoresize_vol_id != -1) {
524 + ubi_err("more than one auto-resize volume (%d "
525 + "and %d)", ubi->autoresize_vol_id, i);
526 + return -EINVAL;
527 + }
528 +
529 + ubi->autoresize_vol_id = i;
530 + }
531 +
517 532 ubi_assert(!ubi->volumes[i]);
518 533 ubi->volumes[i] = vol;
519 534 ubi->vol_count += 1;
··· 575 568 vol->last_eb_bytes = sv->last_data_size;
576 569 }
577 570
571 + /* And add the layout volume */
578 572 vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
579 573 if (!vol)
580 574 return -ENOMEM;
··· 590 582 vol->last_eb_bytes = vol->reserved_pebs;
591 583 vol->used_bytes =
592 584 (long long)vol->used_ebs * (ubi->leb_size - vol->data_pad);
593 - vol->vol_id = UBI_LAYOUT_VOL_ID;
585 + vol->vol_id = UBI_LAYOUT_VOLUME_ID;
586 + vol->ref_count = 1;
594 587
595 588 ubi_assert(!ubi->volumes[i]);
596 589 ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol;
··· 743 734 ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE;
744 735 ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size);
745 736
746 - sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOL_ID);
737 + sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
747 738 if (!sv) {
748 739 /*
749 740 * No logical eraseblocks belonging to the layout volume were
+184 -154
drivers/mtd/ubi/wl.c
··· 117 117 #define WL_MAX_FAILURES 32 118 118 119 119 /** 120 - * struct ubi_wl_entry - wear-leveling entry. 121 - * @rb: link in the corresponding RB-tree 122 - * @ec: erase counter 123 - * @pnum: physical eraseblock number 124 - * 125 - * Each physical eraseblock has a corresponding &struct wl_entry object which 126 - * may be kept in different RB-trees. 127 - */ 128 - struct ubi_wl_entry { 129 - struct rb_node rb; 130 - int ec; 131 - int pnum; 132 - }; 133 - 134 - /** 135 120 * struct ubi_wl_prot_entry - PEB protection entry. 136 121 * @rb_pnum: link in the @wl->prot.pnum RB-tree 137 122 * @rb_aec: link in the @wl->prot.aec RB-tree ··· 201 216 #define paranoid_check_in_wl_tree(e, root) 202 217 #endif 203 218 204 - /* Slab cache for wear-leveling entries */ 205 - static struct kmem_cache *wl_entries_slab; 206 - 207 219 /** 208 220 * wl_tree_add - add a wear-leveling entry to a WL RB-tree. 209 221 * @e: the wear-leveling entry to add ··· 249 267 int err; 250 268 struct ubi_work *wrk; 251 269 252 - spin_lock(&ubi->wl_lock); 270 + cond_resched(); 253 271 272 + /* 273 + * @ubi->work_sem is used to synchronize with the workers. Workers take 274 + * it in read mode, so many of them may be doing works at a time. But 275 + * the queue flush code has to be sure the whole queue of works is 276 + * done, and it takes the mutex in write mode. 277 + */ 278 + down_read(&ubi->work_sem); 279 + spin_lock(&ubi->wl_lock); 254 280 if (list_empty(&ubi->works)) { 255 281 spin_unlock(&ubi->wl_lock); 282 + up_read(&ubi->work_sem); 256 283 return 0; 257 284 } 258 285 259 286 wrk = list_entry(ubi->works.next, struct ubi_work, list); 260 287 list_del(&wrk->list); 288 + ubi->works_count -= 1; 289 + ubi_assert(ubi->works_count >= 0); 261 290 spin_unlock(&ubi->wl_lock); 262 291 263 292 /* ··· 279 286 err = wrk->func(ubi, wrk, 0); 280 287 if (err) 281 288 ubi_err("work failed with error code %d", err); 289 + up_read(&ubi->work_sem); 282 290 283 - spin_lock(&ubi->wl_lock); 284 - ubi->works_count -= 1; 285 - ubi_assert(ubi->works_count >= 0); 286 - spin_unlock(&ubi->wl_lock); 287 291 return err; 288 292 } 289 293 ··· 539 549 * prot_tree_del - remove a physical eraseblock from the protection trees 540 550 * @ubi: UBI device description object 541 551 * @pnum: the physical eraseblock to remove 552 + * 553 + * This function returns PEB @pnum from the protection trees and returns zero 554 + * in case of success and %-ENODEV if the PEB was not found in the protection 555 + * trees. 
542 556 */ 543 - static void prot_tree_del(struct ubi_device *ubi, int pnum) 557 + static int prot_tree_del(struct ubi_device *ubi, int pnum) 544 558 { 545 559 struct rb_node *p; 546 560 struct ubi_wl_prot_entry *pe = NULL; ··· 555 561 pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum); 556 562 557 563 if (pnum == pe->e->pnum) 558 - break; 564 + goto found; 559 565 560 566 if (pnum < pe->e->pnum) 561 567 p = p->rb_left; ··· 563 569 p = p->rb_right; 564 570 } 565 571 572 + return -ENODEV; 573 + 574 + found: 566 575 ubi_assert(pe->e->pnum == pnum); 567 576 rb_erase(&pe->rb_aec, &ubi->prot.aec); 568 577 rb_erase(&pe->rb_pnum, &ubi->prot.pnum); 569 578 kfree(pe); 579 + return 0; 570 580 } 571 581 572 582 /** ··· 742 744 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, 743 745 int cancel) 744 746 { 745 - int err, put = 0; 747 + int err, put = 0, scrubbing = 0, protect = 0; 748 + struct ubi_wl_prot_entry *uninitialized_var(pe); 746 749 struct ubi_wl_entry *e1, *e2; 747 750 struct ubi_vid_hdr *vid_hdr; 748 751 ··· 756 757 if (!vid_hdr) 757 758 return -ENOMEM; 758 759 760 + mutex_lock(&ubi->move_mutex); 759 761 spin_lock(&ubi->wl_lock); 762 + ubi_assert(!ubi->move_from && !ubi->move_to); 763 + ubi_assert(!ubi->move_to_put); 760 764 761 - /* 762 - * Only one WL worker at a time is supported at this implementation, so 763 - * make sure a PEB is not being moved already. 764 - */ 765 - if (ubi->move_to || !ubi->free.rb_node || 765 + if (!ubi->free.rb_node || 766 766 (!ubi->used.rb_node && !ubi->scrub.rb_node)) { 767 767 /* 768 - * Only one WL worker at a time is supported at this 769 - * implementation, so if a LEB is already being moved, cancel. 770 - * 771 - * No free physical eraseblocks? Well, we cancel wear-leveling 772 - * then. It will be triggered again when a free physical 773 - * eraseblock appears. 768 + * No free physical eraseblocks? Well, they must be waiting in 769 + * the queue to be erased. Cancel movement - it will be 770 + * triggered again when a free physical eraseblock appears. 774 771 * 775 772 * No used physical eraseblocks? They must be temporarily 776 773 * protected from being moved. 
They will be moved to the ··· 775 780 */ 776 781 dbg_wl("cancel WL, a list is empty: free %d, used %d", 777 782 !ubi->free.rb_node, !ubi->used.rb_node); 778 - ubi->wl_scheduled = 0; 779 - spin_unlock(&ubi->wl_lock); 780 - ubi_free_vid_hdr(ubi, vid_hdr); 781 - return 0; 783 + goto out_cancel; 782 784 } 783 785 784 786 if (!ubi->scrub.rb_node) { ··· 790 798 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { 791 799 dbg_wl("no WL needed: min used EC %d, max free EC %d", 792 800 e1->ec, e2->ec); 793 - ubi->wl_scheduled = 0; 794 - spin_unlock(&ubi->wl_lock); 795 - ubi_free_vid_hdr(ubi, vid_hdr); 796 - return 0; 801 + goto out_cancel; 797 802 } 798 803 paranoid_check_in_wl_tree(e1, &ubi->used); 799 804 rb_erase(&e1->rb, &ubi->used); 800 805 dbg_wl("move PEB %d EC %d to PEB %d EC %d", 801 806 e1->pnum, e1->ec, e2->pnum, e2->ec); 802 807 } else { 808 + /* Perform scrubbing */ 809 + scrubbing = 1; 803 810 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb); 804 811 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); 805 812 paranoid_check_in_wl_tree(e1, &ubi->scrub); 806 - rb_erase(&e1->rb, &ubi->scrub); 813 + rb_erase(&e1->rb, &ubi->scrub); 807 814 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); 808 815 } 809 816 810 817 paranoid_check_in_wl_tree(e2, &ubi->free); 811 818 rb_erase(&e2->rb, &ubi->free); 812 - ubi_assert(!ubi->move_from && !ubi->move_to); 813 - ubi_assert(!ubi->move_to_put && !ubi->move_from_put); 814 819 ubi->move_from = e1; 815 820 ubi->move_to = e2; 816 821 spin_unlock(&ubi->wl_lock); ··· 817 828 * We so far do not know which logical eraseblock our physical 818 829 * eraseblock (@e1) belongs to. We have to read the volume identifier 819 830 * header first. 831 + * 832 + * Note, we are protected from this PEB being unmapped and erased. The 833 + * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB 834 + * which is being moved was unmapped. 820 835 */ 821 836 822 837 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0); ··· 835 842 * likely have the VID header in place. 836 843 */ 837 844 dbg_wl("PEB %d has no VID header", e1->pnum); 838 - err = 0; 839 - } else { 840 - ubi_err("error %d while reading VID header from PEB %d", 841 - err, e1->pnum); 842 - if (err > 0) 843 - err = -EIO; 845 + goto out_not_moved; 844 846 } 845 - goto error; 847 + 848 + ubi_err("error %d while reading VID header from PEB %d", 849 + err, e1->pnum); 850 + if (err > 0) 851 + err = -EIO; 852 + goto out_error; 846 853 } 847 854 848 855 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); 849 856 if (err) { 850 - if (err == UBI_IO_BITFLIPS) 851 - err = 0; 852 - goto error; 857 + 858 + if (err < 0) 859 + goto out_error; 860 + if (err == 1) 861 + goto out_not_moved; 862 + 863 + /* 864 + * For some reason the LEB was not moved - it might be because 865 + * the volume is being deleted. We should prevent this PEB from 866 + * being selected for wear-levelling movement for some "time", 867 + * so put it to the protection tree. 
868 + */
869 +
870 + dbg_wl("cancelled moving PEB %d", e1->pnum);
871 + pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
872 + if (!pe) {
873 + err = -ENOMEM;
874 + goto out_error;
875 + }
876 +
877 + protect = 1;
853 878 }
854 879
855 880 ubi_free_vid_hdr(ubi, vid_hdr);
856 881 spin_lock(&ubi->wl_lock);
882 + if (protect)
883 + prot_tree_add(ubi, e1, pe, protect);
857 884 if (!ubi->move_to_put)
858 885 wl_tree_add(e2, &ubi->used);
859 886 else
860 887 put = 1;
861 888 ubi->move_from = ubi->move_to = NULL;
862 - ubi->move_from_put = ubi->move_to_put = 0;
863 - ubi->wl_scheduled = 0;
889 + ubi->move_to_put = ubi->wl_scheduled = 0;
864 890 spin_unlock(&ubi->wl_lock);
865 891
866 892 if (put) {
··· 889 877 */
890 878 dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
891 879 err = schedule_erase(ubi, e2, 0);
892 - if (err) {
893 - kmem_cache_free(wl_entries_slab, e2);
894 - ubi_ro_mode(ubi);
895 - }
880 + if (err)
881 + goto out_error;
896 882 }
897 883
898 - err = schedule_erase(ubi, e1, 0);
899 - if (err) {
900 - kmem_cache_free(wl_entries_slab, e1);
901 - ubi_ro_mode(ubi);
884 + if (!protect) {
885 + err = schedule_erase(ubi, e1, 0);
886 + if (err)
887 + goto out_error;
902 888 }
889 +
903 890
904 891 dbg_wl("done");
905 - return err;
892 + mutex_unlock(&ubi->move_mutex);
893 + return 0;
906 894
907 895 /*
908 - * Some error occurred. @e1 was not changed, so return it back. @e2
909 - * might be changed, schedule it for erasure.
896 + * For some reason the LEB was not moved; it might be an error or
897 + * something else. @e1 was not changed, so return it back. @e2 might
898 + * be changed, schedule it for erasure.
910 899 */
911 - error:
912 - if (err)
913 - dbg_wl("error %d occurred, cancel operation", err);
914 - ubi_assert(err <= 0);
915 -
900 + out_not_moved:
916 901 ubi_free_vid_hdr(ubi, vid_hdr);
917 902 spin_lock(&ubi->wl_lock);
918 - ubi->wl_scheduled = 0;
919 - if (ubi->move_from_put)
920 - put = 1;
903 + if (scrubbing)
904 + wl_tree_add(e1, &ubi->scrub);
921 905 else
922 906 wl_tree_add(e1, &ubi->used);
923 907 ubi->move_from = ubi->move_to = NULL;
924 - ubi->move_from_put = ubi->move_to_put = 0;
908 + ubi->move_to_put = ubi->wl_scheduled = 0;
925 909 spin_unlock(&ubi->wl_lock);
926 910
927 - if (put) {
928 - /*
929 - * Well, the target PEB was put meanwhile, schedule it for
930 - * erasure.
931 - */ 932 - dbg_wl("PEB %d was put meanwhile, erase", e1->pnum); 933 - err = schedule_erase(ubi, e1, 0); 934 - if (err) { 935 - kmem_cache_free(wl_entries_slab, e1); 936 - ubi_ro_mode(ubi); 937 - } 938 - } 939 - 940 911 err = schedule_erase(ubi, e2, 0); 941 - if (err) { 942 - kmem_cache_free(wl_entries_slab, e2); 943 - ubi_ro_mode(ubi); 944 - } 912 + if (err) 913 + goto out_error; 945 914 946 - yield(); 915 + mutex_unlock(&ubi->move_mutex); 916 + return 0; 917 + 918 + out_error: 919 + ubi_err("error %d while moving PEB %d to PEB %d", 920 + err, e1->pnum, e2->pnum); 921 + 922 + ubi_free_vid_hdr(ubi, vid_hdr); 923 + spin_lock(&ubi->wl_lock); 924 + ubi->move_from = ubi->move_to = NULL; 925 + ubi->move_to_put = ubi->wl_scheduled = 0; 926 + spin_unlock(&ubi->wl_lock); 927 + 928 + kmem_cache_free(ubi_wl_entry_slab, e1); 929 + kmem_cache_free(ubi_wl_entry_slab, e2); 930 + ubi_ro_mode(ubi); 931 + 932 + mutex_unlock(&ubi->move_mutex); 947 933 return err; 934 + 935 + out_cancel: 936 + ubi->wl_scheduled = 0; 937 + spin_unlock(&ubi->wl_lock); 938 + mutex_unlock(&ubi->move_mutex); 939 + ubi_free_vid_hdr(ubi, vid_hdr); 940 + return 0; 948 941 } 949 942 950 943 /** ··· 1037 1020 if (cancel) { 1038 1021 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec); 1039 1022 kfree(wl_wrk); 1040 - kmem_cache_free(wl_entries_slab, e); 1023 + kmem_cache_free(ubi_wl_entry_slab, e); 1041 1024 return 0; 1042 1025 } 1043 1026 ··· 1066 1049 1067 1050 ubi_err("failed to erase PEB %d, error %d", pnum, err); 1068 1051 kfree(wl_wrk); 1069 - kmem_cache_free(wl_entries_slab, e); 1052 + kmem_cache_free(ubi_wl_entry_slab, e); 1070 1053 1071 1054 if (err == -EINTR || err == -ENOMEM || err == -EAGAIN || 1072 1055 err == -EBUSY) { ··· 1136 1119 } 1137 1120 1138 1121 /** 1139 - * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling 1140 - * unit. 1122 + * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit. 1141 1123 * @ubi: UBI device description object 1142 1124 * @pnum: physical eraseblock to return 1143 1125 * @torture: if this physical eraseblock has to be tortured ··· 1144 1128 * This function is called to return physical eraseblock @pnum to the pool of 1145 1129 * free physical eraseblocks. The @torture flag has to be set if an I/O error 1146 1130 * occurred to this @pnum and it has to be tested. This function returns zero 1147 - * in case of success and a negative error code in case of failure. 1131 + * in case of success, and a negative error code in case of failure. 1148 1132 */ 1149 1133 int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture) 1150 1134 { ··· 1155 1139 ubi_assert(pnum >= 0); 1156 1140 ubi_assert(pnum < ubi->peb_count); 1157 1141 1142 + retry: 1158 1143 spin_lock(&ubi->wl_lock); 1159 - 1160 1144 e = ubi->lookuptbl[pnum]; 1161 1145 if (e == ubi->move_from) { 1162 1146 /* ··· 1164 1148 * be moved. It will be scheduled for erasure in the 1165 1149 * wear-leveling worker. 1166 1150 */ 1167 - dbg_wl("PEB %d is being moved", pnum); 1168 - ubi_assert(!ubi->move_from_put); 1169 - ubi->move_from_put = 1; 1151 + dbg_wl("PEB %d is being moved, wait", pnum); 1170 1152 spin_unlock(&ubi->wl_lock); 1171 - return 0; 1153 + 1154 + /* Wait for the WL worker by taking the @ubi->move_mutex */ 1155 + mutex_lock(&ubi->move_mutex); 1156 + mutex_unlock(&ubi->move_mutex); 1157 + goto retry; 1172 1158 } else if (e == ubi->move_to) { 1173 1159 /* 1174 1160 * User is putting the physical eraseblock which was selected 1175 1161 * as the target the data is moved to. 
It may happen if the EBA
1176 - * unit already re-mapped the LEB but the WL unit did has not
1177 - * put the PEB to the "used" tree.
1162 + * unit already re-mapped the LEB in 'ubi_eba_copy_leb()' but
1163 + * the WL unit has not put the PEB to the "used" tree yet, but
1164 + * it is about to do this. So we just set a flag which will
1165 + * tell the WL worker that the PEB is not needed anymore and
1166 + * should be scheduled for erasure.
1178 1167 */
1179 1168 dbg_wl("PEB %d is the target of data moving", pnum);
1180 1169 ubi_assert(!ubi->move_to_put);
··· 1193 1172 } else if (in_wl_tree(e, &ubi->scrub)) {
1194 1173 paranoid_check_in_wl_tree(e, &ubi->scrub);
1195 1174 rb_erase(&e->rb, &ubi->scrub);
1196 - } else
1197 - prot_tree_del(ubi, e->pnum);
1175 + } else {
1176 + err = prot_tree_del(ubi, e->pnum);
1177 + if (err) {
1178 + ubi_err("PEB %d not found", pnum);
1179 + ubi_ro_mode(ubi);
1180 + spin_unlock(&ubi->wl_lock);
1181 + return err;
1182 + }
1183 + }
1198 1184 }
1199 1185 spin_unlock(&ubi->wl_lock);
1200 1186
··· 1255 1227 if (in_wl_tree(e, &ubi->used)) {
1256 1228 paranoid_check_in_wl_tree(e, &ubi->used);
1257 1229 rb_erase(&e->rb, &ubi->used);
1258 - } else
1259 - prot_tree_del(ubi, pnum);
1230 + } else {
1231 + int err;
1232 +
1233 + err = prot_tree_del(ubi, e->pnum);
1234 + if (err) {
1235 + ubi_err("PEB %d not found", pnum);
1236 + ubi_ro_mode(ubi);
1237 + spin_unlock(&ubi->wl_lock);
1238 + return err;
1239 + }
1240 + }
1260 1241
1261 1242 wl_tree_add(e, &ubi->scrub);
1262 1243 spin_unlock(&ubi->wl_lock);
··· 1286 1249 */
1287 1250 int ubi_wl_flush(struct ubi_device *ubi)
1288 1251 {
1289 - int err, pending_count;
1290 -
1291 - pending_count = ubi->works_count;
1292 -
1293 - dbg_wl("flush (%d pending works)", pending_count);
1252 + int err;
1294 1253
1295 1254 /*
1296 1255 * Erase while the pending works queue is not empty, but not more than
1297 1256 * the number of currently pending works.
1298 1257 */
1299 - while (pending_count-- > 0) {
1258 + dbg_wl("flush (%d pending works)", ubi->works_count);
1259 + while (ubi->works_count) {
1260 + err = do_work(ubi);
1261 + if (err)
1262 + return err;
1263 + }
1264 +
1265 + /*
1266 + * Make sure all the works which have been done in parallel are
1267 + * finished.
1268 + */
1269 + down_write(&ubi->work_sem);
1270 + up_write(&ubi->work_sem);
1271 +
1272 + /*
1273 + * And in case the last one was the WL worker and it cancelled the LEB
1274 + * movement, flush again.
1275 + */
1276 + while (ubi->works_count) {
1277 + dbg_wl("flush more (%d pending works)", ubi->works_count);
1300 1278 err = do_work(ubi);
1301 1279 if (err)
1302 1280 return err;
··· 1346 1294 rb->rb_right = NULL;
1347 1295 }
1348 1296
1349 - kmem_cache_free(wl_entries_slab, e);
1297 + kmem_cache_free(ubi_wl_entry_slab, e);
1350 1298 }
1351 1299 }
1352 1300 }
··· 1355 1303 * ubi_thread - UBI background thread.
1356 1304 * @u: the UBI device description object pointer 1357 1305 */ 1358 - static int ubi_thread(void *u) 1306 + int ubi_thread(void *u) 1359 1307 { 1360 1308 int failures = 0; 1361 1309 struct ubi_device *ubi = u; ··· 1446 1394 ubi->used = ubi->free = ubi->scrub = RB_ROOT; 1447 1395 ubi->prot.pnum = ubi->prot.aec = RB_ROOT; 1448 1396 spin_lock_init(&ubi->wl_lock); 1397 + mutex_init(&ubi->move_mutex); 1398 + init_rwsem(&ubi->work_sem); 1449 1399 ubi->max_ec = si->max_ec; 1450 1400 INIT_LIST_HEAD(&ubi->works); 1451 1401 1452 1402 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num); 1453 1403 1454 - ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name); 1455 - if (IS_ERR(ubi->bgt_thread)) { 1456 - err = PTR_ERR(ubi->bgt_thread); 1457 - ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name, 1458 - err); 1459 - return err; 1460 - } 1461 - 1462 - if (ubi_devices_cnt == 0) { 1463 - wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab", 1464 - sizeof(struct ubi_wl_entry), 1465 - 0, 0, NULL); 1466 - if (!wl_entries_slab) 1467 - return -ENOMEM; 1468 - } 1469 - 1470 1404 err = -ENOMEM; 1471 1405 ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL); 1472 1406 if (!ubi->lookuptbl) 1473 - goto out_free; 1407 + return err; 1474 1408 1475 1409 list_for_each_entry_safe(seb, tmp, &si->erase, u.list) { 1476 1410 cond_resched(); 1477 1411 1478 - e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); 1412 + e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); 1479 1413 if (!e) 1480 1414 goto out_free; 1481 1415 ··· 1469 1431 e->ec = seb->ec; 1470 1432 ubi->lookuptbl[e->pnum] = e; 1471 1433 if (schedule_erase(ubi, e, 0)) { 1472 - kmem_cache_free(wl_entries_slab, e); 1434 + kmem_cache_free(ubi_wl_entry_slab, e); 1473 1435 goto out_free; 1474 1436 } 1475 1437 } ··· 1477 1439 list_for_each_entry(seb, &si->free, u.list) { 1478 1440 cond_resched(); 1479 1441 1480 - e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); 1442 + e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); 1481 1443 if (!e) 1482 1444 goto out_free; 1483 1445 ··· 1491 1453 list_for_each_entry(seb, &si->corr, u.list) { 1492 1454 cond_resched(); 1493 1455 1494 - e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); 1456 + e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); 1495 1457 if (!e) 1496 1458 goto out_free; 1497 1459 ··· 1499 1461 e->ec = seb->ec; 1500 1462 ubi->lookuptbl[e->pnum] = e; 1501 1463 if (schedule_erase(ubi, e, 0)) { 1502 - kmem_cache_free(wl_entries_slab, e); 1464 + kmem_cache_free(ubi_wl_entry_slab, e); 1503 1465 goto out_free; 1504 1466 } 1505 1467 } ··· 1508 1470 ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) { 1509 1471 cond_resched(); 1510 1472 1511 - e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL); 1473 + e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); 1512 1474 if (!e) 1513 1475 goto out_free; 1514 1476 ··· 1548 1510 tree_destroy(&ubi->free); 1549 1511 tree_destroy(&ubi->scrub); 1550 1512 kfree(ubi->lookuptbl); 1551 - if (ubi_devices_cnt == 0) 1552 - kmem_cache_destroy(wl_entries_slab); 1553 1513 return err; 1554 1514 } 1555 1515 ··· 1577 1541 rb->rb_right = NULL; 1578 1542 } 1579 1543 1580 - kmem_cache_free(wl_entries_slab, pe->e); 1544 + kmem_cache_free(ubi_wl_entry_slab, pe->e); 1581 1545 kfree(pe); 1582 1546 } 1583 1547 } ··· 1589 1553 */ 1590 1554 void ubi_wl_close(struct ubi_device *ubi) 1591 1555 { 1592 - dbg_wl("disable \"%s\"", ubi->bgt_name); 1593 - if (ubi->bgt_thread) 1594 - kthread_stop(ubi->bgt_thread); 1595 - 1596 1556 dbg_wl("close the UBI wear-leveling unit"); 
1597 1557 1598 1558 cancel_pending(ubi); ··· 1597 1565 tree_destroy(&ubi->free); 1598 1566 tree_destroy(&ubi->scrub); 1599 1567 kfree(ubi->lookuptbl); 1600 - if (ubi_devices_cnt == 1) 1601 - kmem_cache_destroy(wl_entries_slab); 1602 1568 } 1603 1569 1604 1570 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
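
The flush path above leans on a small rw-semaphore idiom: every worker holds @work_sem for reading while it runs, so write-locking the semaphore and immediately unlocking it acts as a barrier that waits for all in-flight workers. A stand-alone, hedged sketch of the idiom (function names are illustrative):

/* Sketch of the rw-semaphore barrier idiom used by ubi_wl_flush(). */
static void worker(struct rw_semaphore *sem)
{
	down_read(sem);		/* many workers may run concurrently */
	/* ... perform one unit of work ... */
	up_read(sem);
}

static void wait_for_workers(struct rw_semaphore *sem)
{
	/*
	 * Succeeds only once every reader has dropped the semaphore, so
	 * all work started before this point has finished.
	 */
	down_write(sem);
	up_write(sem);
}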
+4 -2
fs/jffs2/acl.c
··· 176 176 spin_unlock(&inode->i_lock); 177 177 } 178 178 179 - struct posix_acl *jffs2_get_acl(struct inode *inode, int type) 179 + static struct posix_acl *jffs2_get_acl(struct inode *inode, int type) 180 180 { 181 181 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); 182 182 struct posix_acl *acl; ··· 345 345 if (!clone) 346 346 return -ENOMEM; 347 347 rc = posix_acl_create_masq(clone, (mode_t *)i_mode); 348 - if (rc < 0) 348 + if (rc < 0) { 349 + posix_acl_release(clone); 349 350 return rc; 351 + } 350 352 if (rc > 0) 351 353 jffs2_iset_acl(inode, &f->i_acl_access, clone); 352 354
-2
fs/jffs2/acl.h
··· 28 28 29 29 #define JFFS2_ACL_NOT_CACHED ((void *)-1) 30 30 31 - extern struct posix_acl *jffs2_get_acl(struct inode *inode, int type); 32 31 extern int jffs2_permission(struct inode *, int, struct nameidata *); 33 32 extern int jffs2_acl_chmod(struct inode *); 34 33 extern int jffs2_init_acl_pre(struct inode *, struct inode *, int *); ··· 39 40 40 41 #else 41 42 42 - #define jffs2_get_acl(inode, type) (NULL) 43 43 #define jffs2_permission (NULL) 44 44 #define jffs2_acl_chmod(inode) (0) 45 45 #define jffs2_init_acl_pre(dir_i,inode,mode) (0)
+1 -5
fs/jffs2/fs.c
··· 97 97 ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid); 98 98 99 99 if (ivalid & ATTR_MODE) 100 - if (iattr->ia_mode & S_ISGID && 101 - !in_group_p(je16_to_cpu(ri->gid)) && !capable(CAP_FSETID)) 102 - ri->mode = cpu_to_jemode(iattr->ia_mode & ~S_ISGID); 103 - else 104 - ri->mode = cpu_to_jemode(iattr->ia_mode); 100 + ri->mode = cpu_to_jemode(iattr->ia_mode); 105 101 else 106 102 ri->mode = cpu_to_jemode(inode->i_mode); 107 103
+6 -3
fs/jffs2/nodelist.c
··· 32 32 if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) { 33 33 /* Duplicate. Free one */ 34 34 if (new->version < (*prev)->version) { 35 - dbg_dentlist("Eep! Marking new dirent node is obsolete, old is \"%s\", ino #%u\n", 35 + dbg_dentlist("Eep! Marking new dirent node obsolete, old is \"%s\", ino #%u\n", 36 36 (*prev)->name, (*prev)->ino); 37 37 jffs2_mark_node_obsolete(c, new->raw); 38 38 jffs2_free_full_dirent(new); 39 39 } else { 40 - dbg_dentlist("marking old dirent \"%s\", ino #%u bsolete\n", 40 + dbg_dentlist("marking old dirent \"%s\", ino #%u obsolete\n", 41 41 (*prev)->name, (*prev)->ino); 42 42 new->next = (*prev)->next; 43 - jffs2_mark_node_obsolete(c, ((*prev)->raw)); 43 + /* It may have been a 'placeholder' deletion dirent, 44 + if jffs2_can_mark_obsolete() (see jffs2_do_unlink()) */ 45 + if ((*prev)->raw) 46 + jffs2_mark_node_obsolete(c, ((*prev)->raw)); 44 47 jffs2_free_full_dirent(*prev); 45 48 *prev = new; 46 49 }
+15 -14
fs/jffs2/readinode.c
··· 37 37 38 38 BUG_ON(tn->csize == 0); 39 39 40 - if (!jffs2_is_writebuffered(c)) 41 - goto adj_acc; 42 - 43 40 /* Calculate how many bytes were already checked */ 44 41 ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode); 45 - len = ofs % c->wbuf_pagesize; 46 - if (likely(len)) 47 - len = c->wbuf_pagesize - len; 42 + len = tn->csize; 48 43 49 - if (len >= tn->csize) { 50 - dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n", 51 - ref_offset(ref), tn->csize, ofs); 52 - goto adj_acc; 44 + if (jffs2_is_writebuffered(c)) { 45 + int adj = ofs % c->wbuf_pagesize; 46 + if (likely(adj)) 47 + adj = c->wbuf_pagesize - adj; 48 + 49 + if (adj >= tn->csize) { 50 + dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n", 51 + ref_offset(ref), tn->csize, ofs); 52 + goto adj_acc; 53 + } 54 + 55 + ofs += adj; 56 + len -= adj; 53 57 } 54 - 55 - ofs += len; 56 - len = tn->csize - len; 57 58 58 59 dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n", 59 60 ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len); ··· 64 63 * adding and jffs2_flash_read_end() interface. */ 65 64 if (c->mtd->point) { 66 65 err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer); 67 - if (!err && retlen < tn->csize) { 66 + if (!err && retlen < len) { 68 67 JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize); 69 68 c->mtd->unpoint(c->mtd, buffer, ofs, retlen); 70 69 } else if (err)
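
A worked example of the adjustment with assumed numbers, since the arithmetic is easy to misread:

/* Assumed values: c->wbuf_pagesize == 2048, node data starting at
 * ofs == 4200, compressed data length csize == 4000. */
int adj = 4200 % 2048;		/* 104: bytes into the current wbuf page */
if (adj)
	adj = 2048 - adj;	/* 1944: bytes already covered by the CRC */

/* adj (1944) < csize (4000), so checking resumes at the next wbuf page
 * boundary: ofs becomes 4200 + 1944 = 6144, and only
 * 4000 - 1944 = 2056 bytes still need to be read and checksummed. */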
+15 -13
fs/jffs2/write.c
··· 582 582 jffs2_add_fd_to_list(c, fd, &dir_f->dents); 583 583 up(&dir_f->sem); 584 584 } else { 585 - struct jffs2_full_dirent **prev = &dir_f->dents; 585 + struct jffs2_full_dirent *fd = dir_f->dents; 586 586 uint32_t nhash = full_name_hash(name, namelen); 587 587 588 588 /* We don't actually want to reserve any space, but we do ··· 590 590 down(&c->alloc_sem); 591 591 down(&dir_f->sem); 592 592 593 - while ((*prev) && (*prev)->nhash <= nhash) { 594 - if ((*prev)->nhash == nhash && 595 - !memcmp((*prev)->name, name, namelen) && 596 - !(*prev)->name[namelen]) { 597 - struct jffs2_full_dirent *this = *prev; 593 + for (fd = dir_f->dents; fd; fd = fd->next) { 594 + if (fd->nhash == nhash && 595 + !memcmp(fd->name, name, namelen) && 596 + !fd->name[namelen]) { 598 597 599 598 D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) @%08x obsolete\n", 600 - this->ino, ref_offset(this->raw))); 601 - 602 - *prev = this->next; 603 - jffs2_mark_node_obsolete(c, (this->raw)); 604 - jffs2_free_full_dirent(this); 599 + fd->ino, ref_offset(fd->raw))); 600 + jffs2_mark_node_obsolete(c, fd->raw); 601 + /* We don't want to remove it from the list immediately, 602 + because that screws up getdents()/seek() semantics even 603 + more than they're screwed already. Turn it into a 604 + node-less deletion dirent instead -- a placeholder */ 605 + fd->raw = NULL; 606 + fd->ino = 0; 605 607 break; 606 608 } 607 - prev = &((*prev)->next); 608 609 } 609 610 up(&dir_f->sem); 610 611 } ··· 631 630 D1(printk(KERN_DEBUG "Removing deletion dirent for \"%s\" from dir ino #%u\n", 632 631 fd->name, dead_f->inocache->ino)); 633 632 } 634 - jffs2_mark_node_obsolete(c, fd->raw); 633 + if (fd->raw) 634 + jffs2_mark_node_obsolete(c, fd->raw); 635 635 jffs2_free_full_dirent(fd); 636 636 } 637 637 }
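
Any later traversal has to expect such node-less placeholders, as the nodelist.c hunk above already does. A minimal, hedged sketch of tearing down a dirent list once fd->raw may legitimately be NULL (c and the dents list head are assumed to be in scope):

/* Sketch: free a dirent list that may contain placeholder entries. */
struct jffs2_full_dirent *fd = dents, *next;

while (fd) {
	next = fd->next;
	if (fd->raw)	/* NULL for a node-less deletion placeholder */
		jffs2_mark_node_obsolete(c, fd->raw);
	jffs2_free_full_dirent(fd);
	fd = next;
}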
+12
include/linux/mtd/cfi.h
··· 98 98 #define CFI_DEVICETYPE_X32 (32 / 8) 99 99 #define CFI_DEVICETYPE_X64 (64 / 8) 100 100 101 + 102 + /* Device Interface Code Assignments from the "Common Flash Memory Interface 103 + * Publication 100" dated December 1, 2001. 104 + */ 105 + #define CFI_INTERFACE_X8_ASYNC 0x0000 106 + #define CFI_INTERFACE_X16_ASYNC 0x0001 107 + #define CFI_INTERFACE_X8_BY_X16_ASYNC 0x0002 108 + #define CFI_INTERFACE_X32_ASYNC 0x0003 109 + #define CFI_INTERFACE_X16_BY_X32_ASYNC 0x0005 110 + #define CFI_INTERFACE_NOT_ALLOWED 0xffff 111 + 112 + 101 113 /* NB: We keep these structures in memory in HOST byteorder, except 102 114 * where individually noted. 103 115 */
+9
include/linux/mtd/mtd.h
··· 152 152 int (*read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); 153 153 int (*write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf); 154 154 155 + /* In blackbox flight recorder like scenarios we want to make successful 156 + writes in interrupt context. panic_write() is only intended to be 157 + called when its known the kernel is about to panic and we need the 158 + write to succeed. Since the kernel is not going to be running for much 159 + longer, this function can break locks and delay to ensure the write 160 + succeeds (but not sleep). */ 161 + 162 + int (*panic_write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf); 163 + 155 164 int (*read_oob) (struct mtd_info *mtd, loff_t from, 156 165 struct mtd_oob_ops *ops); 157 166 int (*write_oob) (struct mtd_info *mtd, loff_t to,
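
A hedged sketch of a caller honouring that contract: prefer panic_write() only when the system is going down and the driver actually implements it (the wrapper name is illustrative):

/* Illustrative only: write a log record, preferring panic_write when
 * the kernel is dying and the driver supports it. */
static int record_write(struct mtd_info *mtd, loff_t ofs, size_t len,
			const u_char *buf, int in_panic)
{
	size_t retlen;
	int err;

	if (in_panic && mtd->panic_write)
		err = mtd->panic_write(mtd, ofs, len, &retlen, buf);
	else
		err = mtd->write(mtd, ofs, len, &retlen, buf);

	if (!err && retlen != len)
		err = -EIO;	/* short write */
	return err;
}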
+8
include/linux/mtd/mtdram.h
··· 1 + #ifndef __MTD_MTDRAM_H__ 2 + #define __MTD_MTDRAM_H__ 3 + 4 + #include <linux/mtd/mtd.h> 5 + int mtdram_init_device(struct mtd_info *mtd, void *mapped_address, 6 + unsigned long size, char *name); 7 + 8 + #endif /* __MTD_MTDRAM_H__ */
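
A hedged usage sketch of the newly exported helper; that registration of the MTD device happens inside mtdram_init_device() mirrors the mtdram driver's own behaviour and is an assumption here:

#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mtd/mtdram.h>

/* Hypothetical: back a small MTD device with vmalloc()ed RAM. */
static struct mtd_info ram_mtd;
static void *ram_buf;

static int __init ram_mtd_example_init(void)
{
	unsigned long size = 4 * 1024 * 1024;

	ram_buf = vmalloc(size);
	if (!ram_buf)
		return -ENOMEM;
	memset(ram_buf, 0xff, size);	/* look like erased flash */

	return mtdram_init_device(&ram_mtd, ram_buf, size, "ram_example");
}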
+1
include/linux/mtd/onenand_regs.h
··· 67 67 /* 68 68 * Device ID Register F001h (R) 69 69 */ 70 + #define ONENAND_DEVICE_DENSITY_MASK (0xf) 70 71 #define ONENAND_DEVICE_DENSITY_SHIFT (4) 71 72 #define ONENAND_DEVICE_IS_DDP (1 << 3) 72 73 #define ONENAND_DEVICE_IS_DEMUX (1 << 2)
+8 -1
include/linux/mtd/partitions.h
··· 71 71 72 72 #define put_partition_parser(p) do { module_put((p)->owner); } while(0) 73 73 74 - #endif 74 + struct device; 75 + struct device_node; 75 76 77 + int __devinit of_mtd_parse_partitions(struct device *dev, 78 + struct mtd_info *mtd, 79 + struct device_node *node, 80 + struct mtd_partition **pparts); 81 + 82 + #endif
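
A hedged probe-path sketch of the new helper (driver and function names are illustrative; of_mtd_parse_partitions() is assumed to return the number of partitions found, or a negative error):

/* Hypothetical: derive a partition map from the flash device-tree node
 * and register it. */
static int __devinit example_register(struct device *dev,
				      struct mtd_info *mtd,
				      struct device_node *node)
{
	struct mtd_partition *parts;
	int nr_parts;

	nr_parts = of_mtd_parse_partitions(dev, mtd, node, &parts);
	if (nr_parts <= 0)
		return nr_parts;	/* error, or nothing described */

	return add_mtd_partitions(mtd, parts, nr_parts);
}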
+1 -17
include/linux/mtd/ubi.h
··· 26 26 #include <mtd/ubi-user.h> 27 27 28 28 /* 29 - * UBI data type hint constants. 30 - * 31 - * UBI_LONGTERM: long-term data 32 - * UBI_SHORTTERM: short-term data 33 - * UBI_UNKNOWN: data persistence is unknown 34 - * 35 - * These constants are used when data is written to UBI volumes in order to 36 - * help the UBI wear-leveling unit to find more appropriate physical 37 - * eraseblocks. 38 - */ 39 - enum { 40 - UBI_LONGTERM = 1, 41 - UBI_SHORTTERM, 42 - UBI_UNKNOWN 43 - }; 44 - 45 - /* 46 29 * enum ubi_open_mode - UBI volume open mode constants. 47 30 * 48 31 * UBI_READONLY: read-only mode ··· 150 167 int len, int dtype); 151 168 int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum); 152 169 int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum); 170 + int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype); 153 171 int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum); 154 172 155 173 /*
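
A hedged in-kernel sketch of the new ubi_leb_map() helper in context; ubi_open_volume(), UBI_READWRITE and ubi_close_volume() come from the part of this header not shown in the hunk:

#include <linux/err.h>
#include <linux/mtd/ubi.h>

/* Hypothetical: make sure LEB 0 of volume 0 on UBI device 0 is mapped,
 * i.e. bound to an erased physical eraseblock. */
static int example_map_leb0(void)
{
	struct ubi_volume_desc *desc;
	int err;

	desc = ubi_open_volume(0, 0, UBI_READWRITE);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	err = ubi_is_mapped(desc, 0);
	if (err == 0)		/* not mapped yet */
		err = ubi_leb_map(desc, 0, UBI_UNKNOWN);
	else if (err == 1)	/* already mapped */
		err = 0;

	ubi_close_volume(desc);
	return err;
}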
+1 -1
include/mtd/mtd-abi.h
··· 29 29 #define MTD_WRITEABLE 0x400 /* Device is writeable */ 30 30 #define MTD_BIT_WRITEABLE 0x800 /* Single bits can be flipped */ 31 31 #define MTD_NO_ERASE 0x1000 /* No erase necessary */ 32 - #define MTD_STUPID_LOCK 0x2000 /* Always locked after reset */ 32 + #define MTD_POWERUP_LOCK 0x2000 /* Always locked after reset */ 33 33 34 34 // Some common devices / combinations of capabilities 35 35 #define MTD_CAP_ROM 0
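
The renamed flag still means "all sectors are locked when the chip comes out of reset"; a short, hedged sketch of how core or map code might react to it (assuming the chip provides an unlock method):

/* Sketch: a chip flagged MTD_POWERUP_LOCK powers up fully locked, so
 * unlock the whole device before any write can succeed. */
if ((mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock)
	mtd->unlock(mtd, 0, mtd->size);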
+44 -3
include/mtd/ubi-header.h
··· 58 58 };
59 59
60 60 /*
61 + * Volume flags used in the volume table record.
62 + *
63 + * @UBI_VTBL_AUTORESIZE_FLG: auto-resize this volume
64 + *
65 + * %UBI_VTBL_AUTORESIZE_FLG flag can be set only for one volume in the volume
66 + * table. UBI automatically re-sizes the volume which has this flag and makes
67 + * it as large as possible. This means that if after the
68 + * initialization UBI finds out that there are available physical eraseblocks
69 + * present on the device, it automatically appends all of them to the volume
70 + * (the physical eraseblocks reserved for bad eraseblocks handling and other
71 + * reserved physical eraseblocks are not taken). So, if there is a volume with
72 + * the %UBI_VTBL_AUTORESIZE_FLG flag set, the amount of available logical
73 + * eraseblocks will be zero after UBI is loaded, because all of them will be
74 + * reserved for this volume. Note, the %UBI_VTBL_AUTORESIZE_FLG bit is cleared
75 + * after the volume has been initialized.
76 + *
77 + * The auto-resize feature is useful for device production purposes. For
78 + * example, different NAND flash chips may have a different amount of initial bad
79 + * eraseblocks, depending on the particular chip instance. Manufacturers of NAND
80 + * chips usually guarantee that the amount of initial bad eraseblocks does not
81 + * exceed a certain percentage, e.g. 2%. When one creates an UBI image which will be
82 + * flashed to the end devices in production, he does not know the exact amount
83 + * of good physical eraseblocks the NAND chip on the device will have, but this
84 + * number is required to calculate the volume sizes and put them into the volume
85 + * table of the UBI image. In this case, one of the volumes (e.g., the one
86 + * which will store the root file system) is marked as "auto-resizable", and
87 + * UBI will adjust its size on the first boot if needed.
88 + *
89 + * Note, first UBI reserves some amount of physical eraseblocks for bad
90 + * eraseblock handling, and then re-sizes the volume, not vice-versa. This
91 + * means that the pool of reserved physical eraseblocks will always be present.
92 + */
93 + enum {
94 + UBI_VTBL_AUTORESIZE_FLG = 0x01,
95 + };
96 +
97 + /*
61 98 * Compatibility constants used by internal volumes.
62 99 *
63 100 * @UBI_COMPAT_DELETE: delete this internal volume before anything is written
··· 299 262
300 263 /* The layout volume contains the volume table */
301 264
302 - #define UBI_LAYOUT_VOL_ID UBI_INTERNAL_VOL_START
265 + #define UBI_LAYOUT_VOLUME_ID UBI_INTERNAL_VOL_START
266 + #define UBI_LAYOUT_VOLUME_TYPE UBI_VID_DYNAMIC
267 + #define UBI_LAYOUT_VOLUME_ALIGN 1
303 268 #define UBI_LAYOUT_VOLUME_EBS 2
304 269 #define UBI_LAYOUT_VOLUME_NAME "layout volume"
305 270 #define UBI_LAYOUT_VOLUME_COMPAT UBI_COMPAT_REJECT
··· 328 289 * @upd_marker: if volume update was started but not finished
329 290 * @name_len: volume name length
330 291 * @name: the volume name
331 - * @padding2: reserved, zeroes
292 + * @flags: volume flags (%UBI_VTBL_AUTORESIZE_FLG)
293 + * @padding: reserved, zeroes
332 294 * @crc: a CRC32 checksum of the record
333 295 *
334 296 * The volume table records are stored in the volume table, which is stored in
··· 364 324 __u8 upd_marker;
365 325 __be16 name_len;
366 326 __u8 name[UBI_VOL_NAME_MAX+1];
367 - __u8 padding2[24];
327 + __u8 flags;
328 + __u8 padding[23];
368 329 __be32 crc;
369 330 } __attribute__ ((packed));
370 331
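
For illustration, a hedged user-space sketch of how an off-line image tool might mark one volume table record auto-resizable; crc32(), cpu_to_be32() and UBI_CRC32_INIT stand in for whatever CRC and endianness helpers such a tool links against, and UBI_VTBL_RECORD_SIZE_CRC is defined elsewhere in this header:

/* Hypothetical image-tool fragment: the record CRC covers everything
 * but the @crc field itself, so it must be recomputed after the flag
 * is set. The kernel clears the flag again after the first re-size. */
static void mark_autoresizable(struct ubi_vtbl_record *rec)
{
	uint32_t crc;

	rec->flags |= UBI_VTBL_AUTORESIZE_FLG;
	crc = crc32(UBI_CRC32_INIT, rec, UBI_VTBL_RECORD_SIZE_CRC);
	rec->crc = cpu_to_be32(crc);
}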
+117 -10
include/mtd/ubi-user.h
···
22 22 #define __UBI_USER_H__
23 23
24 24 /*
25 + * UBI device creation (the same as MTD device attachment)
26 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27 + *
28 + * MTD devices may be attached using the %UBI_IOCATT ioctl command of the UBI
29 + * control device. The caller has to properly fill and pass a
30 + * &struct ubi_attach_req object - UBI will attach the MTD device specified in
31 + * the request and return the newly created UBI device number as the ioctl
32 + * return value.
33 + *
34 + * UBI device deletion (the same as MTD device detachment)
35 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
36 + *
37 + * An UBI device may be deleted with the %UBI_IOCDET ioctl command of the UBI
38 + * control device.
39 + *
25 40 * UBI volume creation
26 41 * ~~~~~~~~~~~~~~~~~~~
27 42 *
··· 63 48 *
64 49 * Volume update should be done via the %UBI_IOCVOLUP IOCTL command of the
65 50 * corresponding UBI volume character device. A pointer to a 64-bit update
66 - * size should be passed to the IOCTL. After then, UBI expects user to write
51 + * size should be passed to the IOCTL. After this, UBI expects the user to write
67 52 * this number of bytes to the volume character device. The update is finished
68 53 * when the claimed number of bytes is passed. So, the volume update sequence
69 54 * is something like:
··· 72 57 * ioctl(fd, UBI_IOCVOLUP, &image_size);
73 58 * write(fd, buf, image_size);
74 59 * close(fd);
60 + *
61 + * Atomic eraseblock change
62 + * ~~~~~~~~~~~~~~~~~~~~~~~~
63 + *
64 + * The atomic eraseblock change operation is done via the %UBI_IOCEBCH IOCTL
65 + * command of the corresponding UBI volume character device. A pointer to
66 + * &struct ubi_leb_change_req has to be passed to the IOCTL. Then the user is
67 + * expected to write the requested number of bytes. This is similar to the
68 + * "volume update" IOCTL.
75 69 */
76 70
77 71 /*
78 - * When a new volume is created, users may either specify the volume number they
79 - * want to create or to let UBI automatically assign a volume number using this
80 - * constant.
72 + * When a new UBI volume or UBI device is created, users may either specify the
73 + * volume/device number they want to create or let UBI automatically assign
74 + * the number using these constants.
81 75 */
82 76 #define UBI_VOL_NUM_AUTO (-1)
77 + #define UBI_DEV_NUM_AUTO (-1)
83 78
84 79 /* Maximum volume name length */
85 80 #define UBI_MAX_VOLUME_NAME 127
··· 105 80 /* Re-size an UBI volume */
106 81 #define UBI_IOCRSVOL _IOW(UBI_IOC_MAGIC, 2, struct ubi_rsvol_req)
107 82
83 + /* IOCTL commands of the UBI control character device */
84 +
85 + #define UBI_CTRL_IOC_MAGIC 'o'
86 +
87 + /* Attach an MTD device */
88 + #define UBI_IOCATT _IOW(UBI_CTRL_IOC_MAGIC, 64, struct ubi_attach_req)
89 + /* Detach an MTD device */
90 + #define UBI_IOCDET _IOW(UBI_CTRL_IOC_MAGIC, 65, int32_t)
91 +
108 92 /* IOCTL commands of UBI volume character devices */
109 93
110 94 #define UBI_VOL_IOC_MAGIC 'O'
··· 122 88 #define UBI_IOCVOLUP _IOW(UBI_VOL_IOC_MAGIC, 0, int64_t)
123 89 /* An eraseblock erasure command, used for debugging, disabled by default */
124 90 #define UBI_IOCEBER _IOW(UBI_VOL_IOC_MAGIC, 1, int32_t)
91 + /* An atomic eraseblock change command */
92 + #define UBI_IOCEBCH _IOW(UBI_VOL_IOC_MAGIC, 2, int32_t)
93 +
94 + /* Maximum MTD device name length supported by UBI */
95 + #define MAX_UBI_MTD_NAME_LEN 127
96 +
97 + /*
98 + * UBI data type hint constants.
99 + *
100 + * UBI_LONGTERM: long-term data
101 + * UBI_SHORTTERM: short-term data
102 + * UBI_UNKNOWN: data persistence is unknown
103 + *
104 + * These constants are used when data is written to UBI volumes in order to
105 + * help the UBI wear-leveling unit find more appropriate physical
106 + * eraseblocks.
107 + */
108 + enum {
109 + 	UBI_LONGTERM = 1,
110 + 	UBI_SHORTTERM = 2,
111 + 	UBI_UNKNOWN = 3,
112 + };
125 113
126 114 /*
127 115 * UBI volume type constants.
··· 153 97 */
154 98 enum {
155 99 	UBI_DYNAMIC_VOLUME = 3,
156 - 	UBI_STATIC_VOLUME = 4
100 + 	UBI_STATIC_VOLUME = 4,
101 + };
102 +
103 + /**
104 + * struct ubi_attach_req - attach MTD device request.
105 + * @ubi_num: UBI device number to create
106 + * @mtd_num: MTD device number to attach
107 + * @vid_hdr_offset: VID header offset (use defaults if %0)
108 + * @padding: reserved for future, not used, has to be zeroed
109 + *
110 + * This data structure is used to specify the MTD device UBI has to attach and
111 + * the parameters it has to use. The number which should be assigned to the new
112 + * UBI device is passed in @ubi_num. UBI may automatically assign the number if
113 + * %UBI_DEV_NUM_AUTO is passed. In this case, the device number is returned in
114 + * @ubi_num.
115 + *
116 + * Most applications should pass %0 in @vid_hdr_offset to make UBI use the
117 + * default offset of the VID header within physical eraseblocks. The default
118 + * offset is the next min. I/O unit after the EC header. For example, it will be
119 + * offset 512 in the case of a NAND flash with 512-byte pages and no sub-page
120 + * support, or 512 in the case of a 2KiB page NAND flash with 4 512-byte sub-pages.
121 + *
122 + * But in rare cases, if this optimizes things, the VID header may be placed at
123 + * a different offset. For example, the boot-loader might do things faster if the
124 + * VID header sits at the end of the first 2KiB NAND page with 4 sub-pages. As
125 + * the boot-loader would not normally need to read EC headers (unless it needs
126 + * UBI in RW mode), it might be able to calculate ECC faster. This is a weird
127 + * example, but a real-life one. So, in this example, @vid_hdr_offset would be
128 + * 2KiB - 64 bytes = 1984. Note that this position is not even 512-byte
129 + * aligned, which is OK, as UBI is clever enough to realize that this is the 4th
130 + * sub-page of the first page and add the needed padding.
131 + */
132 + struct ubi_attach_req {
133 + 	int32_t ubi_num;
134 + 	int32_t mtd_num;
135 + 	int32_t vid_hdr_offset;
136 + 	uint8_t padding[12];
157 137 };
158 138
159 139 /**
160 140 * struct ubi_mkvol_req - volume description data structure used in
161 - * volume creation requests.
141 + * volume creation requests.
162 142 * @vol_id: volume number
163 143 * @alignment: volume alignment
164 144 * @bytes: volume size in bytes
165 145 * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
166 - * @padding1: reserved for future, not used
146 + * @padding1: reserved for future, not used, has to be zeroed
167 147 * @name_len: volume name length
168 - * @padding2: reserved for future, not used
148 + * @padding2: reserved for future, not used, has to be zeroed
169 149 * @name: volume name
170 150 *
171 - * This structure is used by userspace programs when creating new volumes. The
151 + * This structure is used by user-space programs when creating new volumes. The
172 152 * @bytes field is only necessary when creating static volumes.
173 153
173 153 * 174 154 * The @alignment field specifies the required alignment of the volume logical ··· 231 139 int8_t padding1; 232 140 int16_t name_len; 233 141 int8_t padding2[4]; 234 - char name[UBI_MAX_VOLUME_NAME+1]; 142 + char name[UBI_MAX_VOLUME_NAME + 1]; 235 143 } __attribute__ ((packed)); 236 144 237 145 /** ··· 248 156 struct ubi_rsvol_req { 249 157 int64_t bytes; 250 158 int32_t vol_id; 159 + } __attribute__ ((packed)); 160 + 161 + /** 162 + * struct ubi_leb_change_req - a data structure used in atomic logical 163 + * eraseblock change requests. 164 + * @lnum: logical eraseblock number to change 165 + * @bytes: how many bytes will be written to the logical eraseblock 166 + * @dtype: data type (%UBI_LONGTERM, %UBI_SHORTTERM, %UBI_UNKNOWN) 167 + * @padding: reserved for future, not used, has to be zeroed 168 + */ 169 + struct ubi_leb_change_req { 170 + int32_t lnum; 171 + int32_t bytes; 172 + uint8_t dtype; 173 + uint8_t padding[7]; 251 174 } __attribute__ ((packed)); 252 175 253 176 #endif /* __UBI_USER_H__ */