Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mtd: Stop assuming mtd_erase() is asynchronous

None of the mtd->_erase() implementations work in an asynchronous manner,
so let's simplify MTD users that call mtd_erase(). All they need to do
is check the value returned by mtd_erase() and treat any non-zero return
value as a failure.

Signed-off-by: Boris Brezillon <boris.brezillon@bootlin.com>
Reviewed-by: Richard Weinberger <richard@nod.at>

+51 -369
-3
drivers/mtd/devices/bcm47xxsflash.c
··· 95 95 else 96 96 erase->state = MTD_ERASE_DONE; 97 97 98 - if (erase->callback) 99 - erase->callback(erase); 100 - 101 98 return err; 102 99 } 103 100
+8 -43
drivers/mtd/ftl.c
··· 140 140 #define XFER_PREPARED 0x03 141 141 #define XFER_FAILED 0x04 142 142 143 - /*====================================================================*/ 144 - 145 - 146 - static void ftl_erase_callback(struct erase_info *done); 147 - 148 - 149 143 /*====================================================================== 150 144 151 145 Scan_header() checks to see if a memory region contains an FTL ··· 343 349 return -ENOMEM; 344 350 345 351 erase->mtd = part->mbd.mtd; 346 - erase->callback = ftl_erase_callback; 347 352 erase->addr = xfer->Offset; 348 353 erase->len = 1 << part->header.EraseUnitSize; 349 - erase->priv = (u_long)part; 350 354 351 355 ret = mtd_erase(part->mbd.mtd, erase); 356 + if (!ret) { 357 + xfer->state = XFER_ERASED; 358 + xfer->EraseCount++; 359 + } else { 360 + xfer->state = XFER_FAILED; 361 + pr_notice("ftl_cs: erase failed: err = %d\n", ret); 362 + } 352 363 353 - if (!ret) 354 - xfer->EraseCount++; 355 - else 356 - kfree(erase); 364 + kfree(erase); 357 365 358 366 return ret; 359 367 } /* erase_xfer */ ··· 366 370 it an appropriate header. 
367 371 368 372 ======================================================================*/ 369 - 370 - static void ftl_erase_callback(struct erase_info *erase) 371 - { 372 - partition_t *part; 373 - struct xfer_info_t *xfer; 374 - int i; 375 - 376 - /* Look up the transfer unit */ 377 - part = (partition_t *)(erase->priv); 378 - 379 - for (i = 0; i < part->header.NumTransferUnits; i++) 380 - if (part->XferInfo[i].Offset == erase->addr) break; 381 - 382 - if (i == part->header.NumTransferUnits) { 383 - printk(KERN_NOTICE "ftl_cs: internal error: " 384 - "erase lookup failed!\n"); 385 - return; 386 - } 387 - 388 - xfer = &part->XferInfo[i]; 389 - if (erase->state == MTD_ERASE_DONE) 390 - xfer->state = XFER_ERASED; 391 - else { 392 - xfer->state = XFER_FAILED; 393 - printk(KERN_NOTICE "ftl_cs: erase failed: state = %d\n", 394 - erase->state); 395 - } 396 - 397 - kfree(erase); 398 - 399 - } /* ftl_erase_callback */ 400 373 401 374 static int prepare_xfer(partition_t *part, int i) 402 375 {
+3 -2
drivers/mtd/inftlmount.c
··· 393 393 mark only the failed block in the bbt. */ 394 394 for (physblock = 0; physblock < inftl->EraseSize; 395 395 physblock += instr->len, instr->addr += instr->len) { 396 - mtd_erase(inftl->mbd.mtd, instr); 396 + int ret; 397 397 398 - if (instr->state == MTD_ERASE_FAILED) { 398 + ret = mtd_erase(inftl->mbd.mtd, instr); 399 + if (ret) { 399 400 printk(KERN_WARNING "INFTL: error while formatting block %d\n", 400 401 block); 401 402 goto fail;
-20
drivers/mtd/mtdblock.c
··· 55 55 * being written to until a different sector is required. 56 56 */ 57 57 58 - static void erase_callback(struct erase_info *done) 59 - { 60 - wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv; 61 - wake_up(wait_q); 62 - } 63 - 64 58 static int erase_write (struct mtd_info *mtd, unsigned long pos, 65 59 int len, const char *buf) 66 60 { 67 61 struct erase_info erase; 68 - DECLARE_WAITQUEUE(wait, current); 69 - wait_queue_head_t wait_q; 70 62 size_t retlen; 71 63 int ret; 72 64 73 65 /* 74 66 * First, let's erase the flash block. 75 67 */ 76 - 77 - init_waitqueue_head(&wait_q); 78 68 erase.mtd = mtd; 79 - erase.callback = erase_callback; 80 69 erase.addr = pos; 81 70 erase.len = len; 82 - erase.priv = (u_long)&wait_q; 83 - 84 - set_current_state(TASK_INTERRUPTIBLE); 85 - add_wait_queue(&wait_q, &wait); 86 71 87 72 ret = mtd_erase(mtd, &erase); 88 73 if (ret) { 89 - set_current_state(TASK_RUNNING); 90 - remove_wait_queue(&wait_q, &wait); 91 74 printk (KERN_WARNING "mtdblock: erase of region [0x%lx, 0x%x] " 92 75 "on \"%s\" failed\n", 93 76 pos, len, mtd->name); 94 77 return ret; 95 78 } 96 - 97 - schedule(); /* Wait for erase to finish. */ 98 - remove_wait_queue(&wait_q, &wait); 99 79 100 80 /* 101 81 * Next, write the data to flash.
-31
drivers/mtd/mtdchar.c
··· 324 324 IOCTL calls for getting device parameters. 325 325 326 326 ======================================================================*/ 327 - static void mtdchar_erase_callback (struct erase_info *instr) 328 - { 329 - wake_up((wait_queue_head_t *)instr->priv); 330 - } 331 327 332 328 static int otp_select_filemode(struct mtd_file_info *mfi, int mode) 333 329 { ··· 705 709 if (!erase) 706 710 ret = -ENOMEM; 707 711 else { 708 - wait_queue_head_t waitq; 709 - DECLARE_WAITQUEUE(wait, current); 710 - 711 - init_waitqueue_head(&waitq); 712 - 713 712 if (cmd == MEMERASE64) { 714 713 struct erase_info_user64 einfo64; 715 714 ··· 727 736 erase->len = einfo32.length; 728 737 } 729 738 erase->mtd = mtd; 730 - erase->callback = mtdchar_erase_callback; 731 - erase->priv = (unsigned long)&waitq; 732 739 733 - /* 734 - FIXME: Allow INTERRUPTIBLE. Which means 735 - not having the wait_queue head on the stack. 736 - 737 - If the wq_head is on the stack, and we 738 - leave because we got interrupted, then the 739 - wq_head is no longer there when the 740 - callback routine tries to wake us up. 741 - */ 742 740 ret = mtd_erase(mtd, erase); 743 - if (!ret) { 744 - set_current_state(TASK_UNINTERRUPTIBLE); 745 - add_wait_queue(&waitq, &wait); 746 - if (erase->state != MTD_ERASE_DONE && 747 - erase->state != MTD_ERASE_FAILED) 748 - schedule(); 749 - remove_wait_queue(&waitq, &wait); 750 - set_current_state(TASK_RUNNING); 751 - 752 - ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0; 753 - } 754 741 kfree(erase); 755 742 } 756 743 break;
+3 -45
drivers/mtd/mtdconcat.c
··· 333 333 return -EINVAL; 334 334 } 335 335 336 - static void concat_erase_callback(struct erase_info *instr) 337 - { 338 - wake_up((wait_queue_head_t *) instr->priv); 339 - } 340 - 341 - static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase) 342 - { 343 - int err; 344 - wait_queue_head_t waitq; 345 - DECLARE_WAITQUEUE(wait, current); 346 - 347 - /* 348 - * This code was stol^H^H^H^Hinspired by mtdchar.c 349 - */ 350 - init_waitqueue_head(&waitq); 351 - 352 - erase->mtd = mtd; 353 - erase->callback = concat_erase_callback; 354 - erase->priv = (unsigned long) &waitq; 355 - 356 - /* 357 - * FIXME: Allow INTERRUPTIBLE. Which means 358 - * not having the wait_queue head on the stack. 359 - */ 360 - err = mtd_erase(mtd, erase); 361 - if (!err) { 362 - set_current_state(TASK_UNINTERRUPTIBLE); 363 - add_wait_queue(&waitq, &wait); 364 - if (erase->state != MTD_ERASE_DONE 365 - && erase->state != MTD_ERASE_FAILED) 366 - schedule(); 367 - remove_wait_queue(&waitq, &wait); 368 - set_current_state(TASK_RUNNING); 369 - 370 - err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0; 371 - } 372 - return err; 373 - } 374 - 375 336 static int concat_erase(struct mtd_info *mtd, struct erase_info *instr) 376 337 { 377 338 struct mtd_concat *concat = CONCAT(mtd); ··· 427 466 erase->len = length; 428 467 429 468 length -= erase->len; 430 - if ((err = concat_dev_erase(subdev, erase))) { 469 + erase->mtd = subdev; 470 + if ((err = mtd_erase(subdev, erase))) { 431 471 /* sanity check: should never happen since 432 472 * block alignment has been checked above */ 433 473 BUG_ON(err == -EINVAL); ··· 449 487 } 450 488 instr->state = erase->state; 451 489 kfree(erase); 452 - if (err) 453 - return err; 454 490 455 - if (instr->callback) 456 - instr->callback(instr); 457 - return 0; 491 + return err; 458 492 } 459 493 460 494 static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+3 -5
drivers/mtd/mtdcore.c
··· 945 945 EXPORT_SYMBOL_GPL(__put_mtd_device); 946 946 947 947 /* 948 - * Erase is an asynchronous operation. Device drivers are supposed 949 - * to call instr->callback() whenever the operation completes, even 950 - * if it completes with a failure. 951 - * Callers are supposed to pass a callback function and wait for it 952 - * to be called before writing to the block. 948 + * Erase is a synchronous operation. Device drivers are expected to return a 949 + * negative error code if the operation failed and update instr->fail_addr 950 + * to point to the portion that was not properly erased. 953 951 */ 954 952 int mtd_erase(struct mtd_info *mtd, struct erase_info *instr) 955 953 {
-19
drivers/mtd/mtdoops.c
··· 84 84 return test_bit(page, cxt->oops_page_used); 85 85 } 86 86 87 - static void mtdoops_erase_callback(struct erase_info *done) 88 - { 89 - wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv; 90 - wake_up(wait_q); 91 - } 92 - 93 87 static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset) 94 88 { 95 89 struct mtd_info *mtd = cxt->mtd; ··· 91 97 u32 start_page = start_page_offset / record_size; 92 98 u32 erase_pages = mtd->erasesize / record_size; 93 99 struct erase_info erase; 94 - DECLARE_WAITQUEUE(wait, current); 95 - wait_queue_head_t wait_q; 96 100 int ret; 97 101 int page; 98 102 99 - init_waitqueue_head(&wait_q); 100 103 erase.mtd = mtd; 101 - erase.callback = mtdoops_erase_callback; 102 104 erase.addr = offset; 103 105 erase.len = mtd->erasesize; 104 - erase.priv = (u_long)&wait_q; 105 - 106 - set_current_state(TASK_INTERRUPTIBLE); 107 - add_wait_queue(&wait_q, &wait); 108 106 109 107 ret = mtd_erase(mtd, &erase); 110 108 if (ret) { 111 - set_current_state(TASK_RUNNING); 112 - remove_wait_queue(&wait_q, &wait); 113 109 printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n", 114 110 (unsigned long long)erase.addr, 115 111 (unsigned long long)erase.len, mtddev); 116 112 return ret; 117 113 } 118 - 119 - schedule(); /* Wait for erase to finish. */ 120 - remove_wait_queue(&wait_q, &wait); 121 114 122 115 /* Mark pages as unused */ 123 116 for (page = start_page; page < start_page + erase_pages; page++)
-2
drivers/mtd/mtdpart.c
··· 222 222 instr->fail_addr -= part->offset; 223 223 instr->addr -= part->offset; 224 224 } 225 - if (instr->callback) 226 - instr->callback(instr); 227 225 } 228 226 EXPORT_SYMBOL_GPL(mtd_erase_callback); 229 227
-32
drivers/mtd/mtdswap.c
··· 536 536 mtdswap_rb_add(d, eb, MTDSWAP_HIFRAG); 537 537 } 538 538 539 - 540 - static void mtdswap_erase_callback(struct erase_info *done) 541 - { 542 - wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv; 543 - wake_up(wait_q); 544 - } 545 - 546 539 static int mtdswap_erase_block(struct mtdswap_dev *d, struct swap_eb *eb) 547 540 { 548 541 struct mtd_info *mtd = d->mtd; 549 542 struct erase_info erase; 550 - wait_queue_head_t wq; 551 543 unsigned int retries = 0; 552 544 int ret; 553 545 ··· 548 556 d->max_erase_count = eb->erase_count; 549 557 550 558 retry: 551 - init_waitqueue_head(&wq); 552 559 memset(&erase, 0, sizeof(struct erase_info)); 553 560 554 561 erase.mtd = mtd; 555 - erase.callback = mtdswap_erase_callback; 556 562 erase.addr = mtdswap_eb_offset(d, eb); 557 563 erase.len = mtd->erasesize; 558 - erase.priv = (u_long)&wq; 559 564 560 565 ret = mtd_erase(mtd, &erase); 561 566 if (ret) { ··· 566 577 567 578 dev_err(d->dev, "Cannot erase erase block %#llx on %s\n", 568 579 erase.addr, mtd->name); 569 - 570 - mtdswap_handle_badblock(d, eb); 571 - return -EIO; 572 - } 573 - 574 - ret = wait_event_interruptible(wq, erase.state == MTD_ERASE_DONE || 575 - erase.state == MTD_ERASE_FAILED); 576 - if (ret) { 577 - dev_err(d->dev, "Interrupted erase block %#llx erasure on %s\n", 578 - erase.addr, mtd->name); 579 - return -EINTR; 580 - } 581 - 582 - if (erase.state == MTD_ERASE_FAILED) { 583 - if (retries++ < MTDSWAP_ERASE_RETRIES) { 584 - dev_warn(d->dev, 585 - "erase of erase block %#llx on %s failed", 586 - erase.addr, mtd->name); 587 - yield(); 588 - goto retry; 589 - } 590 580 591 581 mtdswap_handle_badblock(d, eb); 592 582 return -EIO;
+1 -3
drivers/mtd/nftlmount.c
··· 331 331 instr->mtd = nftl->mbd.mtd; 332 332 instr->addr = block * nftl->EraseSize; 333 333 instr->len = nftl->EraseSize; 334 - mtd_erase(mtd, instr); 335 - 336 - if (instr->state == MTD_ERASE_FAILED) { 334 + if (mtd_erase(mtd, instr)) { 337 335 printk("Error while formatting block %d\n", block); 338 336 goto fail; 339 337 }
+28 -64
drivers/mtd/rfd_ftl.c
··· 266 266 return 0; 267 267 } 268 268 269 - static void erase_callback(struct erase_info *erase) 270 - { 271 - struct partition *part; 272 - u16 magic; 273 - int i, rc; 274 - size_t retlen; 275 - 276 - part = (struct partition*)erase->priv; 277 - 278 - i = (u32)erase->addr / part->block_size; 279 - if (i >= part->total_blocks || part->blocks[i].offset != erase->addr || 280 - erase->addr > UINT_MAX) { 281 - printk(KERN_ERR PREFIX "erase callback for unknown offset %llx " 282 - "on '%s'\n", (unsigned long long)erase->addr, part->mbd.mtd->name); 283 - return; 284 - } 285 - 286 - if (erase->state != MTD_ERASE_DONE) { 287 - printk(KERN_WARNING PREFIX "erase failed at 0x%llx on '%s', " 288 - "state %d\n", (unsigned long long)erase->addr, 289 - part->mbd.mtd->name, erase->state); 290 - 291 - part->blocks[i].state = BLOCK_FAILED; 292 - part->blocks[i].free_sectors = 0; 293 - part->blocks[i].used_sectors = 0; 294 - 295 - kfree(erase); 296 - 297 - return; 298 - } 299 - 300 - magic = cpu_to_le16(RFD_MAGIC); 301 - 302 - part->blocks[i].state = BLOCK_ERASED; 303 - part->blocks[i].free_sectors = part->data_sectors_per_block; 304 - part->blocks[i].used_sectors = 0; 305 - part->blocks[i].erases++; 306 - 307 - rc = mtd_write(part->mbd.mtd, part->blocks[i].offset, sizeof(magic), 308 - &retlen, (u_char *)&magic); 309 - 310 - if (!rc && retlen != sizeof(magic)) 311 - rc = -EIO; 312 - 313 - if (rc) { 314 - printk(KERN_ERR PREFIX "'%s': unable to write RFD " 315 - "header at 0x%lx\n", 316 - part->mbd.mtd->name, 317 - part->blocks[i].offset); 318 - part->blocks[i].state = BLOCK_FAILED; 319 - } 320 - else 321 - part->blocks[i].state = BLOCK_OK; 322 - 323 - kfree(erase); 324 - } 325 - 326 269 static int erase_block(struct partition *part, int block) 327 270 { 328 271 struct erase_info *erase; 329 - int rc = -ENOMEM; 272 + int rc; 330 273 331 274 erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL); 332 275 if (!erase) 333 - goto err; 276 + return -ENOMEM; 334 277 335 278 erase->mtd = 
part->mbd.mtd; 336 - erase->callback = erase_callback; 337 279 erase->addr = part->blocks[block].offset; 338 280 erase->len = part->block_size; 339 - erase->priv = (u_long)part; 340 281 341 282 part->blocks[block].state = BLOCK_ERASING; 342 283 part->blocks[block].free_sectors = 0; 343 284 344 285 rc = mtd_erase(part->mbd.mtd, erase); 345 - 346 286 if (rc) { 347 287 printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' " 348 288 "failed\n", (unsigned long long)erase->addr, 349 289 (unsigned long long)erase->len, part->mbd.mtd->name); 350 - kfree(erase); 290 + part->blocks[block].state = BLOCK_FAILED; 291 + part->blocks[block].free_sectors = 0; 292 + part->blocks[block].used_sectors = 0; 293 + } else { 294 + u16 magic = cpu_to_le16(RFD_MAGIC); 295 + size_t retlen; 296 + 297 + part->blocks[block].state = BLOCK_ERASED; 298 + part->blocks[block].free_sectors = part->data_sectors_per_block; 299 + part->blocks[block].used_sectors = 0; 300 + part->blocks[block].erases++; 301 + 302 + rc = mtd_write(part->mbd.mtd, part->blocks[block].offset, 303 + sizeof(magic), &retlen, (u_char *)&magic); 304 + if (!rc && retlen != sizeof(magic)) 305 + rc = -EIO; 306 + 307 + if (rc) { 308 + pr_err(PREFIX "'%s': unable to write RFD header at 0x%lx\n", 309 + part->mbd.mtd->name, part->blocks[block].offset); 310 + part->blocks[block].state = BLOCK_FAILED; 311 + } else { 312 + part->blocks[block].state = BLOCK_OK; 313 + } 351 314 } 352 315 353 - err: 316 + kfree(erase); 317 + 354 318 return rc; 355 319 } 356 320
-18
drivers/mtd/sm_ftl.c
··· 461 461 struct erase_info erase; 462 462 463 463 erase.mtd = mtd; 464 - erase.callback = sm_erase_callback; 465 464 erase.addr = sm_mkoffset(ftl, zone_num, block, 0); 466 465 erase.len = ftl->block_size; 467 - erase.priv = (u_long)ftl; 468 466 469 467 if (ftl->unstable) 470 468 return -EIO; ··· 480 482 goto error; 481 483 } 482 484 483 - if (erase.state == MTD_ERASE_PENDING) 484 - wait_for_completion(&ftl->erase_completion); 485 - 486 - if (erase.state != MTD_ERASE_DONE) { 487 - sm_printk("erase of block %d in zone %d failed after wait", 488 - block, zone_num); 489 - goto error; 490 - } 491 - 492 485 if (put_free) 493 486 kfifo_in(&zone->free_sectors, 494 487 (const unsigned char *)&block, sizeof(block)); ··· 488 499 error: 489 500 sm_mark_block_bad(ftl, zone_num, block); 490 501 return -EIO; 491 - } 492 - 493 - static void sm_erase_callback(struct erase_info *self) 494 - { 495 - struct sm_ftl *ftl = (struct sm_ftl *)self->priv; 496 - complete(&ftl->erase_completion); 497 502 } 498 503 499 504 /* Thoroughly test that block is valid. */ ··· 1124 1141 mutex_init(&ftl->mutex); 1125 1142 timer_setup(&ftl->timer, sm_cache_flush_timer, 0); 1126 1143 INIT_WORK(&ftl->flush_work, sm_cache_flush_work); 1127 - init_completion(&ftl->erase_completion); 1128 1144 1129 1145 /* Read media information */ 1130 1146 if (sm_get_media_info(ftl, mtd)) {
-4
drivers/mtd/sm_ftl.h
··· 53 53 struct work_struct flush_work; 54 54 struct timer_list timer; 55 55 56 - /* Async erase stuff */ 57 - struct completion erase_completion; 58 - 59 56 /* Geometry stuff */ 60 57 int heads; 61 58 int sectors; ··· 83 86 printk(KERN_DEBUG "sm_ftl" ": " format "\n", ## __VA_ARGS__) 84 87 85 88 86 - static void sm_erase_callback(struct erase_info *self); 87 89 static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block, 88 90 int put_free); 89 91 static void sm_mark_block_bad(struct sm_ftl *ftl, int zone_num, int block);
-4
drivers/mtd/tests/mtd_test.c
··· 24 24 return err; 25 25 } 26 26 27 - if (ei.state == MTD_ERASE_FAILED) { 28 - pr_info("some erase error occurred at EB %d\n", ebnum); 29 - return -EIO; 30 - } 31 27 return 0; 32 28 } 33 29
-6
drivers/mtd/tests/speedtest.c
··· 70 70 return err; 71 71 } 72 72 73 - if (ei.state == MTD_ERASE_FAILED) { 74 - pr_err("some erase error occurred at EB %d," 75 - "blocks %d\n", ebnum, blocks); 76 - return -EIO; 77 - } 78 - 79 73 return 0; 80 74 } 81 75
-35
drivers/mtd/ubi/io.c
··· 309 309 } 310 310 311 311 /** 312 - * erase_callback - MTD erasure call-back. 313 - * @ei: MTD erase information object. 314 - * 315 - * Note, even though MTD erase interface is asynchronous, all the current 316 - * implementations are synchronous anyway. 317 - */ 318 - static void erase_callback(struct erase_info *ei) 319 - { 320 - wake_up_interruptible((wait_queue_head_t *)ei->priv); 321 - } 322 - 323 - /** 324 312 * do_sync_erase - synchronously erase a physical eraseblock. 325 313 * @ubi: UBI device description object 326 314 * @pnum: the physical eraseblock number to erase ··· 321 333 { 322 334 int err, retries = 0; 323 335 struct erase_info ei; 324 - wait_queue_head_t wq; 325 336 326 337 dbg_io("erase PEB %d", pnum); 327 338 ubi_assert(pnum >= 0 && pnum < ubi->peb_count); ··· 331 344 } 332 345 333 346 retry: 334 - init_waitqueue_head(&wq); 335 347 memset(&ei, 0, sizeof(struct erase_info)); 336 348 337 349 ei.mtd = ubi->mtd; 338 350 ei.addr = (loff_t)pnum * ubi->peb_size; 339 351 ei.len = ubi->peb_size; 340 - ei.callback = erase_callback; 341 - ei.priv = (unsigned long)&wq; 342 352 343 353 err = mtd_erase(ubi->mtd, &ei); 344 354 if (err) { ··· 348 364 ubi_err(ubi, "cannot erase PEB %d, error %d", pnum, err); 349 365 dump_stack(); 350 366 return err; 351 - } 352 - 353 - err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE || 354 - ei.state == MTD_ERASE_FAILED); 355 - if (err) { 356 - ubi_err(ubi, "interrupted PEB %d erasure", pnum); 357 - return -EINTR; 358 - } 359 - 360 - if (ei.state == MTD_ERASE_FAILED) { 361 - if (retries++ < UBI_IO_RETRIES) { 362 - ubi_warn(ubi, "error while erasing PEB %d, retry", 363 - pnum); 364 - yield(); 365 - goto retry; 366 - } 367 - ubi_err(ubi, "cannot erase PEB %d", pnum); 368 - dump_stack(); 369 - return -EIO; 370 367 } 371 368 372 369 err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
+5 -31
fs/jffs2/erase.c
··· 21 21 #include <linux/pagemap.h> 22 22 #include "nodelist.h" 23 23 24 - struct erase_priv_struct { 25 - struct jffs2_eraseblock *jeb; 26 - struct jffs2_sb_info *c; 27 - }; 28 - 29 - #ifndef __ECOS 30 - static void jffs2_erase_callback(struct erase_info *); 31 - #endif 32 24 static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset); 33 25 static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); 34 26 static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); ··· 43 51 jffs2_dbg(1, "%s(): erase block %#08x (range %#08x-%#08x)\n", 44 52 __func__, 45 53 jeb->offset, jeb->offset, jeb->offset + c->sector_size); 46 - instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL); 54 + instr = kmalloc(sizeof(struct erase_info), GFP_KERNEL); 47 55 if (!instr) { 48 56 pr_warn("kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n"); 49 57 mutex_lock(&c->erase_free_sem); ··· 62 70 instr->mtd = c->mtd; 63 71 instr->addr = jeb->offset; 64 72 instr->len = c->sector_size; 65 - instr->callback = jffs2_erase_callback; 66 - instr->priv = (unsigned long)(&instr[1]); 67 - 68 - ((struct erase_priv_struct *)instr->priv)->jeb = jeb; 69 - ((struct erase_priv_struct *)instr->priv)->c = c; 70 73 71 74 ret = mtd_erase(c->mtd, instr); 72 - if (!ret) 75 + if (!ret) { 76 + jffs2_erase_succeeded(c, jeb); 77 + kfree(instr); 73 78 return; 79 + } 74 80 75 81 bad_offset = instr->fail_addr; 76 82 kfree(instr); ··· 203 213 mutex_unlock(&c->erase_free_sem); 204 214 wake_up(&c->erase_wait); 205 215 } 206 - 207 - #ifndef __ECOS 208 - static void jffs2_erase_callback(struct erase_info *instr) 209 - { 210 - struct erase_priv_struct *priv = (void *)instr->priv; 211 - 212 - if(instr->state != MTD_ERASE_DONE) { 213 - pr_warn("Erase at 0x%08llx finished, but state != MTD_ERASE_DONE. 
State is 0x%x instead.\n", 214 - (unsigned long long)instr->addr, instr->state); 215 - jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr); 216 - } else { 217 - jffs2_erase_succeeded(priv->c, priv->jeb); 218 - } 219 - kfree(instr); 220 - } 221 - #endif /* !__ECOS */ 222 216 223 217 /* Hmmm. Maybe we should accept the extra space it takes and make 224 218 this a standard doubly-linked list? */
-2
include/linux/mtd/mtd.h
··· 48 48 uint64_t addr; 49 49 uint64_t len; 50 50 uint64_t fail_addr; 51 - void (*callback) (struct erase_info *self); 52 - u_long priv; 53 51 u_char state; 54 52 }; 55 53