Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[JFFS2] Fix node allocation leak

In the rare case of failing to write the cleanmarker,
the allocated raw node reference was not freed.

Pointed out by Forrest Zhao.
Initial cleanup by Joern Engel.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

authored by

Thomas Gleixner and committed by
Thomas Gleixner
5d157885 ba460e48

+96 -86
+96 -86
fs/jffs2/erase.c
··· 7 7 * 8 8 * For licensing information, see the file 'LICENCE' in this directory. 9 9 * 10 - * $Id: erase.c,v 1.76 2005/05/03 15:11:40 dedekind Exp $ 10 + * $Id: erase.c,v 1.80 2005/07/14 19:46:24 joern Exp $ 11 11 * 12 12 */ 13 13 ··· 300 300 jeb->last_node = NULL; 301 301 } 302 302 303 + static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *bad_offset) 304 + { 305 + void *ebuf; 306 + uint32_t ofs; 307 + size_t retlen; 308 + int ret = -EIO; 309 + 310 + ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL); 311 + if (!ebuf) { 312 + printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n", jeb->offset); 313 + return -EAGAIN; 314 + } 315 + 316 + D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset)); 317 + 318 + for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) { 319 + uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs); 320 + int i; 321 + 322 + *bad_offset = ofs; 323 + 324 + ret = jffs2_flash_read(c, ofs, readlen, &retlen, ebuf); 325 + if (ret) { 326 + printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret); 327 + goto fail; 328 + } 329 + if (retlen != readlen) { 330 + printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen); 331 + goto fail; 332 + } 333 + for (i=0; i<readlen; i += sizeof(unsigned long)) { 334 + /* It's OK. 
We know it's properly aligned */ 335 + unsigned long *datum = ebuf + i; 336 + if (*datum + 1) { 337 + *bad_offset += i; 338 + printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", *datum, *bad_offset); 339 + goto fail; 340 + } 341 + } 342 + ofs += readlen; 343 + cond_resched(); 344 + } 345 + ret = 0; 346 + fail: 347 + kfree(ebuf); 348 + return ret; 349 + } 350 + 303 351 static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) 304 352 { 305 353 struct jffs2_raw_node_ref *marker_ref = NULL; 306 - unsigned char *ebuf; 307 354 size_t retlen; 308 355 int ret; 309 356 uint32_t bad_offset; 310 357 311 - if ((!jffs2_cleanmarker_oob(c)) && (c->cleanmarker_size > 0)) { 312 - marker_ref = jffs2_alloc_raw_node_ref(); 313 - if (!marker_ref) { 314 - printk(KERN_WARNING "Failed to allocate raw node ref for clean marker\n"); 315 - /* Stick it back on the list from whence it came and come back later */ 316 - jffs2_erase_pending_trigger(c); 317 - spin_lock(&c->erase_completion_lock); 318 - list_add(&jeb->list, &c->erase_complete_list); 319 - spin_unlock(&c->erase_completion_lock); 320 - return; 321 - } 358 + switch (jffs2_block_check_erase(c, jeb, &bad_offset)) { 359 + case -EAGAIN: goto refile; 360 + case -EIO: goto filebad; 322 361 } 323 - ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL); 324 - if (!ebuf) { 325 - printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Assuming it worked\n", jeb->offset); 326 - } else { 327 - uint32_t ofs = jeb->offset; 328 - 329 - D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset)); 330 - while(ofs < jeb->offset + c->sector_size) { 331 - uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs); 332 - int i; 333 - 334 - bad_offset = ofs; 335 - 336 - ret = c->mtd->read(c->mtd, ofs, readlen, &retlen, ebuf); 337 - 338 - if (ret) { 339 - printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. 
Putting on bad_list\n", ofs, ret); 340 - goto bad; 341 - } 342 - if (retlen != readlen) { 343 - printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen); 344 - goto bad; 345 - } 346 - for (i=0; i<readlen; i += sizeof(unsigned long)) { 347 - /* It's OK. We know it's properly aligned */ 348 - unsigned long datum = *(unsigned long *)(&ebuf[i]); 349 - if (datum + 1) { 350 - bad_offset += i; 351 - printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", datum, bad_offset); 352 - bad: 353 - if ((!jffs2_cleanmarker_oob(c)) && (c->cleanmarker_size > 0)) 354 - jffs2_free_raw_node_ref(marker_ref); 355 - kfree(ebuf); 356 - bad2: 357 - spin_lock(&c->erase_completion_lock); 358 - /* Stick it on a list (any list) so 359 - erase_failed can take it right off 360 - again. Silly, but shouldn't happen 361 - often. */ 362 - list_add(&jeb->list, &c->erasing_list); 363 - spin_unlock(&c->erase_completion_lock); 364 - jffs2_erase_failed(c, jeb, bad_offset); 365 - return; 366 - } 367 - } 368 - ofs += readlen; 369 - cond_resched(); 370 - } 371 - kfree(ebuf); 372 - } 373 - 374 - bad_offset = jeb->offset; 375 362 376 363 /* Write the erase complete marker */ 377 364 D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset)); 378 - if (jffs2_cleanmarker_oob(c)) { 365 + bad_offset = jeb->offset; 379 366 380 - if (jffs2_write_nand_cleanmarker(c, jeb)) 381 - goto bad2; 382 - 367 + /* Cleanmarker in oob area or no cleanmarker at all ? 
*/ 368 + if (jffs2_cleanmarker_oob(c) || c->cleanmarker_size == 0) { 369 + 370 + if (jffs2_cleanmarker_oob(c)) { 371 + if (jffs2_write_nand_cleanmarker(c, jeb)) 372 + goto filebad; 373 + } 374 + 383 375 jeb->first_node = jeb->last_node = NULL; 384 - 385 376 jeb->free_size = c->sector_size; 386 377 jeb->used_size = 0; 387 378 jeb->dirty_size = 0; 388 379 jeb->wasted_size = 0; 389 - } else if (c->cleanmarker_size == 0) { 390 - jeb->first_node = jeb->last_node = NULL; 391 380 392 - jeb->free_size = c->sector_size; 393 - jeb->used_size = 0; 394 - jeb->dirty_size = 0; 395 - jeb->wasted_size = 0; 396 381 } else { 382 + 397 383 struct kvec vecs[1]; 398 384 struct jffs2_unknown_node marker = { 399 385 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK), ··· 387 401 .totlen = cpu_to_je32(c->cleanmarker_size) 388 402 }; 389 403 404 + marker_ref = jffs2_alloc_raw_node_ref(); 405 + if (!marker_ref) { 406 + printk(KERN_WARNING "Failed to allocate raw node ref for clean marker. Refiling\n"); 407 + goto refile; 408 + } 409 + 390 410 marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4)); 391 411 392 412 vecs[0].iov_base = (unsigned char *) &marker; 393 413 vecs[0].iov_len = sizeof(marker); 394 414 ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen); 395 415 396 - if (ret) { 397 - printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n", 398 - jeb->offset, ret); 399 - goto bad2; 400 - } 401 - if (retlen != sizeof(marker)) { 402 - printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n", 403 - jeb->offset, sizeof(marker), retlen); 404 - goto bad2; 416 + if (ret || retlen != sizeof(marker)) { 417 + if (ret) 418 + printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n", 419 + jeb->offset, ret); 420 + else 421 + printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n", 422 + jeb->offset, sizeof(marker), retlen); 423 + 424 + 
jffs2_free_raw_node_ref(marker_ref); 425 + goto filebad; 405 426 } 406 427 407 428 marker_ref->next_in_ino = NULL; ··· 437 444 c->nr_free_blocks++; 438 445 spin_unlock(&c->erase_completion_lock); 439 446 wake_up(&c->erase_wait); 440 - } 447 + return; 441 448 449 + filebad: 450 + spin_lock(&c->erase_completion_lock); 451 + /* Stick it on a list (any list) so erase_failed can take it 452 + right off again. Silly, but shouldn't happen often. */ 453 + list_add(&jeb->list, &c->erasing_list); 454 + spin_unlock(&c->erase_completion_lock); 455 + jffs2_erase_failed(c, jeb, bad_offset); 456 + return; 457 + 458 + refile: 459 + /* Stick it back on the list from whence it came and come back later */ 460 + jffs2_erase_pending_trigger(c); 461 + spin_lock(&c->erase_completion_lock); 462 + list_add(&jeb->list, &c->erase_complete_list); 463 + spin_unlock(&c->erase_completion_lock); 464 + return; 465 + }