Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'ubifs-for-linus-6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs

Pull jffs2, ubi and ubifs updates from Richard Weinberger:
"JFFS2:
- Fix memory corruption in error path
- Spelling and coding style fixes

UBI:
- Switch to BLK_MQ_F_BLOCKING in ubiblock
- Wire up parent device (for sysfs)
- Multiple UAF bugfixes
- Fix for an infinite loop in WL error path

UBIFS:
- Fix for multiple memory leaks in error paths
- Fixes for wrong space accounting
- Minor cleanups
- Spelling and coding style fixes"

* tag 'ubifs-for-linus-6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs: (36 commits)
ubi: block: Fix a possible use-after-free bug in ubiblock_create()
ubifs: make kobj_type structures constant
mtd: ubi: block: wire-up device parent
mtd: ubi: wire-up parent MTD device
ubi: use correct names in function kernel-doc comments
ubi: block: set BLK_MQ_F_BLOCKING
jffs2: Fix list_del corruption if compressors initialized failed
jffs2: Use function instead of macro when initialize compressors
jffs2: fix spelling mistake "neccecary"->"necessary"
ubifs: Fix kernel-doc
ubifs: Fix some kernel-doc comments
UBI: Fastmap: Fix kernel-doc
ubi: ubi_wl_put_peb: Fix infinite loop when wear-leveling work failed
ubi: Fix UAF wear-leveling entry in eraseblk_count_seq_show()
ubi: fastmap: Fix missed fm_anchor PEB in wear-leveling after disabling fastmap
ubifs: ubifs_releasepage: Remove ubifs_assert(0) to valid this process
ubifs: ubifs_writepage: Mark page dirty after writing inode failed
ubifs: dirty_cow_znode: Fix memleak in error handling path
ubifs: Re-statistic cleaned znode count if commit failed
ubi: Fix permission display of the debugfs files
...

+275 -169
+37 -74
drivers/mtd/ubi/block.c
··· 35 35 #include <linux/mutex.h> 36 36 #include <linux/slab.h> 37 37 #include <linux/mtd/ubi.h> 38 - #include <linux/workqueue.h> 39 38 #include <linux/blkdev.h> 40 39 #include <linux/blk-mq.h> 41 40 #include <linux/hdreg.h> ··· 61 62 }; 62 63 63 64 struct ubiblock_pdu { 64 - struct work_struct work; 65 65 struct ubi_sgl usgl; 66 66 }; 67 67 ··· 79 81 80 82 struct gendisk *gd; 81 83 struct request_queue *rq; 82 - 83 - struct workqueue_struct *wq; 84 84 85 85 struct mutex dev_mutex; 86 86 struct list_head list; ··· 177 181 return NULL; 178 182 } 179 183 180 - static int ubiblock_read(struct ubiblock_pdu *pdu) 184 + static blk_status_t ubiblock_read(struct request *req) 181 185 { 182 - int ret, leb, offset, bytes_left, to_read; 183 - u64 pos; 184 - struct request *req = blk_mq_rq_from_pdu(pdu); 186 + struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req); 185 187 struct ubiblock *dev = req->q->queuedata; 186 - 187 - to_read = blk_rq_bytes(req); 188 - pos = blk_rq_pos(req) << 9; 189 - 188 + u64 pos = blk_rq_pos(req) << 9; 189 + int to_read = blk_rq_bytes(req); 190 + int bytes_left = to_read; 190 191 /* Get LEB:offset address to read from */ 191 - offset = do_div(pos, dev->leb_size); 192 - leb = pos; 193 - bytes_left = to_read; 192 + int offset = do_div(pos, dev->leb_size); 193 + int leb = pos; 194 + struct req_iterator iter; 195 + struct bio_vec bvec; 196 + int ret; 197 + 198 + blk_mq_start_request(req); 199 + 200 + /* 201 + * It is safe to ignore the return value of blk_rq_map_sg() because 202 + * the number of sg entries is limited to UBI_MAX_SG_COUNT 203 + * and ubi_read_sg() will check that limit. 
204 + */ 205 + ubi_sgl_init(&pdu->usgl); 206 + blk_rq_map_sg(req->q, req, pdu->usgl.sg); 194 207 195 208 while (bytes_left) { 196 209 /* ··· 211 206 212 207 ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read); 213 208 if (ret < 0) 214 - return ret; 209 + break; 215 210 216 211 bytes_left -= to_read; 217 212 to_read = bytes_left; 218 213 leb += 1; 219 214 offset = 0; 220 215 } 221 - return 0; 216 + 217 + rq_for_each_segment(bvec, req, iter) 218 + flush_dcache_page(bvec.bv_page); 219 + return errno_to_blk_status(ret); 222 220 } 223 221 224 222 static int ubiblock_open(struct block_device *bdev, fmode_t mode) ··· 297 289 .getgeo = ubiblock_getgeo, 298 290 }; 299 291 300 - static void ubiblock_do_work(struct work_struct *work) 301 - { 302 - int ret; 303 - struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work); 304 - struct request *req = blk_mq_rq_from_pdu(pdu); 305 - struct req_iterator iter; 306 - struct bio_vec bvec; 307 - 308 - blk_mq_start_request(req); 309 - 310 - /* 311 - * It is safe to ignore the return value of blk_rq_map_sg() because 312 - * the number of sg entries is limited to UBI_MAX_SG_COUNT 313 - * and ubi_read_sg() will check that limit. 
314 - */ 315 - blk_rq_map_sg(req->q, req, pdu->usgl.sg); 316 - 317 - ret = ubiblock_read(pdu); 318 - 319 - rq_for_each_segment(bvec, req, iter) 320 - flush_dcache_page(bvec.bv_page); 321 - 322 - blk_mq_end_request(req, errno_to_blk_status(ret)); 323 - } 324 - 325 292 static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx, 326 293 const struct blk_mq_queue_data *bd) 327 294 { 328 - struct request *req = bd->rq; 329 - struct ubiblock *dev = hctx->queue->queuedata; 330 - struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req); 331 - 332 - switch (req_op(req)) { 295 + switch (req_op(bd->rq)) { 333 296 case REQ_OP_READ: 334 - ubi_sgl_init(&pdu->usgl); 335 - queue_work(dev->wq, &pdu->work); 336 - return BLK_STS_OK; 297 + return ubiblock_read(bd->rq); 337 298 default: 338 299 return BLK_STS_IOERR; 339 300 } 340 - 341 301 } 342 302 343 303 static int ubiblock_init_request(struct blk_mq_tag_set *set, ··· 315 339 struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req); 316 340 317 341 sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT); 318 - INIT_WORK(&pdu->work, ubiblock_do_work); 319 - 320 342 return 0; 321 343 } 322 344 ··· 328 354 u64 size = vi->used_bytes >> 9; 329 355 330 356 if (vi->used_bytes % 512) { 331 - pr_warn("UBI: block: volume size is not a multiple of 512, " 332 - "last %llu bytes are ignored!\n", 333 - vi->used_bytes - (size << 9)); 357 + if (vi->vol_type == UBI_DYNAMIC_VOLUME) 358 + pr_warn("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n", 359 + vi->used_bytes - (size << 9)); 360 + else 361 + pr_info("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n", 362 + vi->used_bytes - (size << 9)); 334 363 } 335 364 336 365 if ((sector_t)size != size) ··· 378 401 dev->tag_set.ops = &ubiblock_mq_ops; 379 402 dev->tag_set.queue_depth = 64; 380 403 dev->tag_set.numa_node = NUMA_NO_NODE; 381 - dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 404 + dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING; 382 405 
dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu); 383 406 dev->tag_set.driver_data = dev; 384 407 dev->tag_set.nr_hw_queues = 1; ··· 416 439 dev->rq = gd->queue; 417 440 blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT); 418 441 419 - /* 420 - * Create one workqueue per volume (per registered block device). 421 - * Remember workqueues are cheap, they're not threads. 422 - */ 423 - dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name); 424 - if (!dev->wq) { 425 - ret = -ENOMEM; 426 - goto out_remove_minor; 427 - } 428 - 429 442 list_add_tail(&dev->list, &ubiblock_devices); 430 443 431 444 /* Must be the last step: anyone can call file ops from now on */ 432 - ret = add_disk(dev->gd); 445 + ret = device_add_disk(vi->dev, dev->gd, NULL); 433 446 if (ret) 434 - goto out_destroy_wq; 447 + goto out_remove_minor; 435 448 436 449 dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)", 437 450 dev->ubi_num, dev->vol_id, vi->name); 438 451 mutex_unlock(&devices_mutex); 439 452 return 0; 440 453 441 - out_destroy_wq: 442 - list_del(&dev->list); 443 - destroy_workqueue(dev->wq); 444 454 out_remove_minor: 455 + list_del(&dev->list); 445 456 idr_remove(&ubiblock_minor_idr, gd->first_minor); 446 457 out_cleanup_disk: 447 458 put_disk(dev->gd); ··· 447 482 { 448 483 /* Stop new requests to arrive */ 449 484 del_gendisk(dev->gd); 450 - /* Flush pending work */ 451 - destroy_workqueue(dev->wq); 452 485 /* Finally destroy the blk queue */ 453 486 dev_info(disk_to_dev(dev->gd), "released"); 454 487 put_disk(dev->gd);
+28 -4
drivers/mtd/ubi/build.c
··· 35 35 #define MTD_PARAM_LEN_MAX 64 36 36 37 37 /* Maximum number of comma-separated items in the 'mtd=' parameter */ 38 - #define MTD_PARAM_MAX_COUNT 4 38 + #define MTD_PARAM_MAX_COUNT 5 39 39 40 40 /* Maximum value for the number of bad PEBs per 1024 PEBs */ 41 41 #define MAX_MTD_UBI_BEB_LIMIT 768 ··· 53 53 * @ubi_num: UBI number 54 54 * @vid_hdr_offs: VID header offset 55 55 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs 56 + * @enable_fm: enable fastmap when value is non-zero 56 57 */ 57 58 struct mtd_dev_param { 58 59 char name[MTD_PARAM_LEN_MAX]; 59 60 int ubi_num; 60 61 int vid_hdr_offs; 61 62 int max_beb_per1024; 63 + int enable_fm; 62 64 }; 63 65 64 66 /* Numbers of elements set in the @mtd_dev_param array */ ··· 470 468 err = ubi_add_volume(ubi, ubi->volumes[i]); 471 469 if (err) { 472 470 ubi_err(ubi, "cannot add volume %d", i); 471 + ubi->volumes[i] = NULL; 473 472 goto out_volumes; 474 473 } 475 474 } ··· 665 662 /* Calculate default aligned sizes of EC and VID headers */ 666 663 ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size); 667 664 ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size); 665 + 666 + if (ubi->vid_hdr_offset && ((ubi->vid_hdr_offset + UBI_VID_HDR_SIZE) > 667 + ubi->vid_hdr_alsize)) { 668 + ubi_err(ubi, "VID header offset %d too large.", ubi->vid_hdr_offset); 669 + return -EINVAL; 670 + } 668 671 669 672 dbg_gen("min_io_size %d", ubi->min_io_size); 670 673 dbg_gen("max_write_size %d", ubi->max_write_size); ··· 915 906 ubi->dev.release = dev_release; 916 907 ubi->dev.class = &ubi_class; 917 908 ubi->dev.groups = ubi_dev_groups; 909 + ubi->dev.parent = &mtd->dev; 918 910 919 911 ubi->mtd = mtd; 920 912 ubi->ubi_num = ubi_num; ··· 1258 1248 mutex_lock(&ubi_devices_mutex); 1259 1249 err = ubi_attach_mtd_dev(mtd, p->ubi_num, 1260 1250 p->vid_hdr_offs, p->max_beb_per1024, 1261 - false); 1251 + p->enable_fm == 0 ? 
true : false); 1262 1252 mutex_unlock(&ubi_devices_mutex); 1263 1253 if (err < 0) { 1264 1254 pr_err("UBI error: cannot attach mtd%d\n", ··· 1437 1427 int err = kstrtoint(token, 10, &p->max_beb_per1024); 1438 1428 1439 1429 if (err) { 1440 - pr_err("UBI error: bad value for max_beb_per1024 parameter: %s", 1430 + pr_err("UBI error: bad value for max_beb_per1024 parameter: %s\n", 1441 1431 token); 1442 1432 return -EINVAL; 1443 1433 } ··· 1448 1438 int err = kstrtoint(token, 10, &p->ubi_num); 1449 1439 1450 1440 if (err) { 1451 - pr_err("UBI error: bad value for ubi_num parameter: %s", 1441 + pr_err("UBI error: bad value for ubi_num parameter: %s\n", 1452 1442 token); 1453 1443 return -EINVAL; 1454 1444 } 1455 1445 } else 1456 1446 p->ubi_num = UBI_DEV_NUM_AUTO; 1447 + 1448 + token = tokens[4]; 1449 + if (token) { 1450 + int err = kstrtoint(token, 10, &p->enable_fm); 1451 + 1452 + if (err) { 1453 + pr_err("UBI error: bad value for enable_fm parameter: %s\n", 1454 + token); 1455 + return -EINVAL; 1456 + } 1457 + } else 1458 + p->enable_fm = 0; 1457 1459 1458 1460 mtd_devs += 1; 1459 1461 return 0; ··· 1479 1457 "Optional \"max_beb_per1024\" parameter specifies the maximum expected bad eraseblock per 1024 eraseblocks. (default value (" 1480 1458 __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n" 1481 1459 "Optional \"ubi_num\" parameter specifies UBI device number which have to be assigned to the newly created UBI device (assigned automatically by default)\n" 1460 + "Optional \"enable_fm\" parameter determines whether to enable fastmap during attach. If the value is non-zero, fastmap is enabled. 
Default value is 0.\n" 1482 1461 "\n" 1483 1462 "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n" 1484 1463 "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n" 1485 1464 "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n" 1486 1465 "Example 4: mtd=/dev/mtd1,0,0,5 - attach MTD device /dev/mtd1 to UBI 5 and using default values for the other fields.\n" 1466 + "example 5: mtd=1,0,0,5 mtd=2,0,0,6,1 - attach MTD device /dev/mtd1 to UBI 5 and disable fastmap; attach MTD device /dev/mtd2 to UBI 6 and enable fastmap.(only works when fastmap is enabled and fm_autoconvert=Y).\n" 1487 1467 "\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device)."); 1488 1468 #ifdef CONFIG_MTD_UBI_FASTMAP 1489 1469 module_param(fm_autoconvert, bool, 0644);
+10 -9
drivers/mtd/ubi/debug.c
··· 504 504 { 505 505 unsigned long ubi_num = ubi->ubi_num; 506 506 struct ubi_debug_info *d = &ubi->dbg; 507 + umode_t mode = S_IRUSR | S_IWUSR; 507 508 int n; 508 509 509 510 if (!IS_ENABLED(CONFIG_DEBUG_FS)) ··· 519 518 520 519 d->dfs_dir = debugfs_create_dir(d->dfs_dir_name, dfs_rootdir); 521 520 522 - d->dfs_chk_gen = debugfs_create_file("chk_gen", S_IWUSR, d->dfs_dir, 521 + d->dfs_chk_gen = debugfs_create_file("chk_gen", mode, d->dfs_dir, 523 522 (void *)ubi_num, &dfs_fops); 524 523 525 - d->dfs_chk_io = debugfs_create_file("chk_io", S_IWUSR, d->dfs_dir, 524 + d->dfs_chk_io = debugfs_create_file("chk_io", mode, d->dfs_dir, 526 525 (void *)ubi_num, &dfs_fops); 527 526 528 - d->dfs_chk_fastmap = debugfs_create_file("chk_fastmap", S_IWUSR, 527 + d->dfs_chk_fastmap = debugfs_create_file("chk_fastmap", mode, 529 528 d->dfs_dir, (void *)ubi_num, 530 529 &dfs_fops); 531 530 532 - d->dfs_disable_bgt = debugfs_create_file("tst_disable_bgt", S_IWUSR, 531 + d->dfs_disable_bgt = debugfs_create_file("tst_disable_bgt", mode, 533 532 d->dfs_dir, (void *)ubi_num, 534 533 &dfs_fops); 535 534 536 535 d->dfs_emulate_bitflips = debugfs_create_file("tst_emulate_bitflips", 537 - S_IWUSR, d->dfs_dir, 536 + mode, d->dfs_dir, 538 537 (void *)ubi_num, 539 538 &dfs_fops); 540 539 541 540 d->dfs_emulate_io_failures = debugfs_create_file("tst_emulate_io_failures", 542 - S_IWUSR, d->dfs_dir, 541 + mode, d->dfs_dir, 543 542 (void *)ubi_num, 544 543 &dfs_fops); 545 544 546 545 d->dfs_emulate_power_cut = debugfs_create_file("tst_emulate_power_cut", 547 - S_IWUSR, d->dfs_dir, 546 + mode, d->dfs_dir, 548 547 (void *)ubi_num, 549 548 &dfs_fops); 550 549 551 550 d->dfs_power_cut_min = debugfs_create_file("tst_emulate_power_cut_min", 552 - S_IWUSR, d->dfs_dir, 551 + mode, d->dfs_dir, 553 552 (void *)ubi_num, &dfs_fops); 554 553 555 554 d->dfs_power_cut_max = debugfs_create_file("tst_emulate_power_cut_max", 556 - S_IWUSR, d->dfs_dir, 555 + mode, d->dfs_dir, 557 556 (void *)ubi_num, &dfs_fops); 558 
557 559 558 debugfs_create_file("detailed_erase_block_info", S_IRUSR, d->dfs_dir,
+1 -1
drivers/mtd/ubi/eba.c
··· 61 61 }; 62 62 63 63 /** 64 - * next_sqnum - get next sequence number. 64 + * ubi_next_sqnum - get next sequence number. 65 65 * @ubi: UBI device description object 66 66 * 67 67 * This function returns next sequence number to use, which is just the current
+7 -5
drivers/mtd/ubi/fastmap-wl.c
··· 146 146 if (ubi->fm_anchor) { 147 147 wl_tree_add(ubi->fm_anchor, &ubi->free); 148 148 ubi->free_count++; 149 + ubi->fm_anchor = NULL; 149 150 } 150 151 151 - /* 152 - * All available PEBs are in ubi->free, now is the time to get 153 - * the best anchor PEBs. 154 - */ 155 - ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1); 152 + if (!ubi->fm_disabled) 153 + /* 154 + * All available PEBs are in ubi->free, now is the time to get 155 + * the best anchor PEBs. 156 + */ 157 + ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1); 156 158 157 159 for (;;) { 158 160 enough = 0;
+1 -1
drivers/mtd/ubi/fastmap.c
··· 93 93 94 94 95 95 /** 96 - * new_fm_vhdr - allocate a new volume header for fastmap usage. 96 + * new_fm_vbuf() - allocate a new volume header for fastmap usage. 97 97 * @ubi: UBI device description object 98 98 * @vol_id: the VID of the new header 99 99 *
+1
drivers/mtd/ubi/kapi.c
··· 79 79 vi->name_len = vol->name_len; 80 80 vi->name = vol->name; 81 81 vi->cdev = vol->cdev.dev; 82 + vi->dev = &vol->dev; 82 83 } 83 84 84 85 /**
+1 -1
drivers/mtd/ubi/misc.c
··· 10 10 #include "ubi.h" 11 11 12 12 /** 13 - * calc_data_len - calculate how much real data is stored in a buffer. 13 + * ubi_calc_data_len - calculate how much real data is stored in a buffer. 14 14 * @ubi: UBI device description object 15 15 * @buf: a buffer with the contents of the physical eraseblock 16 16 * @length: the buffer length
+10 -8
drivers/mtd/ubi/vmt.c
··· 464 464 for (i = 0; i < -pebs; i++) { 465 465 err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i); 466 466 if (err) 467 - goto out_acc; 467 + goto out_free; 468 468 } 469 469 spin_lock(&ubi->volumes_lock); 470 470 ubi->rsvd_pebs += pebs; ··· 512 512 ubi->avail_pebs += pebs; 513 513 spin_unlock(&ubi->volumes_lock); 514 514 } 515 + return err; 516 + 515 517 out_free: 516 - kfree(new_eba_tbl); 518 + ubi_eba_destroy_table(new_eba_tbl); 517 519 return err; 518 520 } 519 521 ··· 582 580 if (err) { 583 581 ubi_err(ubi, "cannot add character device for volume %d, error %d", 584 582 vol_id, err); 583 + vol_release(&vol->dev); 585 584 return err; 586 585 } 587 586 ··· 593 590 vol->dev.groups = volume_dev_groups; 594 591 dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id); 595 592 err = device_register(&vol->dev); 596 - if (err) 597 - goto out_cdev; 593 + if (err) { 594 + cdev_del(&vol->cdev); 595 + put_device(&vol->dev); 596 + return err; 597 + } 598 598 599 599 self_check_volumes(ubi); 600 - return err; 601 - 602 - out_cdev: 603 - cdev_del(&vol->cdev); 604 600 return err; 605 601 } 606 602
+23 -4
drivers/mtd/ubi/wl.c
··· 165 165 } 166 166 167 167 /** 168 - * wl_tree_destroy - destroy a wear-leveling entry. 168 + * wl_entry_destroy - destroy a wear-leveling entry. 169 169 * @ubi: UBI device description object 170 170 * @e: the wear-leveling entry to add 171 171 * ··· 890 890 891 891 err = do_sync_erase(ubi, e1, vol_id, lnum, 0); 892 892 if (err) { 893 - if (e2) 893 + if (e2) { 894 + spin_lock(&ubi->wl_lock); 894 895 wl_entry_destroy(ubi, e2); 896 + spin_unlock(&ubi->wl_lock); 897 + } 895 898 goto out_ro; 896 899 } 897 900 ··· 976 973 spin_lock(&ubi->wl_lock); 977 974 ubi->move_from = ubi->move_to = NULL; 978 975 ubi->move_to_put = ubi->wl_scheduled = 0; 976 + wl_entry_destroy(ubi, e1); 977 + wl_entry_destroy(ubi, e2); 979 978 spin_unlock(&ubi->wl_lock); 980 979 981 980 ubi_free_vid_buf(vidb); 982 - wl_entry_destroy(ubi, e1); 983 - wl_entry_destroy(ubi, e2); 984 981 985 982 out_ro: 986 983 ubi_ro_mode(ubi); ··· 1133 1130 /* Re-schedule the LEB for erasure */ 1134 1131 err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false); 1135 1132 if (err1) { 1133 + spin_lock(&ubi->wl_lock); 1136 1134 wl_entry_destroy(ubi, e); 1135 + spin_unlock(&ubi->wl_lock); 1137 1136 err = err1; 1138 1137 goto out_ro; 1139 1138 } 1140 1139 return err; 1141 1140 } 1142 1141 1142 + spin_lock(&ubi->wl_lock); 1143 1143 wl_entry_destroy(ubi, e); 1144 + spin_unlock(&ubi->wl_lock); 1144 1145 if (err != -EIO) 1145 1146 /* 1146 1147 * If this is not %-EIO, we have no idea what to do. Scheduling ··· 1260 1253 retry: 1261 1254 spin_lock(&ubi->wl_lock); 1262 1255 e = ubi->lookuptbl[pnum]; 1256 + if (!e) { 1257 + /* 1258 + * This wl entry has been removed for some errors by other 1259 + * process (eg. wear leveling worker), corresponding process 1260 + * (except __erase_worker, which cannot concurrent with 1261 + * ubi_wl_put_peb) will set ubi ro_mode at the same time, 1262 + * just ignore this wl entry. 
1263 + */ 1264 + spin_unlock(&ubi->wl_lock); 1265 + up_read(&ubi->fm_protect); 1266 + return 0; 1267 + } 1263 1268 if (e == ubi->move_from) { 1264 1269 /* 1265 1270 * User is putting the physical eraseblock which was selected to
+29 -21
fs/jffs2/compr.c
··· 364 364 365 365 int __init jffs2_compressors_init(void) 366 366 { 367 + int ret = 0; 367 368 /* Registering compressors */ 368 - #ifdef CONFIG_JFFS2_ZLIB 369 - jffs2_zlib_init(); 370 - #endif 371 - #ifdef CONFIG_JFFS2_RTIME 372 - jffs2_rtime_init(); 373 - #endif 374 - #ifdef CONFIG_JFFS2_RUBIN 375 - jffs2_rubinmips_init(); 376 - jffs2_dynrubin_init(); 377 - #endif 378 - #ifdef CONFIG_JFFS2_LZO 379 - jffs2_lzo_init(); 380 - #endif 369 + ret = jffs2_zlib_init(); 370 + if (ret) 371 + goto exit; 372 + ret = jffs2_rtime_init(); 373 + if (ret) 374 + goto exit_zlib; 375 + ret = jffs2_rubinmips_init(); 376 + if (ret) 377 + goto exit_rtime; 378 + ret = jffs2_dynrubin_init(); 379 + if (ret) 380 + goto exit_runinmips; 381 + ret = jffs2_lzo_init(); 382 + if (ret) 383 + goto exit_dynrubin; 384 + 385 + 381 386 /* Setting default compression mode */ 382 387 #ifdef CONFIG_JFFS2_CMODE_NONE 383 388 jffs2_compression_mode = JFFS2_COMPR_MODE_NONE; ··· 401 396 #endif 402 397 #endif 403 398 return 0; 399 + 400 + exit_dynrubin: 401 + jffs2_dynrubin_exit(); 402 + exit_runinmips: 403 + jffs2_rubinmips_exit(); 404 + exit_rtime: 405 + jffs2_rtime_exit(); 406 + exit_zlib: 407 + jffs2_zlib_exit(); 408 + exit: 409 + return ret; 404 410 } 405 411 406 412 int jffs2_compressors_exit(void) 407 413 { 408 414 /* Unregistering compressors */ 409 - #ifdef CONFIG_JFFS2_LZO 410 415 jffs2_lzo_exit(); 411 - #endif 412 - #ifdef CONFIG_JFFS2_RUBIN 413 416 jffs2_dynrubin_exit(); 414 417 jffs2_rubinmips_exit(); 415 - #endif 416 - #ifdef CONFIG_JFFS2_RTIME 417 418 jffs2_rtime_exit(); 418 - #endif 419 - #ifdef CONFIG_JFFS2_ZLIB 420 419 jffs2_zlib_exit(); 421 - #endif 422 420 return 0; 423 421 }
+20 -6
fs/jffs2/compr.h
··· 88 88 void jffs2_rubinmips_exit(void); 89 89 int jffs2_dynrubin_init(void); 90 90 void jffs2_dynrubin_exit(void); 91 + #else 92 + static inline int jffs2_rubinmips_init(void) { return 0; } 93 + static inline void jffs2_rubinmips_exit(void) {} 94 + static inline int jffs2_dynrubin_init(void) { return 0; } 95 + static inline void jffs2_dynrubin_exit(void) {} 91 96 #endif 92 97 #ifdef CONFIG_JFFS2_RTIME 93 - int jffs2_rtime_init(void); 94 - void jffs2_rtime_exit(void); 98 + extern int jffs2_rtime_init(void); 99 + extern void jffs2_rtime_exit(void); 100 + #else 101 + static inline int jffs2_rtime_init(void) { return 0; } 102 + static inline void jffs2_rtime_exit(void) {} 95 103 #endif 96 104 #ifdef CONFIG_JFFS2_ZLIB 97 - int jffs2_zlib_init(void); 98 - void jffs2_zlib_exit(void); 105 + extern int jffs2_zlib_init(void); 106 + extern void jffs2_zlib_exit(void); 107 + #else 108 + static inline int jffs2_zlib_init(void) { return 0; } 109 + static inline void jffs2_zlib_exit(void) {} 99 110 #endif 100 111 #ifdef CONFIG_JFFS2_LZO 101 - int jffs2_lzo_init(void); 102 - void jffs2_lzo_exit(void); 112 + extern int jffs2_lzo_init(void); 113 + extern void jffs2_lzo_exit(void); 114 + #else 115 + static inline int jffs2_lzo_init(void) { return 0; } 116 + static inline void jffs2_lzo_exit(void) {} 103 117 #endif 104 118 105 119 #endif /* __JFFS2_COMPR_H__ */
+7 -8
fs/jffs2/file.c
··· 137 137 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); 138 138 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); 139 139 pgoff_t index = pos >> PAGE_SHIFT; 140 - uint32_t pageofs = index << PAGE_SHIFT; 141 140 int ret = 0; 142 141 143 142 jffs2_dbg(1, "%s()\n", __func__); 144 143 145 - if (pageofs > inode->i_size) { 146 - /* Make new hole frag from old EOF to new page */ 144 + if (pos > inode->i_size) { 145 + /* Make new hole frag from old EOF to new position */ 147 146 struct jffs2_raw_inode ri; 148 147 struct jffs2_full_dnode *fn; 149 148 uint32_t alloc_len; 150 149 151 - jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", 152 - (unsigned int)inode->i_size, pageofs); 150 + jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new position\n", 151 + (unsigned int)inode->i_size, (uint32_t)pos); 153 152 154 153 ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, 155 154 ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); ··· 168 169 ri.mode = cpu_to_jemode(inode->i_mode); 169 170 ri.uid = cpu_to_je16(i_uid_read(inode)); 170 171 ri.gid = cpu_to_je16(i_gid_read(inode)); 171 - ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs)); 172 + ri.isize = cpu_to_je32((uint32_t)pos); 172 173 ri.atime = ri.ctime = ri.mtime = cpu_to_je32(JFFS2_NOW()); 173 174 ri.offset = cpu_to_je32(inode->i_size); 174 - ri.dsize = cpu_to_je32(pageofs - inode->i_size); 175 + ri.dsize = cpu_to_je32((uint32_t)pos - inode->i_size); 175 176 ri.csize = cpu_to_je32(0); 176 177 ri.compr = JFFS2_COMPR_ZERO; 177 178 ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); ··· 201 202 goto out_err; 202 203 } 203 204 jffs2_complete_reservation(c); 204 - inode->i_size = pageofs; 205 + inode->i_size = pos; 205 206 mutex_unlock(&f->sem); 206 207 } 207 208
+1 -1
fs/jffs2/fs.c
··· 403 403 /* We stop if it was running, then restart if it needs to. 404 404 This also catches the case where it was stopped and this 405 405 is just a remount to restart it. 406 - Flush the writebuffer, if neccecary, else we loose it */ 406 + Flush the writebuffer, if necessary, else we loose it */ 407 407 if (!sb_rdonly(sb)) { 408 408 jffs2_stop_garbage_collect_thread(c); 409 409 mutex_lock(&c->alloc_sem);
+4 -5
fs/ubifs/budget.c
··· 209 209 subtract_lebs += 1; 210 210 211 211 /* 212 - * The GC journal head LEB is not really accessible. And since 213 - * different write types go to different heads, we may count only on 214 - * one head's space. 212 + * Since different write types go to different heads, we should 213 + * reserve one leb for each head. 215 214 */ 216 - subtract_lebs += c->jhead_cnt - 1; 215 + subtract_lebs += c->jhead_cnt; 217 216 218 217 /* We also reserve one LEB for deletions, which bypass budgeting */ 219 218 subtract_lebs += 1; ··· 399 400 dd_growth = req->dirtied_page ? c->bi.page_budget : 0; 400 401 401 402 if (req->dirtied_ino) 402 - dd_growth += c->bi.inode_budget << (req->dirtied_ino - 1); 403 + dd_growth += c->bi.inode_budget * req->dirtied_ino; 403 404 if (req->mod_dent) 404 405 dd_growth += c->bi.dent_budget; 405 406 dd_growth += req->dirtied_ino_d;
+17 -1
fs/ubifs/dir.c
··· 1151 1151 int err, sz_change, len = strlen(symname); 1152 1152 struct fscrypt_str disk_link; 1153 1153 struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1, 1154 - .new_ino_d = ALIGN(len, 8), 1155 1154 .dirtied_ino = 1 }; 1156 1155 struct fscrypt_name nm; 1157 1156 ··· 1166 1167 * Budget request settings: new inode, new direntry and changing parent 1167 1168 * directory inode. 1168 1169 */ 1170 + req.new_ino_d = ALIGN(disk_link.len - 1, 8); 1169 1171 err = ubifs_budget_space(c, &req); 1170 1172 if (err) 1171 1173 return err; ··· 1324 1324 if (unlink) { 1325 1325 ubifs_assert(c, inode_is_locked(new_inode)); 1326 1326 1327 + /* Budget for old inode's data when its nlink > 1. */ 1328 + req.dirtied_ino_d = ALIGN(ubifs_inode(new_inode)->data_len, 8); 1327 1329 err = ubifs_purge_xattrs(new_inode); 1328 1330 if (err) 1329 1331 return err; ··· 1568 1566 1569 1567 ubifs_assert(c, fst_inode && snd_inode); 1570 1568 1569 + /* 1570 + * Budget request settings: changing two direntries, changing the two 1571 + * parent directory inodes. 1572 + */ 1573 + 1574 + dbg_gen("dent '%pd' ino %lu in dir ino %lu exchange dent '%pd' ino %lu in dir ino %lu", 1575 + old_dentry, fst_inode->i_ino, old_dir->i_ino, 1576 + new_dentry, snd_inode->i_ino, new_dir->i_ino); 1577 + 1571 1578 err = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &fst_nm); 1572 1579 if (err) 1573 1580 return err; ··· 1586 1575 fscrypt_free_filename(&fst_nm); 1587 1576 return err; 1588 1577 } 1578 + 1579 + err = ubifs_budget_space(c, &req); 1580 + if (err) 1581 + goto out; 1589 1582 1590 1583 lock_4_inodes(old_dir, new_dir, NULL, NULL); 1591 1584 ··· 1616 1601 unlock_4_inodes(old_dir, new_dir, NULL, NULL); 1617 1602 ubifs_release_budget(c, &req); 1618 1603 1604 + out: 1619 1605 fscrypt_free_filename(&fst_nm); 1620 1606 fscrypt_free_filename(&snd_nm); 1621 1607 return err;
+23 -8
fs/ubifs/file.c
··· 1032 1032 if (page->index >= synced_i_size >> PAGE_SHIFT) { 1033 1033 err = inode->i_sb->s_op->write_inode(inode, NULL); 1034 1034 if (err) 1035 - goto out_unlock; 1035 + goto out_redirty; 1036 1036 /* 1037 1037 * The inode has been written, but the write-buffer has 1038 1038 * not been synchronized, so in case of an unclean ··· 1060 1060 if (i_size > synced_i_size) { 1061 1061 err = inode->i_sb->s_op->write_inode(inode, NULL); 1062 1062 if (err) 1063 - goto out_unlock; 1063 + goto out_redirty; 1064 1064 } 1065 1065 1066 1066 return do_writepage(page, len); 1067 - 1067 + out_redirty: 1068 + /* 1069 + * redirty_page_for_writepage() won't call ubifs_dirty_inode() because 1070 + * it passes I_DIRTY_PAGES flag while calling __mark_inode_dirty(), so 1071 + * there is no need to do space budget for dirty inode. 1072 + */ 1073 + redirty_page_for_writepage(wbc, page); 1068 1074 out_unlock: 1069 1075 unlock_page(page); 1070 1076 return err; ··· 1472 1466 struct inode *inode = folio->mapping->host; 1473 1467 struct ubifs_info *c = inode->i_sb->s_fs_info; 1474 1468 1475 - /* 1476 - * An attempt to release a dirty page without budgeting for it - should 1477 - * not happen. 1478 - */ 1479 1469 if (folio_test_writeback(folio)) 1480 1470 return false; 1471 + 1472 + /* 1473 + * Page is private but not dirty, weird? There is one condition 1474 + * making it happened. ubifs_writepage skipped the page because 1475 + * page index beyonds isize (for example. truncated by other 1476 + * process named A), then the page is invalidated by fadvise64 1477 + * syscall before being truncated by process A. 1478 + */ 1481 1479 ubifs_assert(c, folio_test_private(folio)); 1482 - ubifs_assert(c, 0); 1480 + if (folio_test_checked(folio)) 1481 + release_new_page_budget(c); 1482 + else 1483 + release_existing_page_budget(c); 1484 + 1485 + atomic_long_dec(&c->dirty_pg_cnt); 1483 1486 folio_detach_private(folio); 1484 1487 folio_clear_checked(folio); 1485 1488 return true;
+3 -3
fs/ubifs/io.c
··· 488 488 } 489 489 490 490 /** 491 - * wbuf_timer_callback - write-buffer timer callback function. 491 + * wbuf_timer_callback_nolock - write-buffer timer callback function. 492 492 * @timer: timer data (write-buffer descriptor) 493 493 * 494 494 * This function is called when the write-buffer timer expires. ··· 505 505 } 506 506 507 507 /** 508 - * new_wbuf_timer - start new write-buffer timer. 508 + * new_wbuf_timer_nolock - start new write-buffer timer. 509 509 * @c: UBIFS file-system description object 510 510 * @wbuf: write-buffer descriptor 511 511 */ ··· 531 531 } 532 532 533 533 /** 534 - * cancel_wbuf_timer - cancel write-buffer timer. 534 + * cancel_wbuf_timer_nolock - cancel write-buffer timer. 535 535 * @wbuf: write-buffer descriptor 536 536 */ 537 537 static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
+6 -2
fs/ubifs/journal.c
··· 1201 1201 * ubifs_jnl_rename - rename a directory entry. 1202 1202 * @c: UBIFS file-system description object 1203 1203 * @old_dir: parent inode of directory entry to rename 1204 - * @old_dentry: directory entry to rename 1204 + * @old_inode: directory entry's inode to rename 1205 + * @old_nm: name of the old directory entry to rename 1205 1206 * @new_dir: parent inode of directory entry to rename 1206 - * @new_dentry: new directory entry (or directory entry to replace) 1207 + * @new_inode: new directory entry's inode (or directory entry's inode to 1208 + * replace) 1209 + * @new_nm: new name of the new directory entry 1210 + * @whiteout: whiteout inode 1207 1211 * @sync: non-zero if the write-buffer has to be synchronized 1208 1212 * 1209 1213 * This function implements the re-name operation which may involve writing up
+13 -4
fs/ubifs/super.c
··· 833 833 INIT_LIST_HEAD(&c->jheads[i].buds_list); 834 834 err = ubifs_wbuf_init(c, &c->jheads[i].wbuf); 835 835 if (err) 836 - return err; 836 + goto out_wbuf; 837 837 838 838 c->jheads[i].wbuf.sync_callback = &bud_wbuf_callback; 839 839 c->jheads[i].wbuf.jhead = i; ··· 841 841 c->jheads[i].log_hash = ubifs_hash_get_desc(c); 842 842 if (IS_ERR(c->jheads[i].log_hash)) { 843 843 err = PTR_ERR(c->jheads[i].log_hash); 844 - goto out; 844 + goto out_log_hash; 845 845 } 846 846 } 847 847 ··· 854 854 855 855 return 0; 856 856 857 - out: 858 - while (i--) 857 + out_log_hash: 858 + kfree(c->jheads[i].wbuf.buf); 859 + kfree(c->jheads[i].wbuf.inodes); 860 + 861 + out_wbuf: 862 + while (i--) { 863 + kfree(c->jheads[i].wbuf.buf); 864 + kfree(c->jheads[i].wbuf.inodes); 859 865 kfree(c->jheads[i].log_hash); 866 + } 867 + kfree(c->jheads); 868 + c->jheads = NULL; 860 869 861 870 return err; 862 871 }
+4 -2
fs/ubifs/sysfs.c
··· 74 74 .show = ubifs_attr_show, 75 75 }; 76 76 77 - static struct kobj_type ubifs_sb_ktype = { 77 + static const struct kobj_type ubifs_sb_ktype = { 78 78 .default_groups = ubifs_groups, 79 79 .sysfs_ops = &ubifs_attr_ops, 80 80 .release = ubifs_sb_release, 81 81 }; 82 82 83 - static struct kobj_type ubifs_ktype = { 83 + static const struct kobj_type ubifs_ktype = { 84 84 .sysfs_ops = &ubifs_attr_ops, 85 85 }; 86 86 ··· 144 144 kobject_set_name(&ubifs_kset.kobj, "ubifs"); 145 145 ubifs_kset.kobj.parent = fs_kobj; 146 146 ret = kset_register(&ubifs_kset); 147 + if (ret) 148 + kset_put(&ubifs_kset); 147 149 148 150 return ret; 149 151 }
+23 -1
fs/ubifs/tnc.c
··· 267 267 if (zbr->len) { 268 268 err = insert_old_idx(c, zbr->lnum, zbr->offs); 269 269 if (unlikely(err)) 270 - return ERR_PTR(err); 270 + /* 271 + * Obsolete znodes will be freed by tnc_destroy_cnext() 272 + * or free_obsolete_znodes(), copied up znodes should 273 + * be added back to tnc and freed by 274 + * ubifs_destroy_tnc_subtree(). 275 + */ 276 + goto out; 271 277 err = add_idx_dirt(c, zbr->lnum, zbr->len); 272 278 } else 273 279 err = 0; 274 280 281 + out: 275 282 zbr->znode = zn; 276 283 zbr->lnum = 0; 277 284 zbr->offs = 0; ··· 3060 3053 cnext = cnext->cnext; 3061 3054 if (ubifs_zn_obsolete(znode)) 3062 3055 kfree(znode); 3056 + else if (!ubifs_zn_cow(znode)) { 3057 + /* 3058 + * Don't forget to update clean znode count after 3059 + * committing failed, because ubifs will check this 3060 + * count while closing tnc. Non-obsolete znode could 3061 + * be re-dirtied during committing process, so dirty 3062 + * flag is untrustable. The flag 'COW_ZNODE' is set 3063 + * for each dirty znode before committing, and it is 3064 + * cleared as long as the znode become clean, so we 3065 + * can statistic clean znode count according to this 3066 + * flag. 3067 + */ 3068 + atomic_long_inc(&c->clean_zn_cnt); 3069 + atomic_long_inc(&ubifs_clean_zn_cnt); 3070 + } 3063 3071 } while (cnext && cnext != c->cnext); 3064 3072 } 3065 3073
+5
fs/ubifs/ubifs.h
··· 1623 1623 return crypto_memneq(expected, got, c->hmac_desc_len); 1624 1624 } 1625 1625 1626 + #ifdef CONFIG_UBIFS_FS_AUTHENTICATION 1626 1627 void ubifs_bad_hash(const struct ubifs_info *c, const void *node, 1627 1628 const u8 *hash, int lnum, int offs); 1629 + #else 1630 + static inline void ubifs_bad_hash(const struct ubifs_info *c, const void *node, 1631 + const u8 *hash, int lnum, int offs) {}; 1632 + #endif 1628 1633 1629 1634 int __ubifs_node_check_hash(const struct ubifs_info *c, const void *buf, 1630 1635 const u8 *expected);
+1
include/linux/mtd/ubi.h
··· 110 110 int name_len; 111 111 const char *name; 112 112 dev_t cdev; 113 + struct device *dev; 113 114 }; 114 115 115 116 /**