Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'upstream-4.19-rc1' of git://git.infradead.org/linux-ubifs

Pull UBI/UBIFS updates from Richard Weinberger:

- Year 2038 preparations

- New UBI feature to skip CRC checks of static volumes

- A new Kconfig option to disable xattrs in UBIFS

- Lots of fixes in UBIFS, found by our new test framework

* tag 'upstream-4.19-rc1' of git://git.infradead.org/linux-ubifs: (21 commits)
ubifs: Set default assert action to read-only
ubifs: Allow setting assert action as mount parameter
ubifs: Rework ubifs_assert()
ubifs: Pass struct ubifs_info to ubifs_assert()
ubifs: Turn two ubifs_assert() into a WARN_ON()
ubi: expose the volume CRC check skip flag
ubi: provide a way to skip CRC checks
ubifs: Use kmalloc_array()
ubifs: Check data node size before truncate
Revert "UBIFS: Fix potential integer overflow in allocation"
ubifs: Add comment on c->commit_sem
ubifs: introduce Kconfig symbol for xattr support
ubifs: use swap macro in swap_dirty_idx
ubifs: tnc: use monotonic znode timestamp
ubifs: use timespec64 for inode timestamps
ubifs: xattr: Don't operate on deleted inodes
ubifs: gc: Fix typo
ubifs: Fix memory leak in lprobs self-check
ubi: Initialize Fastmap checkmapping correctly
ubifs: Fix synced_i_size calculation for xattr inodes
...

+787 -571
+11
drivers/mtd/ubi/cdev.c
··· 367 367 return count; 368 368 } 369 369 370 + /* 371 + * We voluntarily do not take into account the skip_check flag 372 + * as we want to make sure what we wrote was correctly written. 373 + */ 370 374 err = ubi_check_volume(ubi, vol->vol_id); 371 375 if (err < 0) 372 376 return err; ··· 623 619 goto bad; 624 620 625 621 if (req->vol_type != UBI_DYNAMIC_VOLUME && 622 + req->vol_type != UBI_STATIC_VOLUME) 623 + goto bad; 624 + 625 + if (req->flags & ~UBI_VOL_VALID_FLGS) 626 + goto bad; 627 + 628 + if (req->flags & UBI_VOL_SKIP_CRC_CHECK_FLG && 626 629 req->vol_type != UBI_STATIC_VOLUME) 627 630 goto bad; 628 631
+1 -1
drivers/mtd/ubi/kapi.c
··· 202 202 desc->mode = mode; 203 203 204 204 mutex_lock(&ubi->ckvol_mutex); 205 - if (!vol->checked) { 205 + if (!vol->checked && !vol->skip_check) { 206 206 /* This is the first open - check the volume */ 207 207 err = ubi_check_volume(ubi, vol_id); 208 208 if (err < 0) {
+6
drivers/mtd/ubi/ubi-media.h
··· 45 45 * Volume flags used in the volume table record. 46 46 * 47 47 * @UBI_VTBL_AUTORESIZE_FLG: auto-resize this volume 48 + * @UBI_VTBL_SKIP_CRC_CHECK_FLG: skip the CRC check done on a static volume at 49 + * open time. Should only be set on volumes that 50 + * are used by upper layers doing this kind of 51 + * check. Main use-case for this flag is 52 + * boot-time reduction 48 53 * 49 54 * %UBI_VTBL_AUTORESIZE_FLG flag can be set only for one volume in the volume 50 55 * table. UBI automatically re-sizes the volume which has this flag and makes ··· 81 76 */ 82 77 enum { 83 78 UBI_VTBL_AUTORESIZE_FLG = 0x01, 79 + UBI_VTBL_SKIP_CRC_CHECK_FLG = 0x02, 84 80 }; 85 81 86 82 /*
+4
drivers/mtd/ubi/ubi.h
··· 327 327 * atomic LEB change 328 328 * 329 329 * @eba_tbl: EBA table of this volume (LEB->PEB mapping) 330 + * @skip_check: %1 if CRC check of this static volume should be skipped. 331 + * Directly reflects the presence of the 332 + * %UBI_VTBL_SKIP_CRC_CHECK_FLG flag in the vtbl entry 330 333 * @checked: %1 if this static volume was checked 331 334 * @corrupted: %1 if the volume is corrupted (static volumes only) 332 335 * @upd_marker: %1 if the update marker is set for this volume ··· 377 374 void *upd_buf; 378 375 379 376 struct ubi_eba_table *eba_tbl; 377 + unsigned int skip_check:1; 380 378 unsigned int checked:1; 381 379 unsigned int corrupted:1; 382 380 unsigned int upd_marker:1;
+12
drivers/mtd/ubi/vmt.c
··· 174 174 vol->dev.class = &ubi_class; 175 175 vol->dev.groups = volume_dev_groups; 176 176 177 + if (req->flags & UBI_VOL_SKIP_CRC_CHECK_FLG) 178 + vol->skip_check = 1; 179 + 177 180 spin_lock(&ubi->volumes_lock); 178 181 if (vol_id == UBI_VOL_NUM_AUTO) { 179 182 /* Find unused volume ID */ ··· 302 299 vtbl_rec.vol_type = UBI_VID_DYNAMIC; 303 300 else 304 301 vtbl_rec.vol_type = UBI_VID_STATIC; 302 + 303 + if (vol->skip_check) 304 + vtbl_rec.flags |= UBI_VTBL_SKIP_CRC_CHECK_FLG; 305 + 305 306 memcpy(vtbl_rec.name, vol->name, vol->name_len); 306 307 307 308 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); ··· 738 731 } 739 732 if (vol->used_bytes != n) { 740 733 ubi_err(ubi, "bad used_bytes"); 734 + goto fail; 735 + } 736 + 737 + if (vol->skip_check) { 738 + ubi_err(ubi, "bad skip_check"); 741 739 goto fail; 742 740 } 743 741 } else {
+13 -10
drivers/mtd/ubi/vtbl.c
··· 560 560 vol->name[vol->name_len] = '\0'; 561 561 vol->vol_id = i; 562 562 563 + if (vtbl[i].flags & UBI_VTBL_SKIP_CRC_CHECK_FLG) 564 + vol->skip_check = 1; 565 + 563 566 if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) { 564 567 /* Auto re-size flag may be set only for one volume */ 565 568 if (ubi->autoresize_vol_id != -1) { ··· 580 577 ubi->vol_count += 1; 581 578 vol->ubi = ubi; 582 579 reserved_pebs += vol->reserved_pebs; 580 + 581 + /* 582 + * We use ubi->peb_count and not vol->reserved_pebs because 583 + * we want to keep the code simple. Otherwise we'd have to 584 + * resize/check the bitmap upon volume resize too. 585 + * Allocating a few bytes more does not hurt. 586 + */ 587 + err = ubi_fastmap_init_checkmap(vol, ubi->peb_count); 588 + if (err) 589 + return err; 583 590 584 591 /* 585 592 * In case of dynamic volume UBI knows nothing about how many ··· 633 620 (long long)(vol->used_ebs - 1) * vol->usable_leb_size; 634 621 vol->used_bytes += av->last_data_size; 635 622 vol->last_eb_bytes = av->last_data_size; 636 - 637 - /* 638 - * We use ubi->peb_count and not vol->reserved_pebs because 639 - * we want to keep the code simple. Otherwise we'd have to 640 - * resize/check the bitmap upon volume resize too. 641 - * Allocating a few bytes more does not hurt. 642 - */ 643 - err = ubi_fastmap_init_checkmap(vol, ubi->peb_count); 644 - if (err) 645 - return err; 646 623 } 647 624 648 625 /* And add the layout volume */
+13 -2
fs/ubifs/Kconfig
··· 51 51 52 52 If unsure, say 'N' 53 53 54 + config UBIFS_FS_XATTR 55 + bool "UBIFS XATTR support" 56 + depends on UBIFS_FS 57 + default y 58 + help 59 + Saying Y here includes support for extended attributes (xattrs). 60 + Xattrs are name:value pairs associated with inodes by 61 + the kernel or by users (see the attr(5) manual page). 62 + 63 + If unsure, say Y. 64 + 54 65 config UBIFS_FS_ENCRYPTION 55 66 bool "UBIFS Encryption" 56 - depends on UBIFS_FS && BLOCK 67 + depends on UBIFS_FS && UBIFS_FS_XATTR && BLOCK 57 68 select FS_ENCRYPTION 58 69 default n 59 70 help ··· 75 64 76 65 config UBIFS_FS_SECURITY 77 66 bool "UBIFS Security Labels" 78 - depends on UBIFS_FS 67 + depends on UBIFS_FS && UBIFS_FS_XATTR 79 68 default y 80 69 help 81 70 Security labels provide an access control facility to support Linux
+2 -1
fs/ubifs/Makefile
··· 4 4 ubifs-y += shrinker.o journal.o file.o dir.o super.o sb.o io.o 5 5 ubifs-y += tnc.o master.o scan.o replay.o log.o commit.o gc.o orphan.o 6 6 ubifs-y += budget.o find.o tnc_commit.o compress.o lpt.o lprops.o 7 - ubifs-y += recovery.o ioctl.o lpt_commit.o tnc_misc.o xattr.o debug.o 7 + ubifs-y += recovery.o ioctl.o lpt_commit.o tnc_misc.o debug.o 8 8 ubifs-y += misc.o 9 9 ubifs-$(CONFIG_UBIFS_FS_ENCRYPTION) += crypto.o 10 + ubifs-$(CONFIG_UBIFS_FS_XATTR) += xattr.o
+34 -34
fs/ubifs/budget.c
··· 439 439 { 440 440 int err, idx_growth, data_growth, dd_growth, retried = 0; 441 441 442 - ubifs_assert(req->new_page <= 1); 443 - ubifs_assert(req->dirtied_page <= 1); 444 - ubifs_assert(req->new_dent <= 1); 445 - ubifs_assert(req->mod_dent <= 1); 446 - ubifs_assert(req->new_ino <= 1); 447 - ubifs_assert(req->new_ino_d <= UBIFS_MAX_INO_DATA); 448 - ubifs_assert(req->dirtied_ino <= 4); 449 - ubifs_assert(req->dirtied_ino_d <= UBIFS_MAX_INO_DATA * 4); 450 - ubifs_assert(!(req->new_ino_d & 7)); 451 - ubifs_assert(!(req->dirtied_ino_d & 7)); 442 + ubifs_assert(c, req->new_page <= 1); 443 + ubifs_assert(c, req->dirtied_page <= 1); 444 + ubifs_assert(c, req->new_dent <= 1); 445 + ubifs_assert(c, req->mod_dent <= 1); 446 + ubifs_assert(c, req->new_ino <= 1); 447 + ubifs_assert(c, req->new_ino_d <= UBIFS_MAX_INO_DATA); 448 + ubifs_assert(c, req->dirtied_ino <= 4); 449 + ubifs_assert(c, req->dirtied_ino_d <= UBIFS_MAX_INO_DATA * 4); 450 + ubifs_assert(c, !(req->new_ino_d & 7)); 451 + ubifs_assert(c, !(req->dirtied_ino_d & 7)); 452 452 453 453 data_growth = calc_data_growth(c, req); 454 454 dd_growth = calc_dd_growth(c, req); ··· 458 458 459 459 again: 460 460 spin_lock(&c->space_lock); 461 - ubifs_assert(c->bi.idx_growth >= 0); 462 - ubifs_assert(c->bi.data_growth >= 0); 463 - ubifs_assert(c->bi.dd_growth >= 0); 461 + ubifs_assert(c, c->bi.idx_growth >= 0); 462 + ubifs_assert(c, c->bi.data_growth >= 0); 463 + ubifs_assert(c, c->bi.dd_growth >= 0); 464 464 465 465 if (unlikely(c->bi.nospace) && (c->bi.nospace_rp || !can_use_rp(c))) { 466 466 dbg_budg("no space"); ··· 526 526 */ 527 527 void ubifs_release_budget(struct ubifs_info *c, struct ubifs_budget_req *req) 528 528 { 529 - ubifs_assert(req->new_page <= 1); 530 - ubifs_assert(req->dirtied_page <= 1); 531 - ubifs_assert(req->new_dent <= 1); 532 - ubifs_assert(req->mod_dent <= 1); 533 - ubifs_assert(req->new_ino <= 1); 534 - ubifs_assert(req->new_ino_d <= UBIFS_MAX_INO_DATA); 535 - ubifs_assert(req->dirtied_ino <= 4); 
536 - ubifs_assert(req->dirtied_ino_d <= UBIFS_MAX_INO_DATA * 4); 537 - ubifs_assert(!(req->new_ino_d & 7)); 538 - ubifs_assert(!(req->dirtied_ino_d & 7)); 529 + ubifs_assert(c, req->new_page <= 1); 530 + ubifs_assert(c, req->dirtied_page <= 1); 531 + ubifs_assert(c, req->new_dent <= 1); 532 + ubifs_assert(c, req->mod_dent <= 1); 533 + ubifs_assert(c, req->new_ino <= 1); 534 + ubifs_assert(c, req->new_ino_d <= UBIFS_MAX_INO_DATA); 535 + ubifs_assert(c, req->dirtied_ino <= 4); 536 + ubifs_assert(c, req->dirtied_ino_d <= UBIFS_MAX_INO_DATA * 4); 537 + ubifs_assert(c, !(req->new_ino_d & 7)); 538 + ubifs_assert(c, !(req->dirtied_ino_d & 7)); 539 539 if (!req->recalculate) { 540 - ubifs_assert(req->idx_growth >= 0); 541 - ubifs_assert(req->data_growth >= 0); 542 - ubifs_assert(req->dd_growth >= 0); 540 + ubifs_assert(c, req->idx_growth >= 0); 541 + ubifs_assert(c, req->data_growth >= 0); 542 + ubifs_assert(c, req->dd_growth >= 0); 543 543 } 544 544 545 545 if (req->recalculate) { ··· 561 561 c->bi.dd_growth -= req->dd_growth; 562 562 c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c); 563 563 564 - ubifs_assert(c->bi.idx_growth >= 0); 565 - ubifs_assert(c->bi.data_growth >= 0); 566 - ubifs_assert(c->bi.dd_growth >= 0); 567 - ubifs_assert(c->bi.min_idx_lebs < c->main_lebs); 568 - ubifs_assert(!(c->bi.idx_growth & 7)); 569 - ubifs_assert(!(c->bi.data_growth & 7)); 570 - ubifs_assert(!(c->bi.dd_growth & 7)); 564 + ubifs_assert(c, c->bi.idx_growth >= 0); 565 + ubifs_assert(c, c->bi.data_growth >= 0); 566 + ubifs_assert(c, c->bi.dd_growth >= 0); 567 + ubifs_assert(c, c->bi.min_idx_lebs < c->main_lebs); 568 + ubifs_assert(c, !(c->bi.idx_growth & 7)); 569 + ubifs_assert(c, !(c->bi.data_growth & 7)); 570 + ubifs_assert(c, !(c->bi.dd_growth & 7)); 571 571 spin_unlock(&c->space_lock); 572 572 } 573 573 ··· 680 680 int rsvd_idx_lebs, lebs; 681 681 long long available, outstanding, free; 682 682 683 - ubifs_assert(c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c)); 683 + ubifs_assert(c, c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c));
684 684 outstanding = c->bi.data_growth + c->bi.dd_growth; 685 685 available = ubifs_calc_available(c, c->bi.min_idx_lebs); 686 686
+4 -4
fs/ubifs/commit.c
··· 91 91 if (c->nroot && test_bit(DIRTY_CNODE, &c->nroot->flags)) 92 92 return 0; 93 93 94 - ubifs_assert(atomic_long_read(&c->dirty_zn_cnt) == 0); 95 - ubifs_assert(c->dirty_pn_cnt == 0); 96 - ubifs_assert(c->dirty_nn_cnt == 0); 94 + ubifs_assert(c, atomic_long_read(&c->dirty_zn_cnt) == 0); 95 + ubifs_assert(c, c->dirty_pn_cnt == 0); 96 + ubifs_assert(c, c->dirty_nn_cnt == 0); 97 97 98 98 return 1; 99 99 } ··· 113 113 struct ubifs_lp_stats lst; 114 114 115 115 dbg_cmt("start"); 116 - ubifs_assert(!c->ro_media && !c->ro_mount); 116 + ubifs_assert(c, !c->ro_media && !c->ro_mount); 117 117 118 118 if (c->ro_error) { 119 119 err = -EROFS;
+2 -2
fs/ubifs/crypto.c
··· 32 32 struct page *ret; 33 33 unsigned int pad_len = round_up(in_len, UBIFS_CIPHER_BLOCK_SIZE); 34 34 35 - ubifs_assert(pad_len <= *out_len); 35 + ubifs_assert(c, pad_len <= *out_len); 36 36 dn->compr_size = cpu_to_le16(in_len); 37 37 38 38 /* pad to full block cipher length */ ··· 63 63 return -EINVAL; 64 64 } 65 65 66 - ubifs_assert(dlen <= UBIFS_BLOCK_SIZE); 66 + ubifs_assert(c, dlen <= UBIFS_BLOCK_SIZE); 67 67 err = fscrypt_decrypt_page(inode, virt_to_page(&dn->data), dlen, 68 68 offset_in_page(&dn->data), block); 69 69 if (err) {
+32 -10
fs/ubifs/debug.c
··· 134 134 } 135 135 } else 136 136 len -= snprintf(p, len, "bad key format %d", c->key_fmt); 137 - ubifs_assert(len > 0); 137 + ubifs_assert(c, len > 0); 138 138 return p; 139 139 } 140 140 ··· 276 276 return; 277 277 278 278 pr_err("List of directory entries:\n"); 279 - ubifs_assert(!mutex_is_locked(&c->tnc_mutex)); 279 + ubifs_assert(c, !mutex_is_locked(&c->tnc_mutex)); 280 280 281 281 lowest_dent_key(c, &key, inode->i_ino); 282 282 while (1) { ··· 931 931 932 932 pr_err("\n"); 933 933 pr_err("(pid %d) start dumping TNC tree\n", current->pid); 934 - znode = ubifs_tnc_levelorder_next(c->zroot.znode, NULL); 934 + znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, NULL); 935 935 level = znode->level; 936 936 pr_err("== Level %d ==\n", level); 937 937 while (znode) { ··· 940 940 pr_err("== Level %d ==\n", level); 941 941 } 942 942 ubifs_dump_znode(c, znode); 943 - znode = ubifs_tnc_levelorder_next(c->zroot.znode, znode); 943 + znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, znode); 944 944 } 945 945 pr_err("(pid %d) finish dumping TNC tree\n", current->pid); 946 946 } ··· 1183 1183 union ubifs_key key; 1184 1184 char key_buf[DBG_KEY_BUF_LEN]; 1185 1185 1186 - ubifs_assert(!keys_cmp(c, &zbr1->key, &zbr2->key)); 1186 + ubifs_assert(c, !keys_cmp(c, &zbr1->key, &zbr2->key)); 1187 1187 dent1 = kmalloc(UBIFS_MAX_DENT_NODE_SZ, GFP_NOFS); 1188 1188 if (!dent1) 1189 1189 return -ENOMEM; ··· 1479 1479 if (!dbg_is_chk_index(c)) 1480 1480 return 0; 1481 1481 1482 - ubifs_assert(mutex_is_locked(&c->tnc_mutex)); 1482 + ubifs_assert(c, mutex_is_locked(&c->tnc_mutex)); 1483 1483 if (!c->zroot.znode) 1484 1484 return 0; 1485 1485 ··· 1505 1505 } 1506 1506 1507 1507 prev = znode; 1508 - znode = ubifs_tnc_postorder_next(znode); 1508 + znode = ubifs_tnc_postorder_next(c, znode); 1509 1509 if (!znode) 1510 1510 break; 1511 1511 ··· 2036 2036 long long blk_offs; 2037 2037 struct ubifs_data_node *dn = node; 2038 2038 2039 - ubifs_assert(zbr->len >= UBIFS_DATA_NODE_SZ); 2039 + ubifs_assert(c, zbr->len >= UBIFS_DATA_NODE_SZ);
2040 2040 2041 2041 /* 2042 2042 * Search the inode node this data node belongs to and insert ··· 2066 2066 struct ubifs_dent_node *dent = node; 2067 2067 struct fsck_inode *fscki1; 2068 2068 2069 - ubifs_assert(zbr->len >= UBIFS_DENT_NODE_SZ); 2069 + ubifs_assert(c, zbr->len >= UBIFS_DENT_NODE_SZ); 2070 2070 2071 2071 err = ubifs_validate_entry(c, dent); 2072 2072 if (err) ··· 2461 2461 { 2462 2462 struct ubifs_debug_info *d = c->dbg; 2463 2463 2464 - ubifs_assert(dbg_is_tst_rcvry(c)); 2464 + ubifs_assert(c, dbg_is_tst_rcvry(c)); 2465 2465 2466 2466 if (!d->pc_cnt) { 2467 2467 /* First call - decide delay to the power cut */ ··· 3079 3079 { 3080 3080 if (IS_ENABLED(CONFIG_DEBUG_FS)) 3081 3081 debugfs_remove_recursive(dfs_rootdir); 3082 + } 3083 + 3084 + void ubifs_assert_failed(struct ubifs_info *c, const char *expr, 3085 + const char *file, int line) 3086 + { 3087 + ubifs_err(c, "UBIFS assert failed: %s, in %s:%u", expr, file, line); 3088 + 3089 + switch (c->assert_action) { 3090 + case ASSACT_PANIC: 3091 + BUG(); 3092 + break; 3093 + 3094 + case ASSACT_RO: 3095 + ubifs_ro_mode(c, -EINVAL); 3096 + break; 3097 + 3098 + case ASSACT_REPORT: 3099 + default: 3100 + dump_stack(); 3101 + break; 3102 + 3103 + } 3082 3104 } 3083 3105 3084 3106 /**
+8 -6
fs/ubifs/debug.h
··· 148 148 unsigned int tst_rcvry:1; 149 149 }; 150 150 151 - #define ubifs_assert(expr) do { \ 151 + void ubifs_assert_failed(struct ubifs_info *c, const char *expr, 152 + const char *file, int line); 153 + 154 + #define ubifs_assert(c, expr) do { \ 152 155 if (unlikely(!(expr))) { \ 153 - pr_crit("UBIFS assert failed in %s at %u (pid %d)\n", \ 154 - __func__, __LINE__, current->pid); \ 155 - dump_stack(); \ 156 + ubifs_assert_failed((struct ubifs_info *)c, #expr, __FILE__, \ 157 + __LINE__); \ 156 158 } \ 157 159 } while (0) 158 160 159 161 #define ubifs_assert_cmt_locked(c) do { \ 160 162 if (unlikely(down_write_trylock(&(c)->commit_sem))) { \ 161 163 up_write(&(c)->commit_sem); \ 162 - pr_crit("commit lock is not locked!\n"); \ 163 - ubifs_assert(0); \ 164 + ubifs_err(c, "commit lock is not locked!\n"); \ 165 + ubifs_assert(c, 0); \ 164 166 } \ 165 167 } while (0) 166 168
+23 -19
fs/ubifs/dir.c
··· 240 240 } 241 241 242 242 if (nm.hash) { 243 - ubifs_assert(fname_len(&nm) == 0); 244 - ubifs_assert(fname_name(&nm) == NULL); 243 + ubifs_assert(c, fname_len(&nm) == 0); 244 + ubifs_assert(c, fname_name(&nm) == NULL); 245 245 dent_key_init_hash(c, &key, dir->i_ino, nm.hash); 246 246 err = ubifs_tnc_lookup_dh(c, &key, dent, nm.minor_hash); 247 247 } else { ··· 404 404 405 405 if (whiteout) { 406 406 init_special_inode(inode, inode->i_mode, WHITEOUT_DEV); 407 - ubifs_assert(inode->i_op == &ubifs_file_inode_operations); 407 + ubifs_assert(c, inode->i_op == &ubifs_file_inode_operations); 408 408 } 409 409 410 410 err = ubifs_init_security(dir, inode, &dentry->d_name); ··· 421 421 } else { 422 422 d_tmpfile(dentry, inode); 423 423 } 424 - ubifs_assert(ui->dirty); 424 + ubifs_assert(c, ui->dirty); 425 425 426 426 instantiated = 1; 427 427 mutex_unlock(&ui->ui_mutex); ··· 556 556 557 557 /* File positions 0 and 1 correspond to "." and ".." */ 558 558 if (ctx->pos < 2) { 559 - ubifs_assert(!file->private_data); 559 + ubifs_assert(c, !file->private_data); 560 560 if (!dir_emit_dots(file, ctx)) { 561 561 if (encrypted) 562 562 fscrypt_fname_free_buffer(&fstr); ··· 597 597 dbg_gen("ino %llu, new f_pos %#x", 598 598 (unsigned long long)le64_to_cpu(dent->inum), 599 599 key_hash_flash(c, &dent->key)); 600 - ubifs_assert(le64_to_cpu(dent->ch.sqnum) > 600 + ubifs_assert(c, le64_to_cpu(dent->ch.sqnum) > 601 601 ubifs_inode(dir)->creat_sqnum); 602 602 603 603 fname_len(&nm) = le16_to_cpu(dent->nlen); ··· 716 716 dbg_gen("dent '%pd' to ino %lu (nlink %d) in dir ino %lu", 717 717 dentry, inode->i_ino, 718 718 inode->i_nlink, dir->i_ino); 719 - ubifs_assert(inode_is_locked(dir)); 720 - ubifs_assert(inode_is_locked(inode)); 719 + ubifs_assert(c, inode_is_locked(dir)); 720 + ubifs_assert(c, inode_is_locked(inode)); 721 721 722 722 err = fscrypt_prepare_link(old_dentry, dir, dentry); 723 723 if (err) ··· 804 804 805 805 sz_change = CALC_DENT_SIZE(fname_len(&nm)); 806 806 807 - 
ubifs_assert(inode_is_locked(dir)); 808 - ubifs_assert(inode_is_locked(inode)); 807 + ubifs_assert(c, inode_is_locked(dir)); 808 + ubifs_assert(c, inode_is_locked(inode)); 809 809 err = dbg_check_synced_i_size(c, inode); 810 810 if (err) 811 811 goto out_fname; ··· 896 896 897 897 dbg_gen("directory '%pd', ino %lu in dir ino %lu", dentry, 898 898 inode->i_ino, dir->i_ino); 899 - ubifs_assert(inode_is_locked(dir)); 900 - ubifs_assert(inode_is_locked(inode)); 899 + ubifs_assert(c, inode_is_locked(dir)); 900 + ubifs_assert(c, inode_is_locked(inode)); 901 901 err = ubifs_check_dir_empty(d_inode(dentry)); 902 902 if (err) 903 903 return err; ··· 1123 1123 struct ubifs_inode *ui; 1124 1124 struct ubifs_inode *dir_ui = ubifs_inode(dir); 1125 1125 struct ubifs_info *c = dir->i_sb->s_fs_info; 1126 - int err, len = strlen(symname); 1127 - int sz_change = CALC_DENT_SIZE(len); 1126 + int err, sz_change, len = strlen(symname); 1128 1127 struct fscrypt_str disk_link; 1129 1128 struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1, 1130 1129 .new_ino_d = ALIGN(len, 8), ··· 1149 1150 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm); 1150 1151 if (err) 1151 1152 goto out_budg; 1153 + 1154 + sz_change = CALC_DENT_SIZE(fname_len(&nm)); 1152 1155 1153 1156 inode = ubifs_new_inode(c, dir, S_IFLNK | S_IRWXUGO); 1154 1157 if (IS_ERR(inode)) { ··· 1295 1294 new_dentry, new_dir->i_ino, flags); 1296 1295 1297 1296 if (unlink) 1298 - ubifs_assert(inode_is_locked(new_inode)); 1297 + ubifs_assert(c, inode_is_locked(new_inode)); 1299 1298 1300 1299 if (unlink && is_dir) { 1301 1300 err = ubifs_check_dir_empty(new_inode); ··· 1349 1348 whiteout_ui = ubifs_inode(whiteout); 1350 1349 whiteout_ui->data = dev; 1351 1350 whiteout_ui->data_len = ubifs_encode_dev(dev, MKDEV(0, 0)); 1352 - ubifs_assert(!whiteout_ui->dirty); 1351 + ubifs_assert(c, !whiteout_ui->dirty); 1353 1352 } 1354 1353 1355 1354 lock_4_inodes(old_dir, new_dir, new_inode, whiteout); ··· 1509 1508 int err; 1510 1509 
struct fscrypt_name fst_nm, snd_nm; 1511 1510 1512 - ubifs_assert(fst_inode && snd_inode); 1511 + ubifs_assert(c, fst_inode && snd_inode); 1513 1512 1514 1513 err = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &fst_nm); 1515 1514 if (err) ··· 1556 1555 unsigned int flags) 1557 1556 { 1558 1557 int err; 1558 + struct ubifs_info *c = old_dir->i_sb->s_fs_info; 1559 1559 1560 1560 if (flags & ~(RENAME_NOREPLACE | RENAME_WHITEOUT | RENAME_EXCHANGE)) 1561 1561 return -EINVAL; 1562 1562 1563 - ubifs_assert(inode_is_locked(old_dir)); 1564 - ubifs_assert(inode_is_locked(new_dir)); 1563 + ubifs_assert(c, inode_is_locked(old_dir)); 1564 + ubifs_assert(c, inode_is_locked(new_dir)); 1565 1565 1566 1566 err = fscrypt_prepare_rename(old_dir, old_dentry, new_dir, new_dentry, 1567 1567 flags); ··· 1649 1647 .rename = ubifs_rename, 1650 1648 .setattr = ubifs_setattr, 1651 1649 .getattr = ubifs_getattr, 1650 + #ifdef CONFIG_UBIFS_FS_XATTR 1652 1651 .listxattr = ubifs_listxattr, 1652 + #endif 1653 1653 #ifdef CONFIG_UBIFS_ATIME_SUPPORT 1654 1654 .update_time = ubifs_update_time, 1655 1655 #endif
+36 -26
fs/ubifs/file.c
··· 71 71 return err; 72 72 } 73 73 74 - ubifs_assert(le64_to_cpu(dn->ch.sqnum) > 74 + ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) > 75 75 ubifs_inode(inode)->creat_sqnum); 76 76 len = le32_to_cpu(dn->size); 77 77 if (len <= 0 || len > UBIFS_BLOCK_SIZE) ··· 115 115 unsigned int block, beyond; 116 116 struct ubifs_data_node *dn; 117 117 struct inode *inode = page->mapping->host; 118 + struct ubifs_info *c = inode->i_sb->s_fs_info; 118 119 loff_t i_size = i_size_read(inode); 119 120 120 121 dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx", 121 122 inode->i_ino, page->index, i_size, page->flags); 122 - ubifs_assert(!PageChecked(page)); 123 - ubifs_assert(!PagePrivate(page)); 123 + ubifs_assert(c, !PageChecked(page)); 124 + ubifs_assert(c, !PagePrivate(page)); 124 125 125 126 addr = kmap(page); 126 127 ··· 442 441 int skipped_read = 0; 443 442 struct page *page; 444 443 445 - ubifs_assert(ubifs_inode(inode)->ui_size == inode->i_size); 446 - ubifs_assert(!c->ro_media && !c->ro_mount); 444 + ubifs_assert(c, ubifs_inode(inode)->ui_size == inode->i_size); 445 + ubifs_assert(c, !c->ro_media && !c->ro_mount); 447 446 448 447 if (unlikely(c->ro_error)) 449 448 return -EROFS; ··· 482 481 483 482 err = allocate_budget(c, page, ui, appending); 484 483 if (unlikely(err)) { 485 - ubifs_assert(err == -ENOSPC); 484 + ubifs_assert(c, err == -ENOSPC); 486 485 /* 487 486 * If we skipped reading the page because we were going to 488 487 * write all of it, then it is not up to date. ··· 499 498 * everything and fall-back to slow-path. 500 499 */ 501 500 if (appending) { 502 - ubifs_assert(mutex_is_locked(&ui->ui_mutex)); 501 + ubifs_assert(c, mutex_is_locked(&ui->ui_mutex)); 503 502 mutex_unlock(&ui->ui_mutex); 504 503 } 505 504 unlock_page(page); ··· 596 595 * '__set_page_dirty_nobuffers()'. 
597 596 */ 598 597 __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 599 - ubifs_assert(mutex_is_locked(&ui->ui_mutex)); 598 + ubifs_assert(c, mutex_is_locked(&ui->ui_mutex)); 600 599 mutex_unlock(&ui->ui_mutex); 601 600 } 602 601 ··· 649 648 650 649 dn = bu->buf + (bu->zbranch[nn].offs - offs); 651 650 652 - ubifs_assert(le64_to_cpu(dn->ch.sqnum) > 651 + ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) > 653 652 ubifs_inode(inode)->creat_sqnum); 654 653 655 654 len = le32_to_cpu(dn->size); ··· 768 767 bu->buf_len = bu->zbranch[bu->cnt - 1].offs + 769 768 bu->zbranch[bu->cnt - 1].len - 770 769 bu->zbranch[0].offs; 771 - ubifs_assert(bu->buf_len > 0); 772 - ubifs_assert(bu->buf_len <= c->leb_size); 770 + ubifs_assert(c, bu->buf_len > 0); 771 + ubifs_assert(c, bu->buf_len <= c->leb_size); 773 772 bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN); 774 773 if (!bu->buf) 775 774 goto out_bu_off; ··· 921 920 #ifdef UBIFS_DEBUG 922 921 struct ubifs_inode *ui = ubifs_inode(inode); 923 922 spin_lock(&ui->ui_lock); 924 - ubifs_assert(page->index <= ui->synced_i_size >> PAGE_SHIFT); 923 + ubifs_assert(c, page->index <= ui->synced_i_size >> PAGE_SHIFT); 925 924 spin_unlock(&ui->ui_lock); 926 925 #endif 927 926 ··· 950 949 ubifs_ro_mode(c, err); 951 950 } 952 951 953 - ubifs_assert(PagePrivate(page)); 952 + ubifs_assert(c, PagePrivate(page)); 954 953 if (PageChecked(page)) 955 954 release_new_page_budget(c); 956 955 else ··· 1015 1014 static int ubifs_writepage(struct page *page, struct writeback_control *wbc) 1016 1015 { 1017 1016 struct inode *inode = page->mapping->host; 1017 + struct ubifs_info *c = inode->i_sb->s_fs_info; 1018 1018 struct ubifs_inode *ui = ubifs_inode(inode); 1019 1019 loff_t i_size = i_size_read(inode), synced_i_size; 1020 1020 pgoff_t end_index = i_size >> PAGE_SHIFT; ··· 1024 1022 1025 1023 dbg_gen("ino %lu, pg %lu, pg flags %#lx", 1026 1024 inode->i_ino, page->index, page->flags); 1027 - ubifs_assert(PagePrivate(page)); 1025 + ubifs_assert(c, 
PagePrivate(page)); 1028 1026 1029 1027 /* Is the page fully outside @i_size? (truncate in progress) */ 1030 1028 if (page->index > end_index || (page->index == end_index && !len)) { ··· 1169 1167 * 'ubifs_jnl_truncate()' will see an already 1170 1168 * truncated (and up to date) data node. 1171 1169 */ 1172 - ubifs_assert(PagePrivate(page)); 1170 + ubifs_assert(c, PagePrivate(page)); 1173 1171 1174 1172 clear_page_dirty_for_io(page); 1175 1173 if (UBIFS_BLOCKS_PER_PAGE_SHIFT) ··· 1305 1303 struct inode *inode = page->mapping->host; 1306 1304 struct ubifs_info *c = inode->i_sb->s_fs_info; 1307 1305 1308 - ubifs_assert(PagePrivate(page)); 1306 + ubifs_assert(c, PagePrivate(page)); 1309 1307 if (offset || length < PAGE_SIZE) 1310 1308 /* Partial page remains dirty */ 1311 1309 return; ··· 1367 1365 * granularity, they are not updated. This is an optimization. 1368 1366 */ 1369 1367 static inline int mctime_update_needed(const struct inode *inode, 1370 - const struct timespec *now) 1368 + const struct timespec64 *now) 1371 1369 { 1372 - struct timespec64 now64 = timespec_to_timespec64(*now); 1373 - if (!timespec64_equal(&inode->i_mtime, &now64) || 1374 - !timespec64_equal(&inode->i_ctime, &now64)) 1370 + if (!timespec64_equal(&inode->i_mtime, now) || 1371 + !timespec64_equal(&inode->i_ctime, now)) 1375 1372 return 1; 1376 1373 return 0; 1377 1374 } ··· 1426 1425 */ 1427 1426 static int update_mctime(struct inode *inode) 1428 1427 { 1429 - struct timespec now = timespec64_to_timespec(current_time(inode)); 1428 + struct timespec64 now = current_time(inode); 1430 1429 struct ubifs_inode *ui = ubifs_inode(inode); 1431 1430 struct ubifs_info *c = inode->i_sb->s_fs_info; 1432 1431 ··· 1463 1462 static int ubifs_set_page_dirty(struct page *page) 1464 1463 { 1465 1464 int ret; 1465 + struct inode *inode = page->mapping->host; 1466 + struct ubifs_info *c = inode->i_sb->s_fs_info; 1466 1467 1467 1468 ret = __set_page_dirty_nobuffers(page); 1468 1469 /* 1469 1470 * An attempt to 
dirty a page without budgeting for it - should not 1470 1471 * happen. 1471 1472 */ 1472 - ubifs_assert(ret == 0); 1473 + ubifs_assert(c, ret == 0); 1473 1474 return ret; 1474 1475 } 1475 1476 ··· 1500 1497 1501 1498 static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags) 1502 1499 { 1500 + struct inode *inode = page->mapping->host; 1501 + struct ubifs_info *c = inode->i_sb->s_fs_info; 1502 + 1503 1503 /* 1504 1504 * An attempt to release a dirty page without budgeting for it - should 1505 1505 * not happen. 1506 1506 */ 1507 1507 if (PageWriteback(page)) 1508 1508 return 0; 1509 - ubifs_assert(PagePrivate(page)); 1510 - ubifs_assert(0); 1509 + ubifs_assert(c, PagePrivate(page)); 1510 + ubifs_assert(c, 0); 1511 1511 ClearPagePrivate(page); 1512 1512 ClearPageChecked(page); 1513 1513 return 1; ··· 1525 1519 struct page *page = vmf->page; 1526 1520 struct inode *inode = file_inode(vmf->vma->vm_file); 1527 1521 struct ubifs_info *c = inode->i_sb->s_fs_info; 1528 - struct timespec now = timespec64_to_timespec(current_time(inode)); 1522 + struct timespec64 now = current_time(inode); 1529 1523 struct ubifs_budget_req req = { .new_page = 1 }; 1530 1524 int err, update_time; 1531 1525 1532 1526 dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index, 1533 1527 i_size_read(inode)); 1534 - ubifs_assert(!c->ro_media && !c->ro_mount); 1528 + ubifs_assert(c, !c->ro_media && !c->ro_mount); 1535 1529 1536 1530 if (unlikely(c->ro_error)) 1537 1531 return VM_FAULT_SIGBUS; /* -EROFS */ ··· 1660 1654 const struct inode_operations ubifs_file_inode_operations = { 1661 1655 .setattr = ubifs_setattr, 1662 1656 .getattr = ubifs_getattr, 1657 + #ifdef CONFIG_UBIFS_FS_XATTR 1663 1658 .listxattr = ubifs_listxattr, 1659 + #endif 1664 1660 #ifdef CONFIG_UBIFS_ATIME_SUPPORT 1665 1661 .update_time = ubifs_update_time, 1666 1662 #endif ··· 1672 1664 .get_link = ubifs_get_link, 1673 1665 .setattr = ubifs_setattr, 1674 1666 .getattr = ubifs_getattr, 1667 + #ifdef 
CONFIG_UBIFS_FS_XATTR 1675 1668 .listxattr = ubifs_listxattr, 1669 + #endif 1676 1670 #ifdef CONFIG_UBIFS_ATIME_SUPPORT 1677 1671 .update_time = ubifs_update_time, 1678 1672 #endif
fs/ubifs/file.h
+28 -31
fs/ubifs/find.c
··· 183 183 &data); 184 184 if (err) 185 185 return ERR_PTR(err); 186 - ubifs_assert(data.lnum >= c->main_first && data.lnum < c->leb_cnt); 186 + ubifs_assert(c, data.lnum >= c->main_first && data.lnum < c->leb_cnt); 187 187 c->lscan_lnum = data.lnum; 188 188 lprops = ubifs_lpt_lookup_dirty(c, data.lnum); 189 189 if (IS_ERR(lprops)) 190 190 return lprops; 191 - ubifs_assert(lprops->lnum == data.lnum); 192 - ubifs_assert(lprops->free + lprops->dirty >= min_space); 193 - ubifs_assert(lprops->dirty >= c->dead_wm || 191 + ubifs_assert(c, lprops->lnum == data.lnum); 192 + ubifs_assert(c, lprops->free + lprops->dirty >= min_space); 193 + ubifs_assert(c, lprops->dirty >= c->dead_wm || 194 194 (pick_free && 195 195 lprops->free + lprops->dirty == c->leb_size)); 196 - ubifs_assert(!(lprops->flags & LPROPS_TAKEN)); 197 - ubifs_assert(!exclude_index || !(lprops->flags & LPROPS_INDEX)); 196 + ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN)); 197 + ubifs_assert(c, !exclude_index || !(lprops->flags & LPROPS_INDEX)); 198 198 return lprops; 199 199 } 200 200 ··· 315 315 lp = idx_lp; 316 316 317 317 if (lp) { 318 - ubifs_assert(lp->free + lp->dirty >= c->dead_wm); 318 + ubifs_assert(c, lp->free + lp->dirty >= c->dead_wm); 319 319 goto found; 320 320 } 321 321 ··· 326 326 err = PTR_ERR(lp); 327 327 goto out; 328 328 } 329 - ubifs_assert(lp->dirty >= c->dead_wm || 329 + ubifs_assert(c, lp->dirty >= c->dead_wm || 330 330 (pick_free && lp->free + lp->dirty == c->leb_size)); 331 331 332 332 found: ··· 462 462 &data); 463 463 if (err) 464 464 return ERR_PTR(err); 465 - ubifs_assert(data.lnum >= c->main_first && data.lnum < c->leb_cnt); 465 + ubifs_assert(c, data.lnum >= c->main_first && data.lnum < c->leb_cnt); 466 466 c->lscan_lnum = data.lnum; 467 467 lprops = ubifs_lpt_lookup_dirty(c, data.lnum); 468 468 if (IS_ERR(lprops)) 469 469 return lprops; 470 - ubifs_assert(lprops->lnum == data.lnum); 471 - ubifs_assert(lprops->free >= min_space); 472 - ubifs_assert(!(lprops->flags & 
LPROPS_TAKEN)); 473 - ubifs_assert(!(lprops->flags & LPROPS_INDEX)); 470 + ubifs_assert(c, lprops->lnum == data.lnum); 471 + ubifs_assert(c, lprops->free >= min_space); 472 + ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN)); 473 + ubifs_assert(c, !(lprops->flags & LPROPS_INDEX)); 474 474 return lprops; 475 475 } 476 476 ··· 574 574 } 575 575 576 576 dbg_find("found LEB %d, free %d", lnum, c->leb_size - *offs); 577 - ubifs_assert(*offs <= c->leb_size - min_space); 577 + ubifs_assert(c, *offs <= c->leb_size - min_space); 578 578 return lnum; 579 579 580 580 out: ··· 642 642 &data); 643 643 if (err) 644 644 return ERR_PTR(err); 645 - ubifs_assert(data.lnum >= c->main_first && data.lnum < c->leb_cnt); 645 + ubifs_assert(c, data.lnum >= c->main_first && data.lnum < c->leb_cnt); 646 646 c->lscan_lnum = data.lnum; 647 647 lprops = ubifs_lpt_lookup_dirty(c, data.lnum); 648 648 if (IS_ERR(lprops)) 649 649 return lprops; 650 - ubifs_assert(lprops->lnum == data.lnum); 651 - ubifs_assert(lprops->free + lprops->dirty == c->leb_size); 652 - ubifs_assert(!(lprops->flags & LPROPS_TAKEN)); 653 - ubifs_assert(!(lprops->flags & LPROPS_INDEX)); 650 + ubifs_assert(c, lprops->lnum == data.lnum); 651 + ubifs_assert(c, lprops->free + lprops->dirty == c->leb_size); 652 + ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN)); 653 + ubifs_assert(c, !(lprops->flags & LPROPS_INDEX)); 654 654 return lprops; 655 655 } 656 656 ··· 690 690 */ 691 691 if (c->in_a_category_cnt != c->main_lebs || 692 692 c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) { 693 - ubifs_assert(c->freeable_cnt == 0); 693 + ubifs_assert(c, c->freeable_cnt == 0); 694 694 lprops = scan_for_leb_for_idx(c); 695 695 if (IS_ERR(lprops)) { 696 696 err = PTR_ERR(lprops); ··· 750 750 static void swap_dirty_idx(struct ubifs_lprops **a, struct ubifs_lprops **b, 751 751 int size) 752 752 { 753 - struct ubifs_lprops *t = *a; 754 - 755 - *a = *b; 756 - *b = t; 753 + swap(*a, *b); 757 754 } 758 755 759 756 /** ··· 867 870 if (err) 868 871 
return err; 869 872 found: 870 - ubifs_assert(data.lnum >= c->main_first && data.lnum < c->leb_cnt); 873 + ubifs_assert(c, data.lnum >= c->main_first && data.lnum < c->leb_cnt); 871 874 c->lscan_lnum = data.lnum; 872 875 lprops = ubifs_lpt_lookup_dirty(c, data.lnum); 873 876 if (IS_ERR(lprops)) 874 877 return PTR_ERR(lprops); 875 - ubifs_assert(lprops->lnum == data.lnum); 876 - ubifs_assert(lprops->free + lprops->dirty >= c->min_idx_node_sz); 877 - ubifs_assert(!(lprops->flags & LPROPS_TAKEN)); 878 - ubifs_assert((lprops->flags & LPROPS_INDEX)); 878 + ubifs_assert(c, lprops->lnum == data.lnum); 879 + ubifs_assert(c, lprops->free + lprops->dirty >= c->min_idx_node_sz); 880 + ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN)); 881 + ubifs_assert(c, (lprops->flags & LPROPS_INDEX)); 879 882 880 883 dbg_find("found dirty LEB %d, free %d, dirty %d, flags %#x", 881 884 lprops->lnum, lprops->free, lprops->dirty, lprops->flags); ··· 944 947 } 945 948 dbg_find("LEB %d, dirty %d and free %d flags %#x", lp->lnum, lp->dirty, 946 949 lp->free, lp->flags); 947 - ubifs_assert(lp->flags & LPROPS_TAKEN); 948 - ubifs_assert(lp->flags & LPROPS_INDEX); 950 + ubifs_assert(c, lp->flags & LPROPS_TAKEN); 951 + ubifs_assert(c, lp->flags & LPROPS_INDEX); 949 952 return lnum; 950 953 } 951 954
+33 -33
fs/ubifs/gc.c
··· 83 83 int err, gc_lnum = c->gc_lnum; 84 84 struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf; 85 85 86 - ubifs_assert(gc_lnum != -1); 86 + ubifs_assert(c, gc_lnum != -1); 87 87 dbg_gc("switch GC head from LEB %d:%d to LEB %d (waste %d bytes)", 88 88 wbuf->lnum, wbuf->offs + wbuf->used, gc_lnum, 89 89 c->leb_size - wbuf->offs - wbuf->used); ··· 131 131 sa = list_entry(a, struct ubifs_scan_node, list); 132 132 sb = list_entry(b, struct ubifs_scan_node, list); 133 133 134 - ubifs_assert(key_type(c, &sa->key) == UBIFS_DATA_KEY); 135 - ubifs_assert(key_type(c, &sb->key) == UBIFS_DATA_KEY); 136 - ubifs_assert(sa->type == UBIFS_DATA_NODE); 137 - ubifs_assert(sb->type == UBIFS_DATA_NODE); 134 + ubifs_assert(c, key_type(c, &sa->key) == UBIFS_DATA_KEY); 135 + ubifs_assert(c, key_type(c, &sb->key) == UBIFS_DATA_KEY); 136 + ubifs_assert(c, sa->type == UBIFS_DATA_NODE); 137 + ubifs_assert(c, sb->type == UBIFS_DATA_NODE); 138 138 139 139 inuma = key_inum(c, &sa->key); 140 140 inumb = key_inum(c, &sb->key); ··· 175 175 sa = list_entry(a, struct ubifs_scan_node, list); 176 176 sb = list_entry(b, struct ubifs_scan_node, list); 177 177 178 - ubifs_assert(key_type(c, &sa->key) != UBIFS_DATA_KEY && 178 + ubifs_assert(c, key_type(c, &sa->key) != UBIFS_DATA_KEY && 179 179 key_type(c, &sb->key) != UBIFS_DATA_KEY); 180 - ubifs_assert(sa->type != UBIFS_DATA_NODE && 180 + ubifs_assert(c, sa->type != UBIFS_DATA_NODE && 181 181 sb->type != UBIFS_DATA_NODE); 182 182 183 183 /* Inodes go before directory entries */ ··· 189 189 if (sb->type == UBIFS_INO_NODE) 190 190 return 1; 191 191 192 - ubifs_assert(key_type(c, &sa->key) == UBIFS_DENT_KEY || 192 + ubifs_assert(c, key_type(c, &sa->key) == UBIFS_DENT_KEY || 193 193 key_type(c, &sa->key) == UBIFS_XENT_KEY); 194 - ubifs_assert(key_type(c, &sb->key) == UBIFS_DENT_KEY || 194 + ubifs_assert(c, key_type(c, &sb->key) == UBIFS_DENT_KEY || 195 195 key_type(c, &sb->key) == UBIFS_XENT_KEY); 196 - ubifs_assert(sa->type == UBIFS_DENT_NODE || 196 + 
ubifs_assert(c, sa->type == UBIFS_DENT_NODE || 197 197 sa->type == UBIFS_XENT_NODE); 198 - ubifs_assert(sb->type == UBIFS_DENT_NODE || 198 + ubifs_assert(c, sb->type == UBIFS_DENT_NODE || 199 199 sb->type == UBIFS_XENT_NODE); 200 200 201 201 inuma = key_inum(c, &sa->key); ··· 250 250 251 251 /* Separate data nodes and non-data nodes */ 252 252 list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) { 253 - ubifs_assert(snod->type == UBIFS_INO_NODE || 253 + ubifs_assert(c, snod->type == UBIFS_INO_NODE || 254 254 snod->type == UBIFS_DATA_NODE || 255 255 snod->type == UBIFS_DENT_NODE || 256 256 snod->type == UBIFS_XENT_NODE || ··· 266 266 continue; 267 267 } 268 268 269 - ubifs_assert(key_type(c, &snod->key) == UBIFS_DATA_KEY || 269 + ubifs_assert(c, key_type(c, &snod->key) == UBIFS_DATA_KEY || 270 270 key_type(c, &snod->key) == UBIFS_INO_KEY || 271 271 key_type(c, &snod->key) == UBIFS_DENT_KEY || 272 272 key_type(c, &snod->key) == UBIFS_XENT_KEY); ··· 469 469 struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf; 470 470 int err = 0, lnum = lp->lnum; 471 471 472 - ubifs_assert(c->gc_lnum != -1 || wbuf->offs + wbuf->used == 0 || 472 + ubifs_assert(c, c->gc_lnum != -1 || wbuf->offs + wbuf->used == 0 || 473 473 c->need_recovery); 474 - ubifs_assert(c->gc_lnum != lnum); 475 - ubifs_assert(wbuf->lnum != lnum); 474 + ubifs_assert(c, c->gc_lnum != lnum); 475 + ubifs_assert(c, wbuf->lnum != lnum); 476 476 477 477 if (lp->free + lp->dirty == c->leb_size) { 478 478 /* Special case - a free LEB */ 479 479 dbg_gc("LEB %d is free, return it", lp->lnum); 480 - ubifs_assert(!(lp->flags & LPROPS_INDEX)); 480 + ubifs_assert(c, !(lp->flags & LPROPS_INDEX)); 481 481 482 482 if (lp->free != c->leb_size) { 483 483 /* 484 484 * Write buffers must be sync'd before unmapping 485 485 * freeable LEBs, because one of them may contain data 486 - * which obsoletes something in 'lp->pnum'. 486 + * which obsoletes something in 'lp->lnum'. 
487 487 */ 488 488 err = gc_sync_wbufs(c); 489 489 if (err) ··· 513 513 if (IS_ERR(sleb)) 514 514 return PTR_ERR(sleb); 515 515 516 - ubifs_assert(!list_empty(&sleb->nodes)); 516 + ubifs_assert(c, !list_empty(&sleb->nodes)); 517 517 snod = list_entry(sleb->nodes.next, struct ubifs_scan_node, list); 518 518 519 519 if (snod->type == UBIFS_IDX_NODE) { ··· 525 525 struct ubifs_idx_node *idx = snod->node; 526 526 int level = le16_to_cpu(idx->level); 527 527 528 - ubifs_assert(snod->type == UBIFS_IDX_NODE); 528 + ubifs_assert(c, snod->type == UBIFS_IDX_NODE); 529 529 key_read(c, ubifs_idx_key(c, idx), &snod->key); 530 530 err = ubifs_dirty_idx_node(c, &snod->key, level, lnum, 531 531 snod->offs); ··· 648 648 struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf; 649 649 650 650 ubifs_assert_cmt_locked(c); 651 - ubifs_assert(!c->ro_media && !c->ro_mount); 651 + ubifs_assert(c, !c->ro_media && !c->ro_mount); 652 652 653 653 if (ubifs_gc_should_commit(c)) 654 654 return -EAGAIN; ··· 661 661 } 662 662 663 663 /* We expect the write-buffer to be empty on entry */ 664 - ubifs_assert(!wbuf->used); 664 + ubifs_assert(c, !wbuf->used); 665 665 666 666 for (i = 0; ; i++) { 667 667 int space_before, space_after; ··· 752 752 continue; 753 753 } 754 754 755 - ubifs_assert(ret == LEB_RETAINED); 755 + ubifs_assert(c, ret == LEB_RETAINED); 756 756 space_after = c->leb_size - wbuf->offs - wbuf->used; 757 757 dbg_gc("LEB %d retained, freed %d bytes", lp.lnum, 758 758 space_after - space_before); ··· 812 812 return ret; 813 813 814 814 out: 815 - ubifs_assert(ret < 0); 816 - ubifs_assert(ret != -ENOSPC && ret != -EAGAIN); 815 + ubifs_assert(c, ret < 0); 816 + ubifs_assert(c, ret != -ENOSPC && ret != -EAGAIN); 817 817 ubifs_wbuf_sync_nolock(wbuf); 818 818 ubifs_ro_mode(c, ret); 819 819 mutex_unlock(&wbuf->io_mutex); ··· 848 848 lp = ubifs_fast_find_freeable(c); 849 849 if (!lp) 850 850 break; 851 - ubifs_assert(!(lp->flags & LPROPS_TAKEN)); 852 - ubifs_assert(!(lp->flags & LPROPS_INDEX)); 851 + 
ubifs_assert(c, !(lp->flags & LPROPS_TAKEN)); 852 + ubifs_assert(c, !(lp->flags & LPROPS_INDEX)); 853 853 err = ubifs_leb_unmap(c, lp->lnum); 854 854 if (err) 855 855 goto out; ··· 858 858 err = PTR_ERR(lp); 859 859 goto out; 860 860 } 861 - ubifs_assert(!(lp->flags & LPROPS_TAKEN)); 862 - ubifs_assert(!(lp->flags & LPROPS_INDEX)); 861 + ubifs_assert(c, !(lp->flags & LPROPS_TAKEN)); 862 + ubifs_assert(c, !(lp->flags & LPROPS_INDEX)); 863 863 } 864 864 865 865 /* Mark GC'd index LEBs OK to unmap after this commit finishes */ ··· 880 880 err = -ENOMEM; 881 881 goto out; 882 882 } 883 - ubifs_assert(!(lp->flags & LPROPS_TAKEN)); 884 - ubifs_assert(lp->flags & LPROPS_INDEX); 883 + ubifs_assert(c, !(lp->flags & LPROPS_TAKEN)); 884 + ubifs_assert(c, lp->flags & LPROPS_INDEX); 885 885 /* Don't release the LEB until after the next commit */ 886 886 flags = (lp->flags | LPROPS_TAKEN) ^ LPROPS_INDEX; 887 887 lp = ubifs_change_lp(c, lp, c->leb_size, 0, flags, 1); ··· 890 890 kfree(idx_gc); 891 891 goto out; 892 892 } 893 - ubifs_assert(lp->flags & LPROPS_TAKEN); 894 - ubifs_assert(!(lp->flags & LPROPS_INDEX)); 893 + ubifs_assert(c, lp->flags & LPROPS_TAKEN); 894 + ubifs_assert(c, !(lp->flags & LPROPS_INDEX)); 895 895 idx_gc->lnum = lp->lnum; 896 896 idx_gc->unmap = 1; 897 897 list_add(&idx_gc->list, &c->idx_gc);
+49 -48
fs/ubifs/io.c
··· 119 119 { 120 120 int err; 121 121 122 - ubifs_assert(!c->ro_media && !c->ro_mount); 122 + ubifs_assert(c, !c->ro_media && !c->ro_mount); 123 123 if (c->ro_error) 124 124 return -EROFS; 125 125 if (!dbg_is_tst_rcvry(c)) ··· 139 139 { 140 140 int err; 141 141 142 - ubifs_assert(!c->ro_media && !c->ro_mount); 142 + ubifs_assert(c, !c->ro_media && !c->ro_mount); 143 143 if (c->ro_error) 144 144 return -EROFS; 145 145 if (!dbg_is_tst_rcvry(c)) ··· 159 159 { 160 160 int err; 161 161 162 - ubifs_assert(!c->ro_media && !c->ro_mount); 162 + ubifs_assert(c, !c->ro_media && !c->ro_mount); 163 163 if (c->ro_error) 164 164 return -EROFS; 165 165 if (!dbg_is_tst_rcvry(c)) ··· 178 178 { 179 179 int err; 180 180 181 - ubifs_assert(!c->ro_media && !c->ro_mount); 181 + ubifs_assert(c, !c->ro_media && !c->ro_mount); 182 182 if (c->ro_error) 183 183 return -EROFS; 184 184 if (!dbg_is_tst_rcvry(c)) ··· 241 241 uint32_t crc, node_crc, magic; 242 242 const struct ubifs_ch *ch = buf; 243 243 244 - ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0); 245 - ubifs_assert(!(offs & 7) && offs < c->leb_size); 244 + ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0); 245 + ubifs_assert(c, !(offs & 7) && offs < c->leb_size); 246 246 247 247 magic = le32_to_cpu(ch->magic); 248 248 if (magic != UBIFS_NODE_MAGIC) { ··· 319 319 { 320 320 uint32_t crc; 321 321 322 - ubifs_assert(pad >= 0 && !(pad & 7)); 322 + ubifs_assert(c, pad >= 0 && !(pad & 7)); 323 323 324 324 if (pad >= UBIFS_PAD_NODE_SZ) { 325 325 struct ubifs_ch *ch = buf; ··· 382 382 struct ubifs_ch *ch = node; 383 383 unsigned long long sqnum = next_sqnum(c); 384 384 385 - ubifs_assert(len >= UBIFS_CH_SZ); 385 + ubifs_assert(c, len >= UBIFS_CH_SZ); 386 386 387 387 ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC); 388 388 ch->len = cpu_to_le32(len); ··· 415 415 struct ubifs_ch *ch = node; 416 416 unsigned long long sqnum = next_sqnum(c); 417 417 418 - ubifs_assert(len >= UBIFS_CH_SZ); 418 + ubifs_assert(c, len >= UBIFS_CH_SZ); 
419 419 420 420 ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC); 421 421 ch->len = cpu_to_le32(len); ··· 448 448 449 449 /** 450 450 * new_wbuf_timer - start new write-buffer timer. 451 + * @c: UBIFS file-system description object 451 452 * @wbuf: write-buffer descriptor 452 453 */ 453 - static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf) 454 + static void new_wbuf_timer_nolock(struct ubifs_info *c, struct ubifs_wbuf *wbuf) 454 455 { 455 456 ktime_t softlimit = ms_to_ktime(dirty_writeback_interval * 10); 456 457 unsigned long long delta = dirty_writeback_interval; ··· 459 458 /* centi to milli, milli to nano, then 10% */ 460 459 delta *= 10ULL * NSEC_PER_MSEC / 10ULL; 461 460 462 - ubifs_assert(!hrtimer_active(&wbuf->timer)); 463 - ubifs_assert(delta <= ULONG_MAX); 461 + ubifs_assert(c, !hrtimer_active(&wbuf->timer)); 462 + ubifs_assert(c, delta <= ULONG_MAX); 464 463 465 464 if (wbuf->no_timer) 466 465 return; ··· 509 508 510 509 dbg_io("LEB %d:%d, %d bytes, jhead %s", 511 510 wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead)); 512 - ubifs_assert(!(wbuf->avail & 7)); 513 - ubifs_assert(wbuf->offs + wbuf->size <= c->leb_size); 514 - ubifs_assert(wbuf->size >= c->min_io_size); 515 - ubifs_assert(wbuf->size <= c->max_write_size); 516 - ubifs_assert(wbuf->size % c->min_io_size == 0); 517 - ubifs_assert(!c->ro_media && !c->ro_mount); 511 + ubifs_assert(c, !(wbuf->avail & 7)); 512 + ubifs_assert(c, wbuf->offs + wbuf->size <= c->leb_size); 513 + ubifs_assert(c, wbuf->size >= c->min_io_size); 514 + ubifs_assert(c, wbuf->size <= c->max_write_size); 515 + ubifs_assert(c, wbuf->size % c->min_io_size == 0); 516 + ubifs_assert(c, !c->ro_media && !c->ro_mount); 518 517 if (c->leb_size - wbuf->offs >= c->max_write_size) 519 - ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size)); 518 + ubifs_assert(c, !((wbuf->offs + wbuf->size) % c->max_write_size)); 520 519 521 520 if (c->ro_error) 522 521 return -EROFS; ··· 577 576 const struct ubifs_info *c = wbuf->c; 578 
577 579 578 dbg_io("LEB %d:%d, jhead %s", lnum, offs, dbg_jhead(wbuf->jhead)); 580 - ubifs_assert(lnum >= 0 && lnum < c->leb_cnt); 581 - ubifs_assert(offs >= 0 && offs <= c->leb_size); 582 - ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7)); 583 - ubifs_assert(lnum != wbuf->lnum); 584 - ubifs_assert(wbuf->used == 0); 579 + ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt); 580 + ubifs_assert(c, offs >= 0 && offs <= c->leb_size); 581 + ubifs_assert(c, offs % c->min_io_size == 0 && !(offs & 7)); 582 + ubifs_assert(c, lnum != wbuf->lnum); 583 + ubifs_assert(c, wbuf->used == 0); 585 584 586 585 spin_lock(&wbuf->lock); 587 586 wbuf->lnum = lnum; ··· 611 610 { 612 611 int err, i; 613 612 614 - ubifs_assert(!c->ro_media && !c->ro_mount); 613 + ubifs_assert(c, !c->ro_media && !c->ro_mount); 615 614 if (!c->need_wbuf_sync) 616 615 return 0; 617 616 c->need_wbuf_sync = 0; ··· 687 686 dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len, 688 687 dbg_ntype(((struct ubifs_ch *)buf)->node_type), 689 688 dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs + wbuf->used); 690 - ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt); 691 - ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0); 692 - ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size); 693 - ubifs_assert(wbuf->avail > 0 && wbuf->avail <= wbuf->size); 694 - ubifs_assert(wbuf->size >= c->min_io_size); 695 - ubifs_assert(wbuf->size <= c->max_write_size); 696 - ubifs_assert(wbuf->size % c->min_io_size == 0); 697 - ubifs_assert(mutex_is_locked(&wbuf->io_mutex)); 698 - ubifs_assert(!c->ro_media && !c->ro_mount); 699 - ubifs_assert(!c->space_fixup); 689 + ubifs_assert(c, len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt); 690 + ubifs_assert(c, wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0); 691 + ubifs_assert(c, !(wbuf->offs & 7) && wbuf->offs <= c->leb_size); 692 + ubifs_assert(c, wbuf->avail > 0 && wbuf->avail <= wbuf->size); 693 + ubifs_assert(c, wbuf->size >= 
c->min_io_size); 694 + ubifs_assert(c, wbuf->size <= c->max_write_size); 695 + ubifs_assert(c, wbuf->size % c->min_io_size == 0); 696 + ubifs_assert(c, mutex_is_locked(&wbuf->io_mutex)); 697 + ubifs_assert(c, !c->ro_media && !c->ro_mount); 698 + ubifs_assert(c, !c->space_fixup); 700 699 if (c->leb_size - wbuf->offs >= c->max_write_size) 701 - ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size)); 700 + ubifs_assert(c, !((wbuf->offs + wbuf->size) % c->max_write_size)); 702 701 703 702 if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) { 704 703 err = -ENOSPC; ··· 835 834 } 836 835 837 836 if (wbuf->used) 838 - new_wbuf_timer_nolock(wbuf); 837 + new_wbuf_timer_nolock(c, wbuf); 839 838 840 839 return 0; 841 840 ··· 870 869 dbg_io("LEB %d:%d, %s, length %d (aligned %d)", 871 870 lnum, offs, dbg_ntype(((struct ubifs_ch *)buf)->node_type), len, 872 871 buf_len); 873 - ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0); 874 - ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size); 875 - ubifs_assert(!c->ro_media && !c->ro_mount); 876 - ubifs_assert(!c->space_fixup); 872 + ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0); 873 + ubifs_assert(c, offs % c->min_io_size == 0 && offs < c->leb_size); 874 + ubifs_assert(c, !c->ro_media && !c->ro_mount); 875 + ubifs_assert(c, !c->space_fixup); 877 876 878 877 if (c->ro_error) 879 878 return -EROFS; ··· 910 909 911 910 dbg_io("LEB %d:%d, %s, length %d, jhead %s", lnum, offs, 912 911 dbg_ntype(type), len, dbg_jhead(wbuf->jhead)); 913 - ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0); 914 - ubifs_assert(!(offs & 7) && offs < c->leb_size); 915 - ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT); 912 + ubifs_assert(c, wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0); 913 + ubifs_assert(c, !(offs & 7) && offs < c->leb_size); 914 + ubifs_assert(c, type >= 0 && type < UBIFS_NODE_TYPES_CNT); 916 915 917 916 spin_lock(&wbuf->lock); 918 917 overlap = (lnum == wbuf->lnum && 
offs + len > wbuf->offs); ··· 985 984 struct ubifs_ch *ch = buf; 986 985 987 986 dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len); 988 - ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0); 989 - ubifs_assert(len >= UBIFS_CH_SZ && offs + len <= c->leb_size); 990 - ubifs_assert(!(offs & 7) && offs < c->leb_size); 991 - ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT); 987 + ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0); 988 + ubifs_assert(c, len >= UBIFS_CH_SZ && offs + len <= c->leb_size); 989 + ubifs_assert(c, !(offs & 7) && offs < c->leb_size); 990 + ubifs_assert(c, type >= 0 && type < UBIFS_NODE_TYPES_CNT); 992 991 993 992 err = ubifs_leb_read(c, lnum, buf, offs, len, 0); 994 993 if (err && err != -EBADMSG)
+43 -29
fs/ubifs/journal.c
··· 111 111 * better to try to allocate space at the ends of eraseblocks. This is 112 112 * what the squeeze parameter does. 113 113 */ 114 - ubifs_assert(!c->ro_media && !c->ro_mount); 114 + ubifs_assert(c, !c->ro_media && !c->ro_mount); 115 115 squeeze = (jhead == BASEHD); 116 116 again: 117 117 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead); ··· 215 215 216 216 out_return: 217 217 /* An error occurred and the LEB has to be returned to lprops */ 218 - ubifs_assert(err < 0); 218 + ubifs_assert(c, err < 0); 219 219 err1 = ubifs_return_leb(c, lnum); 220 220 if (err1 && err == -EAGAIN) 221 221 /* ··· 246 246 { 247 247 struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf; 248 248 249 - ubifs_assert(jhead != GCHD); 249 + ubifs_assert(c, jhead != GCHD); 250 250 251 251 *lnum = c->jheads[jhead].wbuf.lnum; 252 252 *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used; ··· 278 278 int err; 279 279 struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf; 280 280 281 - ubifs_assert(jhead != GCHD); 281 + ubifs_assert(c, jhead != GCHD); 282 282 283 283 *lnum = c->jheads[jhead].wbuf.lnum; 284 284 *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used; ··· 317 317 down_read(&c->commit_sem); 318 318 err = reserve_space(c, jhead, len); 319 319 if (!err) 320 + /* c->commit_sem will get released via finish_reservation(). 
*/ 320 321 return 0; 321 322 up_read(&c->commit_sem); 322 323 ··· 549 548 struct ubifs_ino_node *ino; 550 549 union ubifs_key dent_key, ino_key; 551 550 552 - ubifs_assert(mutex_is_locked(&host_ui->ui_mutex)); 551 + ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex)); 553 552 554 553 dlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1; 555 554 ilen = UBIFS_INO_NODE_SZ; ··· 665 664 spin_lock(&ui->ui_lock); 666 665 ui->synced_i_size = ui->ui_size; 667 666 spin_unlock(&ui->ui_lock); 667 + if (xent) { 668 + spin_lock(&host_ui->ui_lock); 669 + host_ui->synced_i_size = host_ui->ui_size; 670 + spin_unlock(&host_ui->ui_lock); 671 + } 668 672 mark_inode_clean(c, ui); 669 673 mark_inode_clean(c, host_ui); 670 674 return 0; ··· 713 707 714 708 dbg_jnlk(key, "ino %lu, blk %u, len %d, key ", 715 709 (unsigned long)key_inum(c, key), key_block(c, key), len); 716 - ubifs_assert(len <= UBIFS_BLOCK_SIZE); 710 + ubifs_assert(c, len <= UBIFS_BLOCK_SIZE); 717 711 718 712 if (encrypted) 719 713 dlen += UBIFS_CIPHER_BLOCK_SIZE; ··· 744 738 745 739 out_len = compr_len = dlen - UBIFS_DATA_NODE_SZ; 746 740 ubifs_compress(c, buf, len, &data->data, &compr_len, &compr_type); 747 - ubifs_assert(compr_len <= UBIFS_BLOCK_SIZE); 741 + ubifs_assert(c, compr_len <= UBIFS_BLOCK_SIZE); 748 742 749 743 if (encrypted) { 750 744 err = ubifs_encrypt(inode, data, compr_len, &out_len, key_block(c, key)); ··· 904 898 int err; 905 899 struct ubifs_inode *ui = ubifs_inode(inode); 906 900 907 - ubifs_assert(inode->i_nlink == 0); 901 + ubifs_assert(c, inode->i_nlink == 0); 908 902 909 903 if (ui->del_cmtno != c->cmt_no) 910 904 /* A commit happened for sure */ ··· 959 953 int twoparents = (fst_dir != snd_dir); 960 954 void *p; 961 955 962 - ubifs_assert(ubifs_inode(fst_dir)->data_len == 0); 963 - ubifs_assert(ubifs_inode(snd_dir)->data_len == 0); 964 - ubifs_assert(mutex_is_locked(&ubifs_inode(fst_dir)->ui_mutex)); 965 - ubifs_assert(mutex_is_locked(&ubifs_inode(snd_dir)->ui_mutex)); 956 + ubifs_assert(c, 
ubifs_inode(fst_dir)->data_len == 0); 957 + ubifs_assert(c, ubifs_inode(snd_dir)->data_len == 0); 958 + ubifs_assert(c, mutex_is_locked(&ubifs_inode(fst_dir)->ui_mutex)); 959 + ubifs_assert(c, mutex_is_locked(&ubifs_inode(snd_dir)->ui_mutex)); 966 960 967 961 dlen1 = UBIFS_DENT_NODE_SZ + fname_len(snd_nm) + 1; 968 962 dlen2 = UBIFS_DENT_NODE_SZ + fname_len(fst_nm) + 1; ··· 1102 1096 int move = (old_dir != new_dir); 1103 1097 struct ubifs_inode *uninitialized_var(new_ui); 1104 1098 1105 - ubifs_assert(ubifs_inode(old_dir)->data_len == 0); 1106 - ubifs_assert(ubifs_inode(new_dir)->data_len == 0); 1107 - ubifs_assert(mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex)); 1108 - ubifs_assert(mutex_is_locked(&ubifs_inode(new_dir)->ui_mutex)); 1099 + ubifs_assert(c, ubifs_inode(old_dir)->data_len == 0); 1100 + ubifs_assert(c, ubifs_inode(new_dir)->data_len == 0); 1101 + ubifs_assert(c, mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex)); 1102 + ubifs_assert(c, mutex_is_locked(&ubifs_inode(new_dir)->ui_mutex)); 1109 1103 1110 1104 dlen1 = UBIFS_DENT_NODE_SZ + fname_len(new_nm) + 1; 1111 1105 dlen2 = UBIFS_DENT_NODE_SZ + fname_len(old_nm) + 1; 1112 1106 if (new_inode) { 1113 1107 new_ui = ubifs_inode(new_inode); 1114 - ubifs_assert(mutex_is_locked(&new_ui->ui_mutex)); 1108 + ubifs_assert(c, mutex_is_locked(&new_ui->ui_mutex)); 1115 1109 ilen = UBIFS_INO_NODE_SZ; 1116 1110 if (!last_reference) 1117 1111 ilen += new_ui->data_len; ··· 1288 1282 int *new_len) 1289 1283 { 1290 1284 void *buf; 1291 - int err, compr_type; 1292 - u32 dlen, out_len, old_dlen; 1285 + int err, dlen, compr_type, out_len, old_dlen; 1293 1286 1294 1287 out_len = le32_to_cpu(dn->size); 1295 1288 buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS); ··· 1324 1319 dn->compr_size = 0; 1325 1320 } 1326 1321 1327 - ubifs_assert(out_len <= UBIFS_BLOCK_SIZE); 1322 + ubifs_assert(c, out_len <= UBIFS_BLOCK_SIZE); 1328 1323 dn->compr_type = cpu_to_le16(compr_type); 1329 1324 dn->size = cpu_to_le32(*new_len); 1330 
1325 *new_len = UBIFS_DATA_NODE_SZ + out_len; ··· 1363 1358 1364 1359 dbg_jnl("ino %lu, size %lld -> %lld", 1365 1360 (unsigned long)inum, old_size, new_size); 1366 - ubifs_assert(!ui->data_len); 1367 - ubifs_assert(S_ISREG(inode->i_mode)); 1368 - ubifs_assert(mutex_is_locked(&ui->ui_mutex)); 1361 + ubifs_assert(c, !ui->data_len); 1362 + ubifs_assert(c, S_ISREG(inode->i_mode)); 1363 + ubifs_assert(c, mutex_is_locked(&ui->ui_mutex)); 1369 1364 1370 1365 sz = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ + 1371 1366 UBIFS_MAX_DATA_NODE_SZ * WORST_COMPR_FACTOR; ··· 1393 1388 else if (err) 1394 1389 goto out_free; 1395 1390 else { 1396 - if (le32_to_cpu(dn->size) <= dlen) 1391 + int dn_len = le32_to_cpu(dn->size); 1392 + 1393 + if (dn_len <= 0 || dn_len > UBIFS_BLOCK_SIZE) { 1394 + ubifs_err(c, "bad data node (block %u, inode %lu)", 1395 + blk, inode->i_ino); 1396 + ubifs_dump_node(c, dn); 1397 + goto out_free; 1398 + } 1399 + 1400 + if (dn_len <= dlen) 1397 1401 dlen = 0; /* Nothing to do */ 1398 1402 else { 1399 1403 err = truncate_data_node(c, inode, blk, dn, &dlen); ··· 1502 1488 int sync = IS_DIRSYNC(host); 1503 1489 struct ubifs_inode *host_ui = ubifs_inode(host); 1504 1490 1505 - ubifs_assert(inode->i_nlink == 0); 1506 - ubifs_assert(mutex_is_locked(&host_ui->ui_mutex)); 1491 + ubifs_assert(c, inode->i_nlink == 0); 1492 + ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex)); 1507 1493 1508 1494 /* 1509 1495 * Since we are deleting the inode, we do not bother to attach any data ··· 1612 1598 int sync = IS_DIRSYNC(host); 1613 1599 1614 1600 dbg_jnl("ino %lu, ino %lu", host->i_ino, inode->i_ino); 1615 - ubifs_assert(host->i_nlink > 0); 1616 - ubifs_assert(inode->i_nlink > 0); 1617 - ubifs_assert(mutex_is_locked(&host_ui->ui_mutex)); 1601 + ubifs_assert(c, host->i_nlink > 0); 1602 + ubifs_assert(c, inode->i_nlink > 0); 1603 + ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex)); 1618 1604 1619 1605 len1 = UBIFS_INO_NODE_SZ + host_ui->data_len; 1620 1606 len2 = 
UBIFS_INO_NODE_SZ + ubifs_inode(inode)->data_len;
+7 -7
fs/ubifs/key.h
··· 161 161 { 162 162 uint32_t hash = c->key_hash(fname_name(nm), fname_len(nm)); 163 163 164 - ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK)); 165 - ubifs_assert(!nm->hash && !nm->minor_hash); 164 + ubifs_assert(c, !(hash & ~UBIFS_S_KEY_HASH_MASK)); 165 + ubifs_assert(c, !nm->hash && !nm->minor_hash); 166 166 key->u32[0] = inum; 167 167 key->u32[1] = hash | (UBIFS_DENT_KEY << UBIFS_S_KEY_HASH_BITS); 168 168 } ··· 179 179 union ubifs_key *key, ino_t inum, 180 180 uint32_t hash) 181 181 { 182 - ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK)); 182 + ubifs_assert(c, !(hash & ~UBIFS_S_KEY_HASH_MASK)); 183 183 key->u32[0] = inum; 184 184 key->u32[1] = hash | (UBIFS_DENT_KEY << UBIFS_S_KEY_HASH_BITS); 185 185 } ··· 198 198 union ubifs_key *key = k; 199 199 uint32_t hash = c->key_hash(fname_name(nm), fname_len(nm)); 200 200 201 - ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK)); 201 + ubifs_assert(c, !(hash & ~UBIFS_S_KEY_HASH_MASK)); 202 202 key->j32[0] = cpu_to_le32(inum); 203 203 key->j32[1] = cpu_to_le32(hash | 204 204 (UBIFS_DENT_KEY << UBIFS_S_KEY_HASH_BITS)); ··· 231 231 { 232 232 uint32_t hash = c->key_hash(fname_name(nm), fname_len(nm)); 233 233 234 - ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK)); 234 + ubifs_assert(c, !(hash & ~UBIFS_S_KEY_HASH_MASK)); 235 235 key->u32[0] = inum; 236 236 key->u32[1] = hash | (UBIFS_XENT_KEY << UBIFS_S_KEY_HASH_BITS); 237 237 } ··· 249 249 union ubifs_key *key = k; 250 250 uint32_t hash = c->key_hash(fname_name(nm), fname_len(nm)); 251 251 252 - ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK)); 252 + ubifs_assert(c, !(hash & ~UBIFS_S_KEY_HASH_MASK)); 253 253 key->j32[0] = cpu_to_le32(inum); 254 254 key->j32[1] = cpu_to_le32(hash | 255 255 (UBIFS_XENT_KEY << UBIFS_S_KEY_HASH_BITS)); ··· 280 280 union ubifs_key *key, ino_t inum, 281 281 unsigned int block) 282 282 { 283 - ubifs_assert(!(block & ~UBIFS_S_KEY_BLOCK_MASK)); 283 + ubifs_assert(c, !(block & ~UBIFS_S_KEY_BLOCK_MASK)); 284 284 key->u32[0] = inum; 285 285 key->u32[1] = block | 
(UBIFS_DATA_KEY << UBIFS_S_KEY_BLOCK_BITS); 286 286 }
+6 -6
fs/ubifs/log.c
··· 132 132 while (*p) { 133 133 parent = *p; 134 134 b = rb_entry(parent, struct ubifs_bud, rb); 135 - ubifs_assert(bud->lnum != b->lnum); 135 + ubifs_assert(c, bud->lnum != b->lnum); 136 136 if (bud->lnum < b->lnum) 137 137 p = &(*p)->rb_left; 138 138 else ··· 145 145 jhead = &c->jheads[bud->jhead]; 146 146 list_add_tail(&bud->list, &jhead->buds_list); 147 147 } else 148 - ubifs_assert(c->replaying && c->ro_mount); 148 + ubifs_assert(c, c->replaying && c->ro_mount); 149 149 150 150 /* 151 151 * Note, although this is a new bud, we anyway account this space now, ··· 189 189 } 190 190 191 191 mutex_lock(&c->log_mutex); 192 - ubifs_assert(!c->ro_media && !c->ro_mount); 192 + ubifs_assert(c, !c->ro_media && !c->ro_mount); 193 193 if (c->ro_error) { 194 194 err = -EROFS; 195 195 goto out_unlock; ··· 244 244 245 245 if (c->lhead_offs > c->leb_size - c->ref_node_alsz) { 246 246 c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum); 247 - ubifs_assert(c->lhead_lnum != c->ltail_lnum); 247 + ubifs_assert(c, c->lhead_lnum != c->ltail_lnum); 248 248 c->lhead_offs = 0; 249 249 } 250 250 ··· 301 301 { 302 302 struct rb_node *p; 303 303 304 - ubifs_assert(list_empty(&c->old_buds)); 304 + ubifs_assert(c, list_empty(&c->old_buds)); 305 305 c->cmt_bud_bytes = 0; 306 306 spin_lock(&c->buds_lock); 307 307 p = rb_first(&c->buds); ··· 409 409 /* Switch to the next log LEB */ 410 410 if (c->lhead_offs) { 411 411 c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum); 412 - ubifs_assert(c->lhead_lnum != c->ltail_lnum); 412 + ubifs_assert(c, c->lhead_lnum != c->ltail_lnum); 413 413 c->lhead_offs = 0; 414 414 } 415 415
+47 -47
fs/ubifs/lprops.c
··· 187 187 /* Compare to some other LEB on the bottom of heap */ 188 188 /* Pick a position kind of randomly */ 189 189 cpos = (((size_t)lprops >> 4) & b) + b; 190 - ubifs_assert(cpos >= b); 191 - ubifs_assert(cpos < LPT_HEAP_SZ); 192 - ubifs_assert(cpos < heap->cnt); 190 + ubifs_assert(c, cpos >= b); 191 + ubifs_assert(c, cpos < LPT_HEAP_SZ); 192 + ubifs_assert(c, cpos < heap->cnt); 193 193 194 194 val1 = get_heap_comp_val(lprops, cat); 195 195 val2 = get_heap_comp_val(heap->arr[cpos], cat); ··· 230 230 int hpos = lprops->hpos; 231 231 232 232 heap = &c->lpt_heap[cat - 1]; 233 - ubifs_assert(hpos >= 0 && hpos < heap->cnt); 234 - ubifs_assert(heap->arr[hpos] == lprops); 233 + ubifs_assert(c, hpos >= 0 && hpos < heap->cnt); 234 + ubifs_assert(c, heap->arr[hpos] == lprops); 235 235 heap->cnt -= 1; 236 236 if (hpos < heap->cnt) { 237 237 heap->arr[hpos] = heap->arr[heap->cnt]; ··· 296 296 list_add(&lprops->list, &c->frdi_idx_list); 297 297 break; 298 298 default: 299 - ubifs_assert(0); 299 + ubifs_assert(c, 0); 300 300 } 301 301 302 302 lprops->flags &= ~LPROPS_CAT_MASK; 303 303 lprops->flags |= cat; 304 304 c->in_a_category_cnt += 1; 305 - ubifs_assert(c->in_a_category_cnt <= c->main_lebs); 305 + ubifs_assert(c, c->in_a_category_cnt <= c->main_lebs); 306 306 } 307 307 308 308 /** ··· 324 324 break; 325 325 case LPROPS_FREEABLE: 326 326 c->freeable_cnt -= 1; 327 - ubifs_assert(c->freeable_cnt >= 0); 327 + ubifs_assert(c, c->freeable_cnt >= 0); 328 328 /* Fall through */ 329 329 case LPROPS_UNCAT: 330 330 case LPROPS_EMPTY: 331 331 case LPROPS_FRDI_IDX: 332 - ubifs_assert(!list_empty(&lprops->list)); 332 + ubifs_assert(c, !list_empty(&lprops->list)); 333 333 list_del(&lprops->list); 334 334 break; 335 335 default: 336 - ubifs_assert(0); 336 + ubifs_assert(c, 0); 337 337 } 338 338 339 339 c->in_a_category_cnt -= 1; 340 - ubifs_assert(c->in_a_category_cnt >= 0); 340 + ubifs_assert(c, c->in_a_category_cnt >= 0); 341 341 } 342 342 343 343 /** ··· 369 369 
list_replace(&old_lprops->list, &new_lprops->list); 370 370 break; 371 371 default: 372 - ubifs_assert(0); 372 + ubifs_assert(c, 0); 373 373 } 374 374 } 375 375 ··· 412 412 return LPROPS_UNCAT; 413 413 414 414 if (lprops->free == c->leb_size) { 415 - ubifs_assert(!(lprops->flags & LPROPS_INDEX)); 415 + ubifs_assert(c, !(lprops->flags & LPROPS_INDEX)); 416 416 return LPROPS_EMPTY; 417 417 } 418 418 ··· 478 478 */ 479 479 int ubifs_calc_dark(const struct ubifs_info *c, int spc) 480 480 { 481 - ubifs_assert(!(spc & 7)); 481 + ubifs_assert(c, !(spc & 7)); 482 482 483 483 if (spc < c->dark_wm) 484 484 return spc; ··· 543 543 dbg_lp("LEB %d, free %d, dirty %d, flags %d", 544 544 lprops->lnum, free, dirty, flags); 545 545 546 - ubifs_assert(mutex_is_locked(&c->lp_mutex)); 547 - ubifs_assert(c->lst.empty_lebs >= 0 && 546 + ubifs_assert(c, mutex_is_locked(&c->lp_mutex)); 547 + ubifs_assert(c, c->lst.empty_lebs >= 0 && 548 548 c->lst.empty_lebs <= c->main_lebs); 549 - ubifs_assert(c->freeable_cnt >= 0); 550 - ubifs_assert(c->freeable_cnt <= c->main_lebs); 551 - ubifs_assert(c->lst.taken_empty_lebs >= 0); 552 - ubifs_assert(c->lst.taken_empty_lebs <= c->lst.empty_lebs); 553 - ubifs_assert(!(c->lst.total_free & 7) && !(c->lst.total_dirty & 7)); 554 - ubifs_assert(!(c->lst.total_dead & 7) && !(c->lst.total_dark & 7)); 555 - ubifs_assert(!(c->lst.total_used & 7)); 556 - ubifs_assert(free == LPROPS_NC || free >= 0); 557 - ubifs_assert(dirty == LPROPS_NC || dirty >= 0); 549 + ubifs_assert(c, c->freeable_cnt >= 0); 550 + ubifs_assert(c, c->freeable_cnt <= c->main_lebs); 551 + ubifs_assert(c, c->lst.taken_empty_lebs >= 0); 552 + ubifs_assert(c, c->lst.taken_empty_lebs <= c->lst.empty_lebs); 553 + ubifs_assert(c, !(c->lst.total_free & 7) && !(c->lst.total_dirty & 7)); 554 + ubifs_assert(c, !(c->lst.total_dead & 7) && !(c->lst.total_dark & 7)); 555 + ubifs_assert(c, !(c->lst.total_used & 7)); 556 + ubifs_assert(c, free == LPROPS_NC || free >= 0); 557 + ubifs_assert(c, dirty == 
LPROPS_NC || dirty >= 0); 558 558 559 559 if (!is_lprops_dirty(c, lprops)) { 560 560 lprops = ubifs_lpt_lookup_dirty(c, lprops->lnum); 561 561 if (IS_ERR(lprops)) 562 562 return lprops; 563 563 } else 564 - ubifs_assert(lprops == ubifs_lpt_lookup_dirty(c, lprops->lnum)); 564 + ubifs_assert(c, lprops == ubifs_lpt_lookup_dirty(c, lprops->lnum)); 565 565 566 - ubifs_assert(!(lprops->free & 7) && !(lprops->dirty & 7)); 566 + ubifs_assert(c, !(lprops->free & 7) && !(lprops->dirty & 7)); 567 567 568 568 spin_lock(&c->space_lock); 569 569 if ((lprops->flags & LPROPS_TAKEN) && lprops->free == c->leb_size) ··· 768 768 struct ubifs_lprops *lprops; 769 769 struct ubifs_lpt_heap *heap; 770 770 771 - ubifs_assert(mutex_is_locked(&c->lp_mutex)); 771 + ubifs_assert(c, mutex_is_locked(&c->lp_mutex)); 772 772 773 773 heap = &c->lpt_heap[LPROPS_FREE - 1]; 774 774 if (heap->cnt == 0) 775 775 return NULL; 776 776 777 777 lprops = heap->arr[0]; 778 - ubifs_assert(!(lprops->flags & LPROPS_TAKEN)); 779 - ubifs_assert(!(lprops->flags & LPROPS_INDEX)); 778 + ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN)); 779 + ubifs_assert(c, !(lprops->flags & LPROPS_INDEX)); 780 780 return lprops; 781 781 } 782 782 ··· 791 791 { 792 792 struct ubifs_lprops *lprops; 793 793 794 - ubifs_assert(mutex_is_locked(&c->lp_mutex)); 794 + ubifs_assert(c, mutex_is_locked(&c->lp_mutex)); 795 795 796 796 if (list_empty(&c->empty_list)) 797 797 return NULL; 798 798 799 799 lprops = list_entry(c->empty_list.next, struct ubifs_lprops, list); 800 - ubifs_assert(!(lprops->flags & LPROPS_TAKEN)); 801 - ubifs_assert(!(lprops->flags & LPROPS_INDEX)); 802 - ubifs_assert(lprops->free == c->leb_size); 800 + ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN)); 801 + ubifs_assert(c, !(lprops->flags & LPROPS_INDEX)); 802 + ubifs_assert(c, lprops->free == c->leb_size); 803 803 return lprops; 804 804 } 805 805 ··· 814 814 { 815 815 struct ubifs_lprops *lprops; 816 816 817 - ubifs_assert(mutex_is_locked(&c->lp_mutex)); 817 + 
ubifs_assert(c, mutex_is_locked(&c->lp_mutex)); 818 818 819 819 if (list_empty(&c->freeable_list)) 820 820 return NULL; 821 821 822 822 lprops = list_entry(c->freeable_list.next, struct ubifs_lprops, list); 823 - ubifs_assert(!(lprops->flags & LPROPS_TAKEN)); 824 - ubifs_assert(!(lprops->flags & LPROPS_INDEX)); 825 - ubifs_assert(lprops->free + lprops->dirty == c->leb_size); 826 - ubifs_assert(c->freeable_cnt > 0); 823 + ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN)); 824 + ubifs_assert(c, !(lprops->flags & LPROPS_INDEX)); 825 + ubifs_assert(c, lprops->free + lprops->dirty == c->leb_size); 826 + ubifs_assert(c, c->freeable_cnt > 0); 827 827 return lprops; 828 828 } 829 829 ··· 838 838 { 839 839 struct ubifs_lprops *lprops; 840 840 841 - ubifs_assert(mutex_is_locked(&c->lp_mutex)); 841 + ubifs_assert(c, mutex_is_locked(&c->lp_mutex)); 842 842 843 843 if (list_empty(&c->frdi_idx_list)) 844 844 return NULL; 845 845 846 846 lprops = list_entry(c->frdi_idx_list.next, struct ubifs_lprops, list); 847 - ubifs_assert(!(lprops->flags & LPROPS_TAKEN)); 848 - ubifs_assert((lprops->flags & LPROPS_INDEX)); 849 - ubifs_assert(lprops->free + lprops->dirty == c->leb_size); 847 + ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN)); 848 + ubifs_assert(c, (lprops->flags & LPROPS_INDEX)); 849 + ubifs_assert(c, lprops->free + lprops->dirty == c->leb_size); 850 850 return lprops; 851 851 } 852 852 ··· 1089 1089 } 1090 1090 } 1091 1091 1092 - buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); 1093 - if (!buf) 1094 - return -ENOMEM; 1095 - 1096 1092 /* 1097 1093 * After an unclean unmount, empty and freeable LEBs 1098 1094 * may contain garbage - do not scan them. ··· 1106 1110 lst->total_dark += ubifs_calc_dark(c, c->leb_size); 1107 1111 return LPT_SCAN_CONTINUE; 1108 1112 } 1113 + 1114 + buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); 1115 + if (!buf) 1116 + return -ENOMEM; 1109 1117 1110 1118 sleb = ubifs_scan(c, lnum, 0, buf, 0); 1111 1119 if (IS_ERR(sleb)) {
+57 -55
fs/ubifs/lpt.c
··· 225 225 226 226 /** 227 227 * pack_bits - pack bit fields end-to-end. 228 + * @c: UBIFS file-system description object 228 229 * @addr: address at which to pack (passed and next address returned) 229 230 * @pos: bit position at which to pack (passed and next position returned) 230 231 * @val: value to pack 231 232 * @nrbits: number of bits of value to pack (1-32) 232 233 */ 233 - static void pack_bits(uint8_t **addr, int *pos, uint32_t val, int nrbits) 234 + static void pack_bits(const struct ubifs_info *c, uint8_t **addr, int *pos, uint32_t val, int nrbits) 234 235 { 235 236 uint8_t *p = *addr; 236 237 int b = *pos; 237 238 238 - ubifs_assert(nrbits > 0); 239 - ubifs_assert(nrbits <= 32); 240 - ubifs_assert(*pos >= 0); 241 - ubifs_assert(*pos < 8); 242 - ubifs_assert((val >> nrbits) == 0 || nrbits == 32); 239 + ubifs_assert(c, nrbits > 0); 240 + ubifs_assert(c, nrbits <= 32); 241 + ubifs_assert(c, *pos >= 0); 242 + ubifs_assert(c, *pos < 8); 243 + ubifs_assert(c, (val >> nrbits) == 0 || nrbits == 32); 243 244 if (b) { 244 245 *p |= ((uint8_t)val) << b; 245 246 nrbits += b; ··· 275 274 276 275 /** 277 276 * ubifs_unpack_bits - unpack bit fields. 277 + * @c: UBIFS file-system description object 278 278 * @addr: address at which to unpack (passed and next address returned) 279 279 * @pos: bit position at which to unpack (passed and next position returned) 280 280 * @nrbits: number of bits of value to unpack (1-32) 281 281 * 282 282 * This functions returns the value unpacked. 
283 283 */ 284 - uint32_t ubifs_unpack_bits(uint8_t **addr, int *pos, int nrbits) 284 + uint32_t ubifs_unpack_bits(const struct ubifs_info *c, uint8_t **addr, int *pos, int nrbits) 285 285 { 286 286 const int k = 32 - nrbits; 287 287 uint8_t *p = *addr; ··· 290 288 uint32_t uninitialized_var(val); 291 289 const int bytes = (nrbits + b + 7) >> 3; 292 290 293 - ubifs_assert(nrbits > 0); 294 - ubifs_assert(nrbits <= 32); 295 - ubifs_assert(*pos >= 0); 296 - ubifs_assert(*pos < 8); 291 + ubifs_assert(c, nrbits > 0); 292 + ubifs_assert(c, nrbits <= 32); 293 + ubifs_assert(c, *pos >= 0); 294 + ubifs_assert(c, *pos < 8); 297 295 if (b) { 298 296 switch (bytes) { 299 297 case 2: ··· 339 337 p += nrbits >> 3; 340 338 *addr = p; 341 339 *pos = b; 342 - ubifs_assert((val >> nrbits) == 0 || nrbits - b == 32); 340 + ubifs_assert(c, (val >> nrbits) == 0 || nrbits - b == 32); 343 341 return val; 344 342 } 345 343 ··· 356 354 int i, pos = 0; 357 355 uint16_t crc; 358 356 359 - pack_bits(&addr, &pos, UBIFS_LPT_PNODE, UBIFS_LPT_TYPE_BITS); 357 + pack_bits(c, &addr, &pos, UBIFS_LPT_PNODE, UBIFS_LPT_TYPE_BITS); 360 358 if (c->big_lpt) 361 - pack_bits(&addr, &pos, pnode->num, c->pcnt_bits); 359 + pack_bits(c, &addr, &pos, pnode->num, c->pcnt_bits); 362 360 for (i = 0; i < UBIFS_LPT_FANOUT; i++) { 363 - pack_bits(&addr, &pos, pnode->lprops[i].free >> 3, 361 + pack_bits(c, &addr, &pos, pnode->lprops[i].free >> 3, 364 362 c->space_bits); 365 - pack_bits(&addr, &pos, pnode->lprops[i].dirty >> 3, 363 + pack_bits(c, &addr, &pos, pnode->lprops[i].dirty >> 3, 366 364 c->space_bits); 367 365 if (pnode->lprops[i].flags & LPROPS_INDEX) 368 - pack_bits(&addr, &pos, 1, 1); 366 + pack_bits(c, &addr, &pos, 1, 1); 369 367 else 370 - pack_bits(&addr, &pos, 0, 1); 368 + pack_bits(c, &addr, &pos, 0, 1); 371 369 } 372 370 crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES, 373 371 c->pnode_sz - UBIFS_LPT_CRC_BYTES); 374 372 addr = buf; 375 373 pos = 0; 376 - pack_bits(&addr, &pos, crc, UBIFS_LPT_CRC_BITS); 374 + 
pack_bits(c, &addr, &pos, crc, UBIFS_LPT_CRC_BITS); 377 375 } 378 376 379 377 /** ··· 389 387 int i, pos = 0; 390 388 uint16_t crc; 391 389 392 - pack_bits(&addr, &pos, UBIFS_LPT_NNODE, UBIFS_LPT_TYPE_BITS); 390 + pack_bits(c, &addr, &pos, UBIFS_LPT_NNODE, UBIFS_LPT_TYPE_BITS); 393 391 if (c->big_lpt) 394 - pack_bits(&addr, &pos, nnode->num, c->pcnt_bits); 392 + pack_bits(c, &addr, &pos, nnode->num, c->pcnt_bits); 395 393 for (i = 0; i < UBIFS_LPT_FANOUT; i++) { 396 394 int lnum = nnode->nbranch[i].lnum; 397 395 398 396 if (lnum == 0) 399 397 lnum = c->lpt_last + 1; 400 - pack_bits(&addr, &pos, lnum - c->lpt_first, c->lpt_lnum_bits); 401 - pack_bits(&addr, &pos, nnode->nbranch[i].offs, 398 + pack_bits(c, &addr, &pos, lnum - c->lpt_first, c->lpt_lnum_bits); 399 + pack_bits(c, &addr, &pos, nnode->nbranch[i].offs, 402 400 c->lpt_offs_bits); 403 401 } 404 402 crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES, 405 403 c->nnode_sz - UBIFS_LPT_CRC_BYTES); 406 404 addr = buf; 407 405 pos = 0; 408 - pack_bits(&addr, &pos, crc, UBIFS_LPT_CRC_BITS); 406 + pack_bits(c, &addr, &pos, crc, UBIFS_LPT_CRC_BITS); 409 407 } 410 408 411 409 /** ··· 421 419 int i, pos = 0; 422 420 uint16_t crc; 423 421 424 - pack_bits(&addr, &pos, UBIFS_LPT_LTAB, UBIFS_LPT_TYPE_BITS); 422 + pack_bits(c, &addr, &pos, UBIFS_LPT_LTAB, UBIFS_LPT_TYPE_BITS); 425 423 for (i = 0; i < c->lpt_lebs; i++) { 426 - pack_bits(&addr, &pos, ltab[i].free, c->lpt_spc_bits); 427 - pack_bits(&addr, &pos, ltab[i].dirty, c->lpt_spc_bits); 424 + pack_bits(c, &addr, &pos, ltab[i].free, c->lpt_spc_bits); 425 + pack_bits(c, &addr, &pos, ltab[i].dirty, c->lpt_spc_bits); 428 426 } 429 427 crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES, 430 428 c->ltab_sz - UBIFS_LPT_CRC_BYTES); 431 429 addr = buf; 432 430 pos = 0; 433 - pack_bits(&addr, &pos, crc, UBIFS_LPT_CRC_BITS); 431 + pack_bits(c, &addr, &pos, crc, UBIFS_LPT_CRC_BITS); 434 432 } 435 433 436 434 /** ··· 445 443 int i, pos = 0; 446 444 uint16_t crc; 447 445 448 - pack_bits(&addr, &pos, 
UBIFS_LPT_LSAVE, UBIFS_LPT_TYPE_BITS); 446 + pack_bits(c, &addr, &pos, UBIFS_LPT_LSAVE, UBIFS_LPT_TYPE_BITS); 449 447 for (i = 0; i < c->lsave_cnt; i++) 450 - pack_bits(&addr, &pos, lsave[i], c->lnum_bits); 448 + pack_bits(c, &addr, &pos, lsave[i], c->lnum_bits); 451 449 crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES, 452 450 c->lsave_sz - UBIFS_LPT_CRC_BYTES); 453 451 addr = buf; 454 452 pos = 0; 455 - pack_bits(&addr, &pos, crc, UBIFS_LPT_CRC_BITS); 453 + pack_bits(c, &addr, &pos, crc, UBIFS_LPT_CRC_BITS); 456 454 } 457 455 458 456 /** ··· 467 465 return; 468 466 dbg_lp("LEB %d add %d to %d", 469 467 lnum, dirty, c->ltab[lnum - c->lpt_first].dirty); 470 - ubifs_assert(lnum >= c->lpt_first && lnum <= c->lpt_last); 468 + ubifs_assert(c, lnum >= c->lpt_first && lnum <= c->lpt_last); 471 469 c->ltab[lnum - c->lpt_first].dirty += dirty; 472 470 } 473 471 ··· 483 481 dbg_lp("LEB %d free %d dirty %d to %d %d", 484 482 lnum, c->ltab[lnum - c->lpt_first].free, 485 483 c->ltab[lnum - c->lpt_first].dirty, free, dirty); 486 - ubifs_assert(lnum >= c->lpt_first && lnum <= c->lpt_last); 484 + ubifs_assert(c, lnum >= c->lpt_first && lnum <= c->lpt_last); 487 485 c->ltab[lnum - c->lpt_first].free = free; 488 486 c->ltab[lnum - c->lpt_first].dirty = dirty; 489 487 } ··· 641 639 goto out; 642 640 } 643 641 644 - ubifs_assert(!c->ltab); 642 + ubifs_assert(c, !c->ltab); 645 643 c->ltab = ltab; /* Needed by set_ltab */ 646 644 647 645 /* Initialize LPT's own lprops */ ··· 920 918 uint8_t *addr = buf; 921 919 uint16_t crc, calc_crc; 922 920 923 - crc = ubifs_unpack_bits(&addr, &pos, UBIFS_LPT_CRC_BITS); 921 + crc = ubifs_unpack_bits(c, &addr, &pos, UBIFS_LPT_CRC_BITS); 924 922 calc_crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES, 925 923 len - UBIFS_LPT_CRC_BYTES); 926 924 if (crc != calc_crc) { ··· 946 944 { 947 945 int node_type; 948 946 949 - node_type = ubifs_unpack_bits(addr, pos, UBIFS_LPT_TYPE_BITS); 947 + node_type = ubifs_unpack_bits(c, addr, pos, UBIFS_LPT_TYPE_BITS); 950 948 if 
(node_type != type) { 951 949 ubifs_err(c, "invalid type (%d) in LPT node type %d", 952 950 node_type, type); ··· 974 972 if (err) 975 973 return err; 976 974 if (c->big_lpt) 977 - pnode->num = ubifs_unpack_bits(&addr, &pos, c->pcnt_bits); 975 + pnode->num = ubifs_unpack_bits(c, &addr, &pos, c->pcnt_bits); 978 976 for (i = 0; i < UBIFS_LPT_FANOUT; i++) { 979 977 struct ubifs_lprops * const lprops = &pnode->lprops[i]; 980 978 981 - lprops->free = ubifs_unpack_bits(&addr, &pos, c->space_bits); 979 + lprops->free = ubifs_unpack_bits(c, &addr, &pos, c->space_bits); 982 980 lprops->free <<= 3; 983 - lprops->dirty = ubifs_unpack_bits(&addr, &pos, c->space_bits); 981 + lprops->dirty = ubifs_unpack_bits(c, &addr, &pos, c->space_bits); 984 982 lprops->dirty <<= 3; 985 983 986 - if (ubifs_unpack_bits(&addr, &pos, 1)) 984 + if (ubifs_unpack_bits(c, &addr, &pos, 1)) 987 985 lprops->flags = LPROPS_INDEX; 988 986 else 989 987 lprops->flags = 0; ··· 1011 1009 if (err) 1012 1010 return err; 1013 1011 if (c->big_lpt) 1014 - nnode->num = ubifs_unpack_bits(&addr, &pos, c->pcnt_bits); 1012 + nnode->num = ubifs_unpack_bits(c, &addr, &pos, c->pcnt_bits); 1015 1013 for (i = 0; i < UBIFS_LPT_FANOUT; i++) { 1016 1014 int lnum; 1017 1015 1018 - lnum = ubifs_unpack_bits(&addr, &pos, c->lpt_lnum_bits) + 1016 + lnum = ubifs_unpack_bits(c, &addr, &pos, c->lpt_lnum_bits) + 1019 1017 c->lpt_first; 1020 1018 if (lnum == c->lpt_last + 1) 1021 1019 lnum = 0; 1022 1020 nnode->nbranch[i].lnum = lnum; 1023 - nnode->nbranch[i].offs = ubifs_unpack_bits(&addr, &pos, 1021 + nnode->nbranch[i].offs = ubifs_unpack_bits(c, &addr, &pos, 1024 1022 c->lpt_offs_bits); 1025 1023 } 1026 1024 err = check_lpt_crc(c, buf, c->nnode_sz); ··· 1043 1041 if (err) 1044 1042 return err; 1045 1043 for (i = 0; i < c->lpt_lebs; i++) { 1046 - int free = ubifs_unpack_bits(&addr, &pos, c->lpt_spc_bits); 1047 - int dirty = ubifs_unpack_bits(&addr, &pos, c->lpt_spc_bits); 1044 + int free = ubifs_unpack_bits(c, &addr, &pos, 
c->lpt_spc_bits); 1045 + int dirty = ubifs_unpack_bits(c, &addr, &pos, c->lpt_spc_bits); 1048 1046 1049 1047 if (free < 0 || free > c->leb_size || dirty < 0 || 1050 1048 dirty > c->leb_size || free + dirty > c->leb_size) ··· 1075 1073 if (err) 1076 1074 return err; 1077 1075 for (i = 0; i < c->lsave_cnt; i++) { 1078 - int lnum = ubifs_unpack_bits(&addr, &pos, c->lnum_bits); 1076 + int lnum = ubifs_unpack_bits(c, &addr, &pos, c->lnum_bits); 1079 1077 1080 1078 if (lnum < c->main_first || lnum >= c->leb_cnt) 1081 1079 return -EINVAL; ··· 1517 1515 branch->cnode->parent = n; 1518 1516 } 1519 1517 1520 - ubifs_assert(!test_bit(OBSOLETE_CNODE, &nnode->flags)); 1518 + ubifs_assert(c, !test_bit(OBSOLETE_CNODE, &nnode->flags)); 1521 1519 __set_bit(OBSOLETE_CNODE, &nnode->flags); 1522 1520 1523 1521 c->dirty_nn_cnt += 1; ··· 1560 1558 __clear_bit(COW_CNODE, &p->flags); 1561 1559 replace_cats(c, pnode, p); 1562 1560 1563 - ubifs_assert(!test_bit(OBSOLETE_CNODE, &pnode->flags)); 1561 + ubifs_assert(c, !test_bit(OBSOLETE_CNODE, &pnode->flags)); 1564 1562 __set_bit(OBSOLETE_CNODE, &pnode->flags); 1565 1563 1566 1564 c->dirty_pn_cnt += 1; ··· 1615 1613 dbg_lp("LEB %d, free %d, dirty %d, flags %d", lnum, 1616 1614 pnode->lprops[iip].free, pnode->lprops[iip].dirty, 1617 1615 pnode->lprops[iip].flags); 1618 - ubifs_assert(test_bit(DIRTY_CNODE, &pnode->flags)); 1616 + ubifs_assert(c, test_bit(DIRTY_CNODE, &pnode->flags)); 1619 1617 return &pnode->lprops[iip]; 1620 1618 } 1621 1619 ··· 1891 1889 lprops->flags = ubifs_categorize_lprops(c, lprops); 1892 1890 } 1893 1891 } else { 1894 - ubifs_assert(branch->lnum >= c->lpt_first && 1892 + ubifs_assert(c, branch->lnum >= c->lpt_first && 1895 1893 branch->lnum <= c->lpt_last); 1896 - ubifs_assert(branch->offs >= 0 && branch->offs < c->leb_size); 1894 + ubifs_assert(c, branch->offs >= 0 && branch->offs < c->leb_size); 1897 1895 err = ubifs_leb_read(c, branch->lnum, buf, branch->offs, 1898 1896 c->pnode_sz, 1); 1899 1897 if (err) ··· 1937 
1935 start_lnum = c->main_first; 1938 1936 } 1939 1937 1940 - ubifs_assert(start_lnum >= c->main_first && start_lnum < c->leb_cnt); 1941 - ubifs_assert(end_lnum >= c->main_first && end_lnum < c->leb_cnt); 1938 + ubifs_assert(c, start_lnum >= c->main_first && start_lnum < c->leb_cnt); 1939 + ubifs_assert(c, end_lnum >= c->main_first && end_lnum < c->leb_cnt); 1942 1940 1943 1941 if (!c->nroot) { 1944 1942 err = ubifs_read_nnode(c, NULL, 0); ··· 2057 2055 iip = pnode->iip; 2058 2056 while (1) { 2059 2057 h -= 1; 2060 - ubifs_assert(h >= 0); 2058 + ubifs_assert(c, h >= 0); 2061 2059 nnode = path[h].ptr.nnode; 2062 2060 if (iip + 1 < UBIFS_LPT_FANOUT) 2063 2061 break; ··· 2236 2234 return 0; 2237 2235 2238 2236 while (cnode) { 2239 - ubifs_assert(row >= 0); 2237 + ubifs_assert(c, row >= 0); 2240 2238 nnode = cnode->parent; 2241 2239 if (cnode->level) { 2242 2240 /* cnode is a nnode */
+26 -24
fs/ubifs/lpt_commit.c
··· 34 34 35 35 /** 36 36 * first_dirty_cnode - find first dirty cnode. 37 + * @c: UBIFS file-system description object 37 38 * @nnode: nnode at which to start 38 39 * 39 40 * This function returns the first dirty cnode or %NULL if there is not one. 40 41 */ 41 - static struct ubifs_cnode *first_dirty_cnode(struct ubifs_nnode *nnode) 42 + static struct ubifs_cnode *first_dirty_cnode(const struct ubifs_info *c, struct ubifs_nnode *nnode) 42 43 { 43 - ubifs_assert(nnode); 44 + ubifs_assert(c, nnode); 44 45 while (1) { 45 46 int i, cont = 0; 46 47 ··· 65 64 66 65 /** 67 66 * next_dirty_cnode - find next dirty cnode. 67 + * @c: UBIFS file-system description object 68 68 * @cnode: cnode from which to begin searching 69 69 * 70 70 * This function returns the next dirty cnode or %NULL if there is not one. 71 71 */ 72 - static struct ubifs_cnode *next_dirty_cnode(struct ubifs_cnode *cnode) 72 + static struct ubifs_cnode *next_dirty_cnode(const struct ubifs_info *c, struct ubifs_cnode *cnode) 73 73 { 74 74 struct ubifs_nnode *nnode; 75 75 int i; 76 76 77 - ubifs_assert(cnode); 77 + ubifs_assert(c, cnode); 78 78 nnode = cnode->parent; 79 79 if (!nnode) 80 80 return NULL; ··· 85 83 if (cnode->level == 0) 86 84 return cnode; /* cnode is a pnode */ 87 85 /* cnode is a nnode */ 88 - return first_dirty_cnode((struct ubifs_nnode *)cnode); 86 + return first_dirty_cnode(c, (struct ubifs_nnode *)cnode); 89 87 } 90 88 } 91 89 return (struct ubifs_cnode *)nnode; ··· 108 106 if (!test_bit(DIRTY_CNODE, &c->nroot->flags)) 109 107 return 0; 110 108 111 - c->lpt_cnext = first_dirty_cnode(c->nroot); 109 + c->lpt_cnext = first_dirty_cnode(c, c->nroot); 112 110 cnode = c->lpt_cnext; 113 111 if (!cnode) 114 112 return 0; 115 113 cnt += 1; 116 114 while (1) { 117 - ubifs_assert(!test_bit(COW_CNODE, &cnode->flags)); 115 + ubifs_assert(c, !test_bit(COW_CNODE, &cnode->flags)); 118 116 __set_bit(COW_CNODE, &cnode->flags); 119 - cnext = next_dirty_cnode(cnode); 117 + cnext = next_dirty_cnode(c, 
cnode); 120 118 if (!cnext) { 121 119 cnode->cnext = c->lpt_cnext; 122 120 break; ··· 127 125 } 128 126 dbg_cmt("committing %d cnodes", cnt); 129 127 dbg_lp("committing %d cnodes", cnt); 130 - ubifs_assert(cnt == c->dirty_nn_cnt + c->dirty_pn_cnt); 128 + ubifs_assert(c, cnt == c->dirty_nn_cnt + c->dirty_pn_cnt); 131 129 return cnt; 132 130 } 133 131 ··· 143 141 dbg_lp("LEB %d free %d dirty %d to %d +%d", 144 142 lnum, c->ltab[lnum - c->lpt_first].free, 145 143 c->ltab[lnum - c->lpt_first].dirty, free, dirty); 146 - ubifs_assert(lnum >= c->lpt_first && lnum <= c->lpt_last); 144 + ubifs_assert(c, lnum >= c->lpt_first && lnum <= c->lpt_last); 147 145 c->ltab[lnum - c->lpt_first].free = free; 148 146 c->ltab[lnum - c->lpt_first].dirty += dirty; 149 147 } ··· 239 237 if (err) 240 238 goto no_space; 241 239 offs = 0; 242 - ubifs_assert(lnum >= c->lpt_first && 240 + ubifs_assert(c, lnum >= c->lpt_first && 243 241 lnum <= c->lpt_last); 244 242 /* Try to place lsave and ltab nicely */ 245 243 if (!done_lsave) { ··· 282 280 if (err) 283 281 goto no_space; 284 282 offs = 0; 285 - ubifs_assert(lnum >= c->lpt_first && 283 + ubifs_assert(c, lnum >= c->lpt_first && 286 284 lnum <= c->lpt_last); 287 285 } 288 286 done_lsave = 1; ··· 302 300 if (err) 303 301 goto no_space; 304 302 offs = 0; 305 - ubifs_assert(lnum >= c->lpt_first && 303 + ubifs_assert(c, lnum >= c->lpt_first && 306 304 lnum <= c->lpt_last); 307 305 } 308 306 c->ltab_lnum = lnum; ··· 425 423 if (err) 426 424 goto no_space; 427 425 offs = from = 0; 428 - ubifs_assert(lnum >= c->lpt_first && 426 + ubifs_assert(c, lnum >= c->lpt_first && 429 427 lnum <= c->lpt_last); 430 428 err = ubifs_leb_unmap(c, lnum); 431 429 if (err) ··· 482 480 if (err) 483 481 goto no_space; 484 482 offs = from = 0; 485 - ubifs_assert(lnum >= c->lpt_first && 483 + ubifs_assert(c, lnum >= c->lpt_first && 486 484 lnum <= c->lpt_last); 487 485 err = ubifs_leb_unmap(c, lnum); 488 486 if (err) ··· 508 506 if (err) 509 507 goto no_space; 510 508 offs 
= from = 0; 511 - ubifs_assert(lnum >= c->lpt_first && 509 + ubifs_assert(c, lnum >= c->lpt_first && 512 510 lnum <= c->lpt_last); 513 511 err = ubifs_leb_unmap(c, lnum); 514 512 if (err) ··· 808 806 struct ubifs_lpt_heap *heap; 809 807 int i, cnt = 0; 810 808 811 - ubifs_assert(c->big_lpt); 809 + ubifs_assert(c, c->big_lpt); 812 810 if (!(c->lpt_drty_flgs & LSAVE_DIRTY)) { 813 811 c->lpt_drty_flgs |= LSAVE_DIRTY; 814 812 ubifs_add_lpt_dirt(c, c->lsave_lnum, c->lsave_sz); ··· 1097 1095 uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; 1098 1096 int pos = 0, node_type; 1099 1097 1100 - node_type = ubifs_unpack_bits(&addr, &pos, UBIFS_LPT_TYPE_BITS); 1101 - *node_num = ubifs_unpack_bits(&addr, &pos, c->pcnt_bits); 1098 + node_type = ubifs_unpack_bits(c, &addr, &pos, UBIFS_LPT_TYPE_BITS); 1099 + *node_num = ubifs_unpack_bits(c, &addr, &pos, c->pcnt_bits); 1102 1100 return node_type; 1103 1101 } 1104 1102 ··· 1118 1116 1119 1117 if (len < UBIFS_LPT_CRC_BYTES + (UBIFS_LPT_TYPE_BITS + 7) / 8) 1120 1118 return 0; 1121 - node_type = ubifs_unpack_bits(&addr, &pos, UBIFS_LPT_TYPE_BITS); 1119 + node_type = ubifs_unpack_bits(c, &addr, &pos, UBIFS_LPT_TYPE_BITS); 1122 1120 if (node_type == UBIFS_LPT_NOT_A_NODE) 1123 1121 return 0; 1124 1122 node_len = get_lpt_node_len(c, node_type); ··· 1126 1124 return 0; 1127 1125 pos = 0; 1128 1126 addr = buf; 1129 - crc = ubifs_unpack_bits(&addr, &pos, UBIFS_LPT_CRC_BITS); 1127 + crc = ubifs_unpack_bits(c, &addr, &pos, UBIFS_LPT_CRC_BITS); 1130 1128 calc_crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES, 1131 1129 node_len - UBIFS_LPT_CRC_BYTES); 1132 1130 if (crc != calc_crc) ··· 1172 1170 node_type = get_lpt_node_type(c, buf, &node_num); 1173 1171 node_len = get_lpt_node_len(c, node_type); 1174 1172 offs = c->leb_size - len; 1175 - ubifs_assert(node_len != 0); 1173 + ubifs_assert(c, node_len != 0); 1176 1174 mutex_lock(&c->lp_mutex); 1177 1175 err = make_node_dirty(c, node_type, node_num, lnum, offs); 1178 1176 mutex_unlock(&c->lp_mutex); ··· 1197 1195 
1198 1196 mutex_lock(&c->lp_mutex); 1199 1197 for (i = 0; i < c->lpt_lebs; i++) { 1200 - ubifs_assert(!c->ltab[i].tgc); 1198 + ubifs_assert(c, !c->ltab[i].tgc); 1201 1199 if (i + c->lpt_first == c->nhead_lnum || 1202 1200 c->ltab[i].free + c->ltab[i].dirty == c->leb_size) 1203 1201 continue; ··· 1273 1271 populate_lsave(c); 1274 1272 1275 1273 cnt = get_cnodes_to_commit(c); 1276 - ubifs_assert(cnt != 0); 1274 + ubifs_assert(c, cnt != 0); 1277 1275 1278 1276 err = layout_cnodes(c); 1279 1277 if (err)
+1 -1
fs/ubifs/master.c
··· 360 360 { 361 361 int err, lnum, offs, len; 362 362 363 - ubifs_assert(!c->ro_media && !c->ro_mount); 363 + ubifs_assert(c, !c->ro_media && !c->ro_mount); 364 364 if (c->ro_error) 365 365 return -EROFS; 366 366
+11
fs/ubifs/misc.c
··· 56 56 57 57 va_end(args); 58 58 } 59 + 60 + static char *assert_names[] = { 61 + [ASSACT_REPORT] = "report", 62 + [ASSACT_RO] = "read-only", 63 + [ASSACT_PANIC] = "panic", 64 + }; 65 + 66 + const char *ubifs_assert_action_name(struct ubifs_info *c) 67 + { 68 + return assert_names[c->assert_action]; 69 + }
+10 -6
fs/ubifs/misc.h
··· 105 105 /** 106 106 * ubifs_compr_present - check if compressor was compiled in. 107 107 * @compr_type: compressor type to check 108 + * @c: the UBIFS file-system description object 108 109 * 109 110 * This function returns %1 of compressor of type @compr_type is present, and 110 111 * %0 if not. 111 112 */ 112 - static inline int ubifs_compr_present(int compr_type) 113 + static inline int ubifs_compr_present(struct ubifs_info *c, int compr_type) 113 114 { 114 - ubifs_assert(compr_type >= 0 && compr_type < UBIFS_COMPR_TYPES_CNT); 115 + ubifs_assert(c, compr_type >= 0 && compr_type < UBIFS_COMPR_TYPES_CNT); 115 116 return !!ubifs_compressors[compr_type]->capi_name; 116 117 } 117 118 118 119 /** 119 120 * ubifs_compr_name - get compressor name string by its type. 120 121 * @compr_type: compressor type 122 + * @c: the UBIFS file-system description object 121 123 * 122 124 * This function returns compressor type string. 123 125 */ 124 - static inline const char *ubifs_compr_name(int compr_type) 126 + static inline const char *ubifs_compr_name(struct ubifs_info *c, int compr_type) 125 127 { 126 - ubifs_assert(compr_type >= 0 && compr_type < UBIFS_COMPR_TYPES_CNT); 128 + ubifs_assert(c, compr_type >= 0 && compr_type < UBIFS_COMPR_TYPES_CNT); 127 129 return ubifs_compressors[compr_type]->name; 128 130 } 129 131 ··· 264 262 */ 265 263 static inline void ubifs_release_lprops(struct ubifs_info *c) 266 264 { 267 - ubifs_assert(mutex_is_locked(&c->lp_mutex)); 268 - ubifs_assert(c->lst.empty_lebs >= 0 && 265 + ubifs_assert(c, mutex_is_locked(&c->lp_mutex)); 266 + ubifs_assert(c, c->lst.empty_lebs >= 0 && 269 267 c->lst.empty_lebs <= c->main_lebs); 270 268 mutex_unlock(&c->lp_mutex); 271 269 } ··· 286 284 287 285 return lnum; 288 286 } 287 + 288 + const char *ubifs_assert_action_name(struct ubifs_info *c); 289 289 290 290 #endif /* __UBIFS_MISC_H__ */
+13 -13
fs/ubifs/orphan.c
··· 172 172 spin_lock(&c->orphan_lock); 173 173 last = &c->orph_cnext; 174 174 list_for_each_entry(orphan, &c->orph_new, new_list) { 175 - ubifs_assert(orphan->new); 176 - ubifs_assert(!orphan->cmt); 175 + ubifs_assert(c, orphan->new); 176 + ubifs_assert(c, !orphan->cmt); 177 177 orphan->new = 0; 178 178 orphan->cmt = 1; 179 179 *last = orphan; ··· 244 244 int err = 0; 245 245 246 246 if (atomic) { 247 - ubifs_assert(c->ohead_offs == 0); 247 + ubifs_assert(c, c->ohead_offs == 0); 248 248 ubifs_prepare_node(c, c->orph_buf, len, 1); 249 249 len = ALIGN(len, c->min_io_size); 250 250 err = ubifs_leb_change(c, c->ohead_lnum, c->orph_buf, len); ··· 276 276 struct ubifs_orph_node *orph; 277 277 int gap, err, len, cnt, i; 278 278 279 - ubifs_assert(c->cmt_orphans > 0); 279 + ubifs_assert(c, c->cmt_orphans > 0); 280 280 gap = c->leb_size - c->ohead_offs; 281 281 if (gap < UBIFS_ORPH_NODE_SZ + sizeof(__le64)) { 282 282 c->ohead_lnum += 1; ··· 295 295 if (cnt > c->cmt_orphans) 296 296 cnt = c->cmt_orphans; 297 297 len = UBIFS_ORPH_NODE_SZ + cnt * sizeof(__le64); 298 - ubifs_assert(c->orph_buf); 298 + ubifs_assert(c, c->orph_buf); 299 299 orph = c->orph_buf; 300 300 orph->ch.node_type = UBIFS_ORPH_NODE; 301 301 spin_lock(&c->orphan_lock); 302 302 cnext = c->orph_cnext; 303 303 for (i = 0; i < cnt; i++) { 304 304 orphan = cnext; 305 - ubifs_assert(orphan->cmt); 305 + ubifs_assert(c, orphan->cmt); 306 306 orph->inos[i] = cpu_to_le64(orphan->inum); 307 307 orphan->cmt = 0; 308 308 cnext = orphan->cnext; ··· 316 316 else 317 317 /* Mark the last node of the commit */ 318 318 orph->cmt_no = cpu_to_le64((c->cmt_no) | (1ULL << 63)); 319 - ubifs_assert(c->ohead_offs + len <= c->leb_size); 320 - ubifs_assert(c->ohead_lnum >= c->orph_first); 321 - ubifs_assert(c->ohead_lnum <= c->orph_last); 319 + ubifs_assert(c, c->ohead_offs + len <= c->leb_size); 320 + ubifs_assert(c, c->ohead_lnum >= c->orph_first); 321 + ubifs_assert(c, c->ohead_lnum <= c->orph_last); 322 322 err = 
do_write_orph_node(c, len, atomic); 323 323 c->ohead_offs += ALIGN(len, c->min_io_size); 324 324 c->ohead_offs = ALIGN(c->ohead_offs, 8); ··· 388 388 cnt += 1; 389 389 } 390 390 *last = NULL; 391 - ubifs_assert(cnt == c->tot_orphans - c->new_orphans); 391 + ubifs_assert(c, cnt == c->tot_orphans - c->new_orphans); 392 392 c->cmt_orphans = cnt; 393 393 c->ohead_lnum = c->orph_first; 394 394 c->ohead_offs = 0; ··· 415 415 { 416 416 int avail, atomic = 0, err; 417 417 418 - ubifs_assert(c->cmt_orphans > 0); 418 + ubifs_assert(c, c->cmt_orphans > 0); 419 419 avail = avail_orphs(c); 420 420 if (avail < c->cmt_orphans) { 421 421 /* Not enough space to write new orphans, so consolidate */ ··· 446 446 while (dnext) { 447 447 orphan = dnext; 448 448 dnext = orphan->dnext; 449 - ubifs_assert(!orphan->new); 450 - ubifs_assert(orphan->del); 449 + ubifs_assert(c, !orphan->new); 450 + ubifs_assert(c, orphan->del); 451 451 rb_erase(&orphan->rb, &c->orph_tree); 452 452 list_del(&orphan->list); 453 453 c->tot_orphans -= 1;
+7 -7
fs/ubifs/recovery.c
··· 444 444 445 445 dbg_rcvry("cleaning corruption at %d:%d", lnum, *offs); 446 446 447 - ubifs_assert(!(*offs & 7)); 447 + ubifs_assert(c, !(*offs & 7)); 448 448 empty_offs = ALIGN(*offs, c->min_io_size); 449 449 pad_len = empty_offs - *offs; 450 450 ubifs_pad(c, *buf, pad_len); ··· 644 644 if (IS_ERR(sleb)) 645 645 return sleb; 646 646 647 - ubifs_assert(len >= 8); 647 + ubifs_assert(c, len >= 8); 648 648 while (len >= 8) { 649 649 dbg_scan("look at LEB %d:%d (%d bytes left)", 650 650 lnum, offs, len); ··· 966 966 { 967 967 int err; 968 968 969 - ubifs_assert(!c->ro_mount || c->remounting_rw); 969 + ubifs_assert(c, !c->ro_mount || c->remounting_rw); 970 970 971 971 dbg_rcvry("checking index head at %d:%d", c->ihead_lnum, c->ihead_offs); 972 972 err = recover_head(c, c->ihead_lnum, c->ihead_offs, sbuf); ··· 1187 1187 return grab_empty_leb(c); 1188 1188 } 1189 1189 1190 - ubifs_assert(!(lp.flags & LPROPS_INDEX)); 1191 - ubifs_assert(lp.free + lp.dirty >= wbuf->offs); 1190 + ubifs_assert(c, !(lp.flags & LPROPS_INDEX)); 1191 + ubifs_assert(c, lp.free + lp.dirty >= wbuf->offs); 1192 1192 1193 1193 /* 1194 1194 * We run the commit before garbage collection otherwise subsequent ··· 1216 1216 return err; 1217 1217 } 1218 1218 1219 - ubifs_assert(err == LEB_RETAINED); 1219 + ubifs_assert(c, err == LEB_RETAINED); 1220 1220 if (err != LEB_RETAINED) 1221 1221 return -EINVAL; 1222 1222 ··· 1507 1507 struct inode *inode; 1508 1508 struct ubifs_inode *ui; 1509 1509 1510 - ubifs_assert(!e->inode); 1510 + ubifs_assert(c, !e->inode); 1511 1511 1512 1512 inode = ubifs_iget(c->vfs_sb, e->inum); 1513 1513 if (IS_ERR(inode))
+7 -6
fs/ubifs/replay.c
··· 273 273 static int replay_entries_cmp(void *priv, struct list_head *a, 274 274 struct list_head *b) 275 275 { 276 + struct ubifs_info *c = priv; 276 277 struct replay_entry *ra, *rb; 277 278 278 279 cond_resched(); ··· 282 281 283 282 ra = list_entry(a, struct replay_entry, list); 284 283 rb = list_entry(b, struct replay_entry, list); 285 - ubifs_assert(ra->sqnum != rb->sqnum); 284 + ubifs_assert(c, ra->sqnum != rb->sqnum); 286 285 if (ra->sqnum > rb->sqnum) 287 286 return 1; 288 287 return -1; ··· 669 668 goto out; 670 669 } 671 670 672 - ubifs_assert(ubifs_search_bud(c, lnum)); 673 - ubifs_assert(sleb->endpt - offs >= used); 674 - ubifs_assert(sleb->endpt % c->min_io_size == 0); 671 + ubifs_assert(c, ubifs_search_bud(c, lnum)); 672 + ubifs_assert(c, sleb->endpt - offs >= used); 673 + ubifs_assert(c, sleb->endpt % c->min_io_size == 0); 675 674 676 675 b->dirty = sleb->endpt - offs - used; 677 676 b->free = c->leb_size - sleb->endpt; ··· 707 706 if (err) 708 707 return err; 709 708 710 - ubifs_assert(b->sqnum > prev_sqnum); 709 + ubifs_assert(c, b->sqnum > prev_sqnum); 711 710 prev_sqnum = b->sqnum; 712 711 } 713 712 ··· 1068 1067 c->bi.uncommitted_idx = atomic_long_read(&c->dirty_zn_cnt); 1069 1068 c->bi.uncommitted_idx *= c->max_idx_node_sz; 1070 1069 1071 - ubifs_assert(c->bud_bytes <= c->max_bud_bytes || c->need_recovery); 1070 + ubifs_assert(c, c->bud_bytes <= c->max_bud_bytes || c->need_recovery); 1072 1071 dbg_mnt("finished, log head LEB %d:%d, max_sqnum %llu, highest_inum %lu", 1073 1072 c->lhead_lnum, c->lhead_offs, c->max_sqnum, 1074 1073 (unsigned long)c->highest_inum);
+9 -9
fs/ubifs/sb.c
··· 85 85 long long tmp64, main_bytes; 86 86 __le64 tmp_le64; 87 87 __le32 tmp_le32; 88 - struct timespec ts; 88 + struct timespec64 ts; 89 89 90 90 /* Some functions called from here depend on the @c->key_len filed */ 91 91 c->key_len = UBIFS_SK_LEN; ··· 301 301 ino->creat_sqnum = cpu_to_le64(++c->max_sqnum); 302 302 ino->nlink = cpu_to_le32(2); 303 303 304 - ktime_get_real_ts(&ts); 305 - ts = timespec_trunc(ts, DEFAULT_TIME_GRAN); 304 + ktime_get_real_ts64(&ts); 305 + ts = timespec64_trunc(ts, DEFAULT_TIME_GRAN); 306 306 tmp_le64 = cpu_to_le64(ts.tv_sec); 307 307 ino->atime_sec = tmp_le64; 308 308 ino->ctime_sec = tmp_le64; ··· 563 563 * due to the unavailability of time-travelling equipment. 564 564 */ 565 565 if (c->fmt_version > UBIFS_FORMAT_VERSION) { 566 - ubifs_assert(!c->ro_media || c->ro_mount); 566 + ubifs_assert(c, !c->ro_media || c->ro_mount); 567 567 if (!c->ro_mount || 568 568 c->ro_compat_version > UBIFS_RO_COMPAT_VERSION) { 569 569 ubifs_err(c, "on-flash format version is w%d/r%d, but software only supports up to version w%d/r%d", ··· 705 705 { 706 706 int err; 707 707 708 - ubifs_assert(len >= 0); 709 - ubifs_assert(len % c->min_io_size == 0); 710 - ubifs_assert(len < c->leb_size); 708 + ubifs_assert(c, len >= 0); 709 + ubifs_assert(c, len % c->min_io_size == 0); 710 + ubifs_assert(c, len < c->leb_size); 711 711 712 712 if (len == 0) { 713 713 dbg_mnt("unmap empty LEB %d", lnum); ··· 817 817 int err; 818 818 struct ubifs_sb_node *sup; 819 819 820 - ubifs_assert(c->space_fixup); 821 - ubifs_assert(!c->ro_mount); 820 + ubifs_assert(c, c->space_fixup); 821 + ubifs_assert(c, !c->ro_mount); 822 822 823 823 ubifs_msg(c, "start fixing up free space"); 824 824
+1 -1
fs/ubifs/scan.c
··· 176 176 int lnum, int offs) 177 177 { 178 178 dbg_scan("stop scanning LEB %d at offset %d", lnum, offs); 179 - ubifs_assert(offs % c->min_io_size == 0); 179 + ubifs_assert(c, offs % c->min_io_size == 0); 180 180 181 181 sleb->endpt = ALIGN(offs, c->min_io_size); 182 182 }
+6 -6
fs/ubifs/shrinker.c
··· 71 71 { 72 72 int total_freed = 0; 73 73 struct ubifs_znode *znode, *zprev; 74 - int time = get_seconds(); 74 + time64_t time = ktime_get_seconds(); 75 75 76 - ubifs_assert(mutex_is_locked(&c->umount_mutex)); 77 - ubifs_assert(mutex_is_locked(&c->tnc_mutex)); 76 + ubifs_assert(c, mutex_is_locked(&c->umount_mutex)); 77 + ubifs_assert(c, mutex_is_locked(&c->tnc_mutex)); 78 78 79 79 if (!c->zroot.znode || atomic_long_read(&c->clean_zn_cnt) == 0) 80 80 return 0; ··· 89 89 * changed only when the 'c->tnc_mutex' is held. 90 90 */ 91 91 zprev = NULL; 92 - znode = ubifs_tnc_levelorder_next(c->zroot.znode, NULL); 92 + znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, NULL); 93 93 while (znode && total_freed < nr && 94 94 atomic_long_read(&c->clean_zn_cnt) > 0) { 95 95 int freed; ··· 125 125 else 126 126 c->zroot.znode = NULL; 127 127 128 - freed = ubifs_destroy_tnc_subtree(znode); 128 + freed = ubifs_destroy_tnc_subtree(c, znode); 129 129 atomic_long_sub(freed, &ubifs_clean_zn_cnt); 130 130 atomic_long_sub(freed, &c->clean_zn_cnt); 131 131 total_freed += freed; ··· 136 136 break; 137 137 138 138 zprev = znode; 139 - znode = ubifs_tnc_levelorder_next(c->zroot.znode, znode); 139 + znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, znode); 140 140 cond_resched(); 141 141 } 142 142
+52 -24
fs/ubifs/super.c
··· 89 89 if (ui->xattr && !S_ISREG(inode->i_mode)) 90 90 return 5; 91 91 92 - if (!ubifs_compr_present(ui->compr_type)) { 92 + if (!ubifs_compr_present(c, ui->compr_type)) { 93 93 ubifs_warn(c, "inode %lu uses '%s' compression, but it was not compiled in", 94 - inode->i_ino, ubifs_compr_name(ui->compr_type)); 94 + inode->i_ino, ubifs_compr_name(c, ui->compr_type)); 95 95 } 96 96 97 97 err = dbg_check_dir(c, inode); ··· 296 296 struct ubifs_info *c = inode->i_sb->s_fs_info; 297 297 struct ubifs_inode *ui = ubifs_inode(inode); 298 298 299 - ubifs_assert(!ui->xattr); 299 + ubifs_assert(c, !ui->xattr); 300 300 if (is_bad_inode(inode)) 301 301 return 0; 302 302 ··· 349 349 goto out; 350 350 351 351 dbg_gen("inode %lu, mode %#x", inode->i_ino, (int)inode->i_mode); 352 - ubifs_assert(!atomic_read(&inode->i_count)); 352 + ubifs_assert(c, !atomic_read(&inode->i_count)); 353 353 354 354 truncate_inode_pages_final(&inode->i_data); 355 355 ··· 384 384 385 385 static void ubifs_dirty_inode(struct inode *inode, int flags) 386 386 { 387 + struct ubifs_info *c = inode->i_sb->s_fs_info; 387 388 struct ubifs_inode *ui = ubifs_inode(inode); 388 389 389 - ubifs_assert(mutex_is_locked(&ui->ui_mutex)); 390 + ubifs_assert(c, mutex_is_locked(&ui->ui_mutex)); 390 391 if (!ui->dirty) { 391 392 ui->dirty = 1; 392 393 dbg_gen("inode %lu", inode->i_ino); ··· 417 416 buf->f_namelen = UBIFS_MAX_NLEN; 418 417 buf->f_fsid.val[0] = le32_to_cpu(uuid[0]) ^ le32_to_cpu(uuid[2]); 419 418 buf->f_fsid.val[1] = le32_to_cpu(uuid[1]) ^ le32_to_cpu(uuid[3]); 420 - ubifs_assert(buf->f_bfree <= c->block_cnt); 419 + ubifs_assert(c, buf->f_bfree <= c->block_cnt); 421 420 return 0; 422 421 } 423 422 ··· 442 441 443 442 if (c->mount_opts.override_compr) { 444 443 seq_printf(s, ",compr=%s", 445 - ubifs_compr_name(c->mount_opts.compr_type)); 444 + ubifs_compr_name(c, c->mount_opts.compr_type)); 446 445 } 447 446 447 + seq_printf(s, ",assert=%s", ubifs_assert_action_name(c)); 448 448 seq_printf(s, ",ubi=%d,vol=%d", 
c->vi.ubi_num, c->vi.vol_id); 449 449 450 450 return 0; ··· 923 921 * Opt_chk_data_crc: check CRCs when reading data nodes 924 922 * Opt_no_chk_data_crc: do not check CRCs when reading data nodes 925 923 * Opt_override_compr: override default compressor 924 + * Opt_assert: set ubifs_assert() action 926 925 * Opt_err: just end of array marker 927 926 */ 928 927 enum { ··· 934 931 Opt_chk_data_crc, 935 932 Opt_no_chk_data_crc, 936 933 Opt_override_compr, 934 + Opt_assert, 937 935 Opt_ignore, 938 936 Opt_err, 939 937 }; ··· 949 945 {Opt_override_compr, "compr=%s"}, 950 946 {Opt_ignore, "ubi=%s"}, 951 947 {Opt_ignore, "vol=%s"}, 948 + {Opt_assert, "assert=%s"}, 952 949 {Opt_err, NULL}, 953 950 }; 954 951 ··· 1050 1045 c->default_compr = c->mount_opts.compr_type; 1051 1046 break; 1052 1047 } 1048 + case Opt_assert: 1049 + { 1050 + char *act = match_strdup(&args[0]); 1051 + 1052 + if (!act) 1053 + return -ENOMEM; 1054 + if (!strcmp(act, "report")) 1055 + c->assert_action = ASSACT_REPORT; 1056 + else if (!strcmp(act, "read-only")) 1057 + c->assert_action = ASSACT_RO; 1058 + else if (!strcmp(act, "panic")) 1059 + c->assert_action = ASSACT_PANIC; 1060 + else { 1061 + ubifs_err(c, "unknown assert action \"%s\"", act); 1062 + kfree(act); 1063 + return -EINVAL; 1064 + } 1065 + kfree(act); 1066 + break; 1067 + } 1053 1068 case Opt_ignore: 1054 1069 break; 1055 1070 default: ··· 1128 1103 */ 1129 1104 static void bu_init(struct ubifs_info *c) 1130 1105 { 1131 - ubifs_assert(c->bulk_read == 1); 1106 + ubifs_assert(c, c->bulk_read == 1); 1132 1107 1133 1108 if (c->bu.buf) 1134 1109 return; /* Already initialized */ ··· 1159 1134 */ 1160 1135 static int check_free_space(struct ubifs_info *c) 1161 1136 { 1162 - ubifs_assert(c->dark_wm > 0); 1137 + ubifs_assert(c, c->dark_wm > 0); 1163 1138 if (c->lst.total_free + c->lst.total_dirty < c->dark_wm) { 1164 1139 ubifs_err(c, "insufficient free space to mount in R/W mode"); 1165 1140 ubifs_dump_budg(c, &c->bi); ··· 1259 1234 * Make sure 
the compressor which is set as default in the superblock 1260 1235 * or overridden by mount options is actually compiled in. 1261 1236 */ 1262 - if (!ubifs_compr_present(c->default_compr)) { 1237 + if (!ubifs_compr_present(c, c->default_compr)) { 1263 1238 ubifs_err(c, "'compressor \"%s\" is not compiled in", 1264 - ubifs_compr_name(c->default_compr)); 1239 + ubifs_compr_name(c, c->default_compr)); 1265 1240 err = -ENOTSUPP; 1266 1241 goto out_free; 1267 1242 } ··· 1421 1396 * the journal head LEBs may also be accounted as 1422 1397 * "empty taken" if they are empty. 1423 1398 */ 1424 - ubifs_assert(c->lst.taken_empty_lebs > 0); 1399 + ubifs_assert(c, c->lst.taken_empty_lebs > 0); 1425 1400 } 1426 1401 } else 1427 - ubifs_assert(c->lst.taken_empty_lebs > 0); 1402 + ubifs_assert(c, c->lst.taken_empty_lebs > 0); 1428 1403 1429 1404 err = dbg_check_filesystem(c); 1430 1405 if (err) ··· 1454 1429 UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION, c->uuid, 1455 1430 c->big_lpt ? ", big LPT model" : ", small LPT model"); 1456 1431 1457 - dbg_gen("default compressor: %s", ubifs_compr_name(c->default_compr)); 1432 + dbg_gen("default compressor: %s", ubifs_compr_name(c, c->default_compr)); 1458 1433 dbg_gen("data journal heads: %d", 1459 1434 c->jhead_cnt - NONDATA_JHEADS_CNT); 1460 1435 dbg_gen("log LEBs: %d (%d - %d)", ··· 1635 1610 goto out; 1636 1611 } else { 1637 1612 /* A readonly mount is not allowed to have orphans */ 1638 - ubifs_assert(c->tot_orphans == 0); 1613 + ubifs_assert(c, c->tot_orphans == 0); 1639 1614 err = ubifs_clear_orphans(c); 1640 1615 if (err) 1641 1616 goto out; ··· 1752 1727 { 1753 1728 int i, err; 1754 1729 1755 - ubifs_assert(!c->need_recovery); 1756 - ubifs_assert(!c->ro_mount); 1730 + ubifs_assert(c, !c->need_recovery); 1731 + ubifs_assert(c, !c->ro_mount); 1757 1732 1758 1733 mutex_lock(&c->umount_mutex); 1759 1734 if (c->bgt) { ··· 1803 1778 * to write them back because of I/O errors. 
1804 1779 */ 1805 1780 if (!c->ro_error) { 1806 - ubifs_assert(c->bi.idx_growth == 0); 1807 - ubifs_assert(c->bi.dd_growth == 0); 1808 - ubifs_assert(c->bi.data_growth == 0); 1781 + ubifs_assert(c, c->bi.idx_growth == 0); 1782 + ubifs_assert(c, c->bi.dd_growth == 0); 1783 + ubifs_assert(c, c->bi.data_growth == 0); 1809 1784 } 1810 1785 1811 1786 /* ··· 1912 1887 mutex_unlock(&c->bu_mutex); 1913 1888 } 1914 1889 1915 - ubifs_assert(c->lst.taken_empty_lebs > 0); 1890 + ubifs_assert(c, c->lst.taken_empty_lebs > 0); 1916 1891 return 0; 1917 1892 } 1918 1893 ··· 2027 2002 INIT_LIST_HEAD(&c->orph_list); 2028 2003 INIT_LIST_HEAD(&c->orph_new); 2029 2004 c->no_chk_data_crc = 1; 2005 + c->assert_action = ASSACT_RO; 2030 2006 2031 2007 c->highest_inum = UBIFS_FIRST_INO; 2032 2008 c->lhead_lnum = c->ltail_lnum = UBIFS_LOG_LNUM; ··· 2079 2053 if (c->max_inode_sz > MAX_LFS_FILESIZE) 2080 2054 sb->s_maxbytes = c->max_inode_sz = MAX_LFS_FILESIZE; 2081 2055 sb->s_op = &ubifs_super_operations; 2056 + #ifdef CONFIG_UBIFS_FS_XATTR 2082 2057 sb->s_xattr = ubifs_xattr_handlers; 2058 + #endif 2083 2059 #ifdef CONFIG_UBIFS_FS_ENCRYPTION 2084 2060 sb->s_cop = &ubifs_crypt_operations; 2085 2061 #endif ··· 2089 2061 mutex_lock(&c->umount_mutex); 2090 2062 err = mount_ubifs(c); 2091 2063 if (err) { 2092 - ubifs_assert(err < 0); 2064 + ubifs_assert(c, err < 0); 2093 2065 goto out_unlock; 2094 2066 } 2095 2067 ··· 2332 2304 2333 2305 static void __exit ubifs_exit(void) 2334 2306 { 2335 - ubifs_assert(list_empty(&ubifs_infos)); 2336 - ubifs_assert(atomic_long_read(&ubifs_clean_zn_cnt) == 0); 2307 + WARN_ON(list_empty(&ubifs_infos)); 2308 + WARN_ON(atomic_long_read(&ubifs_clean_zn_cnt) == 0); 2337 2309 2338 2310 dbg_debugfs_exit(); 2339 2311 ubifs_compressors_exit();
+56 -55
fs/ubifs/tnc.c
··· 211 211 __set_bit(DIRTY_ZNODE, &zn->flags); 212 212 __clear_bit(COW_ZNODE, &zn->flags); 213 213 214 - ubifs_assert(!ubifs_zn_obsolete(znode)); 214 + ubifs_assert(c, !ubifs_zn_obsolete(znode)); 215 215 __set_bit(OBSOLETE_ZNODE, &znode->flags); 216 216 217 217 if (znode->level != 0) { ··· 321 321 void *lnc_node; 322 322 const struct ubifs_dent_node *dent = node; 323 323 324 - ubifs_assert(!zbr->leaf); 325 - ubifs_assert(zbr->len != 0); 326 - ubifs_assert(is_hash_key(c, &zbr->key)); 324 + ubifs_assert(c, !zbr->leaf); 325 + ubifs_assert(c, zbr->len != 0); 326 + ubifs_assert(c, is_hash_key(c, &zbr->key)); 327 327 328 328 err = ubifs_validate_entry(c, dent); 329 329 if (err) { ··· 355 355 { 356 356 int err; 357 357 358 - ubifs_assert(!zbr->leaf); 359 - ubifs_assert(zbr->len != 0); 358 + ubifs_assert(c, !zbr->leaf); 359 + ubifs_assert(c, zbr->len != 0); 360 360 361 361 err = ubifs_validate_entry(c, node); 362 362 if (err) { ··· 398 398 { 399 399 int err; 400 400 401 - ubifs_assert(is_hash_key(c, &zbr->key)); 401 + ubifs_assert(c, is_hash_key(c, &zbr->key)); 402 402 403 403 if (zbr->leaf) { 404 404 /* Read from the leaf node cache */ 405 - ubifs_assert(zbr->len != 0); 405 + ubifs_assert(c, zbr->len != 0); 406 406 memcpy(node, zbr->leaf, zbr->len); 407 407 return 0; 408 408 } ··· 721 721 while (1) { 722 722 err = tnc_prev(c, zn, n); 723 723 if (err == -ENOENT) { 724 - ubifs_assert(*n == 0); 724 + ubifs_assert(c, *n == 0); 725 725 *n = -1; 726 726 return 0; 727 727 } ··· 761 761 err = tnc_next(c, zn, n); 762 762 if (err) { 763 763 /* Should be impossible */ 764 - ubifs_assert(0); 764 + ubifs_assert(c, 0); 765 765 if (err == -ENOENT) 766 766 err = -EINVAL; 767 767 return err; 768 768 } 769 - ubifs_assert(*n == 0); 769 + ubifs_assert(c, *n == 0); 770 770 *n = -1; 771 771 } 772 772 return 0; ··· 778 778 return 0; 779 779 if (err == NAME_MATCHES) 780 780 return 1; 781 - ubifs_assert(err == NAME_GREATER); 781 + ubifs_assert(c, err == NAME_GREATER); 782 782 } 783 783 } else { 
784 784 int nn = *n; ··· 802 802 *n = nn; 803 803 if (err == NAME_MATCHES) 804 804 return 1; 805 - ubifs_assert(err == NAME_LESS); 805 + ubifs_assert(c, err == NAME_LESS); 806 806 } 807 807 } 808 808 } ··· 843 843 err = NOT_ON_MEDIA; 844 844 goto out_free; 845 845 } 846 - ubifs_assert(err == 1); 846 + ubifs_assert(c, err == 1); 847 847 848 848 err = lnc_add_directly(c, zbr, dent); 849 849 if (err) ··· 923 923 while (1) { 924 924 err = tnc_prev(c, zn, n); 925 925 if (err == -ENOENT) { 926 - ubifs_assert(*n == 0); 926 + ubifs_assert(c, *n == 0); 927 927 *n = -1; 928 928 break; 929 929 } ··· 935 935 err = tnc_next(c, zn, n); 936 936 if (err) { 937 937 /* Should be impossible */ 938 - ubifs_assert(0); 938 + ubifs_assert(c, 0); 939 939 if (err == -ENOENT) 940 940 err = -EINVAL; 941 941 return err; 942 942 } 943 - ubifs_assert(*n == 0); 943 + ubifs_assert(c, *n == 0); 944 944 *n = -1; 945 945 } 946 946 break; ··· 1100 1100 struct ubifs_znode *zp; 1101 1101 int *path = c->bottom_up_buf, p = 0; 1102 1102 1103 - ubifs_assert(c->zroot.znode); 1104 - ubifs_assert(znode); 1103 + ubifs_assert(c, c->zroot.znode); 1104 + ubifs_assert(c, znode); 1105 1105 if (c->zroot.znode->level > BOTTOM_UP_HEIGHT) { 1106 1106 kfree(c->bottom_up_buf); 1107 1107 c->bottom_up_buf = kmalloc_array(c->zroot.znode->level, ··· 1120 1120 if (!zp) 1121 1121 break; 1122 1122 n = znode->iip; 1123 - ubifs_assert(p < c->zroot.znode->level); 1123 + ubifs_assert(c, p < c->zroot.znode->level); 1124 1124 path[p++] = n; 1125 1125 if (!zp->cnext && ubifs_zn_dirty(znode)) 1126 1126 break; ··· 1134 1134 1135 1135 zp = znode->parent; 1136 1136 if (zp) { 1137 - ubifs_assert(path[p - 1] >= 0); 1138 - ubifs_assert(path[p - 1] < zp->child_cnt); 1137 + ubifs_assert(c, path[p - 1] >= 0); 1138 + ubifs_assert(c, path[p - 1] < zp->child_cnt); 1139 1139 zbr = &zp->zbranch[path[--p]]; 1140 1140 znode = dirty_cow_znode(c, zbr); 1141 1141 } else { 1142 - ubifs_assert(znode == c->zroot.znode); 1142 + ubifs_assert(c, znode == 
c->zroot.znode); 1143 1143 znode = dirty_cow_znode(c, &c->zroot); 1144 1144 } 1145 1145 if (IS_ERR(znode) || !p) 1146 1146 break; 1147 - ubifs_assert(path[p - 1] >= 0); 1148 - ubifs_assert(path[p - 1] < znode->child_cnt); 1147 + ubifs_assert(c, path[p - 1] >= 0); 1148 + ubifs_assert(c, path[p - 1] < znode->child_cnt); 1149 1149 znode = znode->zbranch[path[p - 1]].znode; 1150 1150 } 1151 1151 ··· 1179 1179 { 1180 1180 int err, exact; 1181 1181 struct ubifs_znode *znode; 1182 - unsigned long time = get_seconds(); 1182 + time64_t time = ktime_get_seconds(); 1183 1183 1184 1184 dbg_tnck(key, "search key "); 1185 - ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY); 1185 + ubifs_assert(c, key_type(c, key) < UBIFS_INVALID_KEY); 1186 1186 1187 1187 znode = c->zroot.znode; 1188 1188 if (unlikely(!znode)) { ··· 1315 1315 { 1316 1316 int err, exact; 1317 1317 struct ubifs_znode *znode; 1318 - unsigned long time = get_seconds(); 1318 + time64_t time = ktime_get_seconds(); 1319 1319 1320 1320 dbg_tnck(key, "search and dirty key "); 1321 1321 ··· 1658 1658 int rlen, overlap; 1659 1659 1660 1660 dbg_io("LEB %d:%d, length %d", lnum, offs, len); 1661 - ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0); 1662 - ubifs_assert(!(offs & 7) && offs < c->leb_size); 1663 - ubifs_assert(offs + len <= c->leb_size); 1661 + ubifs_assert(c, wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0); 1662 + ubifs_assert(c, !(offs & 7) && offs < c->leb_size); 1663 + ubifs_assert(c, offs + len <= c->leb_size); 1664 1664 1665 1665 spin_lock(&wbuf->lock); 1666 1666 overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs); ··· 1824 1824 goto out_unlock; 1825 1825 } 1826 1826 1827 - ubifs_assert(n >= 0); 1827 + ubifs_assert(c, n >= 0); 1828 1828 1829 1829 err = resolve_collision(c, key, &znode, &n, nm); 1830 1830 dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n); ··· 1922 1922 struct ubifs_znode *znode; 1923 1923 union ubifs_key start_key; 1924 1924 1925 - ubifs_assert(is_hash_key(c, 
key)); 1925 + ubifs_assert(c, is_hash_key(c, key)); 1926 1926 1927 1927 lowest_dent_key(c, &start_key, key_inum(c, key)); 1928 1928 ··· 1993 1993 { 1994 1994 union ubifs_key *key, *key1; 1995 1995 1996 - ubifs_assert(znode->parent); 1997 - ubifs_assert(znode->iip == 0); 1996 + ubifs_assert(c, znode->parent); 1997 + ubifs_assert(c, znode->iip == 0); 1998 1998 1999 1999 key = &znode->zbranch[0].key; 2000 2000 key1 = &znode->parent->zbranch[0].key; ··· 2011 2011 2012 2012 /** 2013 2013 * insert_zbranch - insert a zbranch into a znode. 2014 + * @c: UBIFS file-system description object 2014 2015 * @znode: znode into which to insert 2015 2016 * @zbr: zbranch to insert 2016 2017 * @n: slot number to insert to ··· 2021 2020 * zbranch has to be inserted to the @znode->zbranches[]' array at the @n-th 2022 2021 * slot, zbranches starting from @n have to be moved right. 2023 2022 */ 2024 - static void insert_zbranch(struct ubifs_znode *znode, 2023 + static void insert_zbranch(struct ubifs_info *c, struct ubifs_znode *znode, 2025 2024 const struct ubifs_zbranch *zbr, int n) 2026 2025 { 2027 2026 int i; 2028 2027 2029 - ubifs_assert(ubifs_zn_dirty(znode)); 2028 + ubifs_assert(c, ubifs_zn_dirty(znode)); 2030 2029 2031 2030 if (znode->level) { 2032 2031 for (i = znode->child_cnt; i > n; i--) { ··· 2080 2079 int i, keep, move, appending = 0; 2081 2080 union ubifs_key *key = &zbr->key, *key1; 2082 2081 2083 - ubifs_assert(n >= 0 && n <= c->fanout); 2082 + ubifs_assert(c, n >= 0 && n <= c->fanout); 2084 2083 2085 2084 /* Implement naive insert for now */ 2086 2085 again: 2087 2086 zp = znode->parent; 2088 2087 if (znode->child_cnt < c->fanout) { 2089 - ubifs_assert(n != c->fanout); 2088 + ubifs_assert(c, n != c->fanout); 2090 2089 dbg_tnck(key, "inserted at %d level %d, key ", n, znode->level); 2091 2090 2092 - insert_zbranch(znode, zbr, n); 2091 + insert_zbranch(c, znode, zbr, n); 2093 2092 2094 2093 /* Ensure parent's key is correct */ 2095 2094 if (n == 0 && zp && znode->iip == 0) 
··· 2198 2197 /* Insert new key and branch */ 2199 2198 dbg_tnck(key, "inserting at %d level %d, key ", n, zn->level); 2200 2199 2201 - insert_zbranch(zi, zbr, n); 2200 + insert_zbranch(c, zi, zbr, n); 2202 2201 2203 2202 /* Insert new znode (produced by spitting) into the parent */ 2204 2203 if (zp) { ··· 2496 2495 int i, err; 2497 2496 2498 2497 /* Delete without merge for now */ 2499 - ubifs_assert(znode->level == 0); 2500 - ubifs_assert(n >= 0 && n < c->fanout); 2498 + ubifs_assert(c, znode->level == 0); 2499 + ubifs_assert(c, n >= 0 && n < c->fanout); 2501 2500 dbg_tnck(&znode->zbranch[n].key, "deleting key "); 2502 2501 2503 2502 zbr = &znode->zbranch[n]; ··· 2523 2522 */ 2524 2523 2525 2524 do { 2526 - ubifs_assert(!ubifs_zn_obsolete(znode)); 2527 - ubifs_assert(ubifs_zn_dirty(znode)); 2525 + ubifs_assert(c, !ubifs_zn_obsolete(znode)); 2526 + ubifs_assert(c, ubifs_zn_dirty(znode)); 2528 2527 2529 2528 zp = znode->parent; 2530 2529 n = znode->iip; ··· 2546 2545 2547 2546 /* Remove from znode, entry n - 1 */ 2548 2547 znode->child_cnt -= 1; 2549 - ubifs_assert(znode->level != 0); 2548 + ubifs_assert(c, znode->level != 0); 2550 2549 for (i = n; i < znode->child_cnt; i++) { 2551 2550 znode->zbranch[i] = znode->zbranch[i + 1]; 2552 2551 if (znode->zbranch[i].znode) ··· 2579 2578 c->zroot.offs = zbr->offs; 2580 2579 c->zroot.len = zbr->len; 2581 2580 c->zroot.znode = znode; 2582 - ubifs_assert(!ubifs_zn_obsolete(zp)); 2583 - ubifs_assert(ubifs_zn_dirty(zp)); 2581 + ubifs_assert(c, !ubifs_zn_obsolete(zp)); 2582 + ubifs_assert(c, ubifs_zn_dirty(zp)); 2584 2583 atomic_long_dec(&c->dirty_zn_cnt); 2585 2584 2586 2585 if (zp->cnext) { ··· 2945 2944 union ubifs_key *dkey; 2946 2945 2947 2946 dbg_tnck(key, "key "); 2948 - ubifs_assert(is_hash_key(c, key)); 2947 + ubifs_assert(c, is_hash_key(c, key)); 2949 2948 2950 2949 mutex_lock(&c->tnc_mutex); 2951 2950 err = ubifs_lookup_level0(c, key, &znode, &n); ··· 3032 3031 3033 3032 if (!c->cnext) 3034 3033 return; 3035 - 
ubifs_assert(c->cmt_state == COMMIT_BROKEN); 3034 + ubifs_assert(c, c->cmt_state == COMMIT_BROKEN); 3036 3035 cnext = c->cnext; 3037 3036 do { 3038 3037 struct ubifs_znode *znode = cnext; ··· 3054 3053 long n, freed; 3055 3054 3056 3055 n = atomic_long_read(&c->clean_zn_cnt); 3057 - freed = ubifs_destroy_tnc_subtree(c->zroot.znode); 3058 - ubifs_assert(freed == n); 3056 + freed = ubifs_destroy_tnc_subtree(c, c->zroot.znode); 3057 + ubifs_assert(c, freed == n); 3059 3058 atomic_long_sub(n, &ubifs_clean_zn_cnt); 3060 3059 } 3061 3060 kfree(c->gap_lebs); ··· 3168 3167 struct ubifs_znode *znode, *zn; 3169 3168 int n, nn; 3170 3169 3171 - ubifs_assert(key_type(c, key) < UBIFS_INVALID_KEY); 3170 + ubifs_assert(c, key_type(c, key) < UBIFS_INVALID_KEY); 3172 3171 3173 3172 /* 3174 3173 * The arguments have probably been read off flash, so don't assume ··· 3207 3206 if (IS_ERR(znode)) 3208 3207 return znode; 3209 3208 ubifs_search_zbranch(c, znode, key, &n); 3210 - ubifs_assert(n >= 0); 3209 + ubifs_assert(c, n >= 0); 3211 3210 } 3212 3211 if (znode->level == level + 1) 3213 3212 break; ··· 3498 3497 if (err < 0) 3499 3498 goto out_unlock; 3500 3499 3501 - ubifs_assert(err == 0); 3500 + ubifs_assert(c, err == 0); 3502 3501 key = &znode->zbranch[n].key; 3503 3502 if (!key_in_range(c, key, &from_key, &to_key)) 3504 3503 goto out_unlock;
+14 -14
fs/ubifs/tnc_commit.c
··· 87 87 88 88 atomic_long_dec(&c->dirty_zn_cnt); 89 89 90 - ubifs_assert(ubifs_zn_dirty(znode)); 91 - ubifs_assert(ubifs_zn_cow(znode)); 90 + ubifs_assert(c, ubifs_zn_dirty(znode)); 91 + ubifs_assert(c, ubifs_zn_cow(znode)); 92 92 93 93 /* 94 94 * Note, unlike 'write_index()' we do not add memory barriers here ··· 115 115 { 116 116 int len, gap_remains, gap_pos, written, pad_len; 117 117 118 - ubifs_assert((gap_start & 7) == 0); 119 - ubifs_assert((gap_end & 7) == 0); 120 - ubifs_assert(gap_end >= gap_start); 118 + ubifs_assert(c, (gap_start & 7) == 0); 119 + ubifs_assert(c, (gap_end & 7) == 0); 120 + ubifs_assert(c, gap_end >= gap_start); 121 121 122 122 gap_remains = gap_end - gap_start; 123 123 if (!gap_remains) ··· 131 131 const int alen = ALIGN(len, 8); 132 132 int err; 133 133 134 - ubifs_assert(alen <= gap_remains); 134 + ubifs_assert(c, alen <= gap_remains); 135 135 err = make_idx_node(c, c->ileb_buf + gap_pos, znode, 136 136 lnum, gap_pos, len); 137 137 if (err) ··· 259 259 struct ubifs_idx_node *idx; 260 260 int in_use, level; 261 261 262 - ubifs_assert(snod->type == UBIFS_IDX_NODE); 262 + ubifs_assert(c, snod->type == UBIFS_IDX_NODE); 263 263 idx = snod->node; 264 264 key_read(c, ubifs_idx_key(c, idx), &snod->key); 265 265 level = le16_to_cpu(idx->level); ··· 373 373 374 374 p = c->gap_lebs; 375 375 do { 376 - ubifs_assert(p < c->gap_lebs + c->lst.idx_lebs); 376 + ubifs_assert(c, p < c->gap_lebs + c->lst.idx_lebs); 377 377 written = layout_leb_in_gaps(c, p); 378 378 if (written < 0) { 379 379 err = written; ··· 639 639 } 640 640 cnt += 1; 641 641 while (1) { 642 - ubifs_assert(!ubifs_zn_cow(znode)); 642 + ubifs_assert(c, !ubifs_zn_cow(znode)); 643 643 __set_bit(COW_ZNODE, &znode->flags); 644 644 znode->alt = 0; 645 645 cnext = find_next_dirty(znode); ··· 652 652 cnt += 1; 653 653 } 654 654 dbg_cmt("committing %d znodes", cnt); 655 - ubifs_assert(cnt == atomic_long_read(&c->dirty_zn_cnt)); 655 + ubifs_assert(c, cnt == 
atomic_long_read(&c->dirty_zn_cnt)); 656 656 return cnt; 657 657 } 658 658 ··· 760 760 err = layout_commit(c, no_space, cnt); 761 761 if (err) 762 762 goto out_free; 763 - ubifs_assert(atomic_long_read(&c->dirty_zn_cnt) == 0); 763 + ubifs_assert(c, atomic_long_read(&c->dirty_zn_cnt) == 0); 764 764 err = free_unused_idx_lebs(c); 765 765 if (err) 766 766 goto out; ··· 781 781 * budgeting subsystem to assume the index is already committed, 782 782 * even though it is not. 783 783 */ 784 - ubifs_assert(c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c)); 784 + ubifs_assert(c, c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c)); 785 785 c->bi.old_idx_sz = c->calc_idx_sz; 786 786 c->bi.uncommitted_idx = 0; 787 787 c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c); ··· 887 887 /* Grab some stuff from znode while we still can */ 888 888 cnext = znode->cnext; 889 889 890 - ubifs_assert(ubifs_zn_dirty(znode)); 891 - ubifs_assert(ubifs_zn_cow(znode)); 890 + ubifs_assert(c, ubifs_zn_dirty(znode)); 891 + ubifs_assert(c, ubifs_zn_cow(znode)); 892 892 893 893 /* 894 894 * It is important that other threads should see %DIRTY_ZNODE
+23 -17
fs/ubifs/tnc_misc.c
··· 31 31 32 32 /** 33 33 * ubifs_tnc_levelorder_next - next TNC tree element in levelorder traversal. 34 + * @c: UBIFS file-system description object 34 35 * @zr: root of the subtree to traverse 35 36 * @znode: previous znode 36 37 * 37 38 * This function implements levelorder TNC traversal. The LNC is ignored. 38 39 * Returns the next element or %NULL if @znode is already the last one. 39 40 */ 40 - struct ubifs_znode *ubifs_tnc_levelorder_next(struct ubifs_znode *zr, 41 + struct ubifs_znode *ubifs_tnc_levelorder_next(const struct ubifs_info *c, 42 + struct ubifs_znode *zr, 41 43 struct ubifs_znode *znode) 42 44 { 43 45 int level, iip, level_search = 0; 44 46 struct ubifs_znode *zn; 45 47 46 - ubifs_assert(zr); 48 + ubifs_assert(c, zr); 47 49 48 50 if (unlikely(!znode)) 49 51 return zr; ··· 60 58 61 59 iip = znode->iip; 62 60 while (1) { 63 - ubifs_assert(znode->level <= zr->level); 61 + ubifs_assert(c, znode->level <= zr->level); 64 62 65 63 /* 66 64 * First walk up until there is a znode with next branch to ··· 87 85 level_search = 1; 88 86 iip = -1; 89 87 znode = ubifs_tnc_find_child(zr, 0); 90 - ubifs_assert(znode); 88 + ubifs_assert(c, znode); 91 89 } 92 90 93 91 /* Switch to the next index */ ··· 113 111 } 114 112 115 113 if (zn) { 116 - ubifs_assert(zn->level >= 0); 114 + ubifs_assert(c, zn->level >= 0); 117 115 return zn; 118 116 } 119 117 } ··· 142 140 int uninitialized_var(cmp); 143 141 const struct ubifs_zbranch *zbr = &znode->zbranch[0]; 144 142 145 - ubifs_assert(end > beg); 143 + ubifs_assert(c, end > beg); 146 144 147 145 while (end > beg) { 148 146 mid = (beg + end) >> 1; ··· 160 158 *n = end - 1; 161 159 162 160 /* The insert point is after *n */ 163 - ubifs_assert(*n >= -1 && *n < znode->child_cnt); 161 + ubifs_assert(c, *n >= -1 && *n < znode->child_cnt); 164 162 if (*n == -1) 165 - ubifs_assert(keys_cmp(c, key, &zbr[0].key) < 0); 163 + ubifs_assert(c, keys_cmp(c, key, &zbr[0].key) < 0); 166 164 else 167 - ubifs_assert(keys_cmp(c, key, 
&zbr[*n].key) > 0); 165 + ubifs_assert(c, keys_cmp(c, key, &zbr[*n].key) > 0); 168 166 if (*n + 1 < znode->child_cnt) 169 - ubifs_assert(keys_cmp(c, key, &zbr[*n + 1].key) < 0); 167 + ubifs_assert(c, keys_cmp(c, key, &zbr[*n + 1].key) < 0); 170 168 171 169 return 0; 172 170 } ··· 197 195 198 196 /** 199 197 * ubifs_tnc_postorder_next - next TNC tree element in postorder traversal. 198 + * @c: UBIFS file-system description object 200 199 * @znode: previous znode 201 200 * 202 201 * This function implements postorder TNC traversal. The LNC is ignored. 203 202 * Returns the next element or %NULL if @znode is already the last one. 204 203 */ 205 - struct ubifs_znode *ubifs_tnc_postorder_next(struct ubifs_znode *znode) 204 + struct ubifs_znode *ubifs_tnc_postorder_next(const struct ubifs_info *c, 205 + struct ubifs_znode *znode) 206 206 { 207 207 struct ubifs_znode *zn; 208 208 209 - ubifs_assert(znode); 209 + ubifs_assert(c, znode); 210 210 if (unlikely(!znode->parent)) 211 211 return NULL; 212 212 ··· 224 220 225 221 /** 226 222 * ubifs_destroy_tnc_subtree - destroy all znodes connected to a subtree. 223 + * @c: UBIFS file-system description object 227 224 * @znode: znode defining subtree to destroy 228 225 * 229 226 * This function destroys subtree of the TNC tree. Returns number of clean 230 227 * znodes in the subtree. 
231 228 */ 232 - long ubifs_destroy_tnc_subtree(struct ubifs_znode *znode) 229 + long ubifs_destroy_tnc_subtree(const struct ubifs_info *c, 230 + struct ubifs_znode *znode) 233 231 { 234 232 struct ubifs_znode *zn = ubifs_tnc_postorder_first(znode); 235 233 long clean_freed = 0; 236 234 int n; 237 235 238 - ubifs_assert(zn); 236 + ubifs_assert(c, zn); 239 237 while (1) { 240 238 for (n = 0; n < zn->child_cnt; n++) { 241 239 if (!zn->zbranch[n].znode) ··· 258 252 return clean_freed; 259 253 } 260 254 261 - zn = ubifs_tnc_postorder_next(zn); 255 + zn = ubifs_tnc_postorder_next(c, zn); 262 256 } 263 257 } 264 258 ··· 416 410 int err; 417 411 struct ubifs_znode *znode; 418 412 419 - ubifs_assert(!zbr->znode); 413 + ubifs_assert(c, !zbr->znode); 420 414 /* 421 415 * A slab cache is not presently used for znodes because the znode size 422 416 * depends on the fanout which is stored in the superblock. ··· 441 435 442 436 zbr->znode = znode; 443 437 znode->parent = parent; 444 - znode->time = get_seconds(); 438 + znode->time = ktime_get_seconds(); 445 439 znode->iip = iip; 446 440 447 441 return znode;
+32 -7
fs/ubifs/ubifs.h
··· 258 258 LEB_RETAINED, 259 259 }; 260 260 261 + /* 262 + * Action taken upon a failed ubifs_assert(). 263 + * @ASSACT_REPORT: just report the failed assertion 264 + * @ASSACT_RO: switch to read-only mode 265 + * @ASSACT_PANIC: call BUG() and possible panic the kernel 266 + */ 267 + enum { 268 + ASSACT_REPORT = 0, 269 + ASSACT_RO, 270 + ASSACT_PANIC, 271 + }; 272 + 261 273 /** 262 274 * struct ubifs_old_idx - index node obsoleted since last commit start. 263 275 * @rb: rb-tree node ··· 770 758 struct ubifs_znode *parent; 771 759 struct ubifs_znode *cnext; 772 760 unsigned long flags; 773 - unsigned long time; 761 + time64_t time; 774 762 int level; 775 763 int child_cnt; 776 764 int iip; ··· 1027 1015 * @bulk_read: enable bulk-reads 1028 1016 * @default_compr: default compression algorithm (%UBIFS_COMPR_LZO, etc) 1029 1017 * @rw_incompat: the media is not R/W compatible 1018 + * @assert_action: action to take when a ubifs_assert() fails 1030 1019 * 1031 1020 * @tnc_mutex: protects the Tree Node Cache (TNC), @zroot, @cnext, @enext, and 1032 1021 * @calc_idx_sz ··· 1269 1256 unsigned int bulk_read:1; 1270 1257 unsigned int default_compr:2; 1271 1258 unsigned int rw_incompat:1; 1259 + unsigned int assert_action:2; 1272 1260 1273 1261 struct mutex tnc_mutex; 1274 1262 struct ubifs_zbranch zroot; ··· 1622 1608 int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu); 1623 1609 1624 1610 /* tnc_misc.c */ 1625 - struct ubifs_znode *ubifs_tnc_levelorder_next(struct ubifs_znode *zr, 1611 + struct ubifs_znode *ubifs_tnc_levelorder_next(const struct ubifs_info *c, 1612 + struct ubifs_znode *zr, 1626 1613 struct ubifs_znode *znode); 1627 1614 int ubifs_search_zbranch(const struct ubifs_info *c, 1628 1615 const struct ubifs_znode *znode, 1629 1616 const union ubifs_key *key, int *n); 1630 1617 struct ubifs_znode *ubifs_tnc_postorder_first(struct ubifs_znode *znode); 1631 - struct ubifs_znode *ubifs_tnc_postorder_next(struct ubifs_znode *znode); 1632 - long 
ubifs_destroy_tnc_subtree(struct ubifs_znode *zr); 1618 + struct ubifs_znode *ubifs_tnc_postorder_next(const struct ubifs_info *c, 1619 + struct ubifs_znode *znode); 1620 + long ubifs_destroy_tnc_subtree(const struct ubifs_info *c, 1621 + struct ubifs_znode *zr); 1633 1622 struct ubifs_znode *ubifs_load_znode(struct ubifs_info *c, 1634 1623 struct ubifs_zbranch *zbr, 1635 1624 struct ubifs_znode *parent, int iip); ··· 1715 1698 int ubifs_read_nnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip); 1716 1699 void ubifs_add_lpt_dirt(struct ubifs_info *c, int lnum, int dirty); 1717 1700 void ubifs_add_nnode_dirt(struct ubifs_info *c, struct ubifs_nnode *nnode); 1718 - uint32_t ubifs_unpack_bits(uint8_t **addr, int *pos, int nrbits); 1701 + uint32_t ubifs_unpack_bits(const struct ubifs_info *c, uint8_t **addr, int *pos, int nrbits); 1719 1702 struct ubifs_nnode *ubifs_first_nnode(struct ubifs_info *c, int *hght); 1720 1703 /* Needed only in debugging code in lpt_commit.c */ 1721 1704 int ubifs_unpack_nnode(const struct ubifs_info *c, void *buf, ··· 1772 1755 size_t size, int flags, bool check_lock); 1773 1756 ssize_t ubifs_xattr_get(struct inode *host, const char *name, void *buf, 1774 1757 size_t size); 1758 + 1759 + #ifdef CONFIG_UBIFS_FS_XATTR 1775 1760 void ubifs_evict_xattr_inode(struct ubifs_info *c, ino_t xattr_inum); 1761 + #else 1762 + static inline void ubifs_evict_xattr_inode(struct ubifs_info *c, 1763 + ino_t xattr_inum) { } 1764 + #endif 1776 1765 1777 1766 #ifdef CONFIG_UBIFS_FS_SECURITY 1778 1767 extern int ubifs_init_security(struct inode *dentry, struct inode *inode, ··· 1835 1812 unsigned int in_len, unsigned int *out_len, 1836 1813 int block) 1837 1814 { 1838 - ubifs_assert(0); 1815 + struct ubifs_info *c = inode->i_sb->s_fs_info; 1816 + ubifs_assert(c, 0); 1839 1817 return -EOPNOTSUPP; 1840 1818 } 1841 1819 static inline int ubifs_decrypt(const struct inode *inode, 1842 1820 struct ubifs_data_node *dn, 1843 1821 unsigned int *out_len, int 
block) 1844 1822 { 1845 - ubifs_assert(0); 1823 + struct ubifs_info *c = inode->i_sb->s_fs_info; 1824 + ubifs_assert(c, 0); 1846 1825 return -EOPNOTSUPP; 1847 1826 } 1848 1827 #else
+32 -8
fs/ubifs/xattr.c
··· 152 152 ui->data_len = size; 153 153 154 154 mutex_lock(&host_ui->ui_mutex); 155 + 156 + if (!host->i_nlink) { 157 + err = -ENOENT; 158 + goto out_noent; 159 + } 160 + 155 161 host->i_ctime = current_time(host); 156 162 host_ui->xattr_cnt += 1; 157 163 host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm)); ··· 190 184 host_ui->xattr_size -= CALC_XATTR_BYTES(size); 191 185 host_ui->xattr_names -= fname_len(nm); 192 186 host_ui->flags &= ~UBIFS_CRYPT_FL; 187 + out_noent: 193 188 mutex_unlock(&host_ui->ui_mutex); 194 189 out_free: 195 190 make_bad_inode(inode); ··· 223 216 struct ubifs_budget_req req = { .dirtied_ino = 2, 224 217 .dirtied_ino_d = ALIGN(size, 8) + ALIGN(host_ui->data_len, 8) }; 225 218 226 - ubifs_assert(ui->data_len == inode->i_size); 219 + ubifs_assert(c, ui->data_len == inode->i_size); 227 220 err = ubifs_budget_space(c, &req); 228 221 if (err) 229 222 return err; ··· 242 235 mutex_unlock(&ui->ui_mutex); 243 236 244 237 mutex_lock(&host_ui->ui_mutex); 238 + 239 + if (!host->i_nlink) { 240 + err = -ENOENT; 241 + goto out_noent; 242 + } 243 + 245 244 host->i_ctime = current_time(host); 246 245 host_ui->xattr_size -= CALC_XATTR_BYTES(old_size); 247 246 host_ui->xattr_size += CALC_XATTR_BYTES(size); ··· 269 256 out_cancel: 270 257 host_ui->xattr_size -= CALC_XATTR_BYTES(size); 271 258 host_ui->xattr_size += CALC_XATTR_BYTES(old_size); 259 + out_noent: 272 260 mutex_unlock(&host_ui->ui_mutex); 273 261 make_bad_inode(inode); 274 262 out_free: ··· 305 291 int err; 306 292 307 293 if (check_lock) 308 - ubifs_assert(inode_is_locked(host)); 294 + ubifs_assert(c, inode_is_locked(host)); 309 295 310 296 if (size > UBIFS_MAX_INO_DATA) 311 297 return -ERANGE; ··· 388 374 } 389 375 390 376 ui = ubifs_inode(inode); 391 - ubifs_assert(inode->i_size == ui->data_len); 392 - ubifs_assert(ubifs_inode(host)->xattr_size > ui->data_len); 377 + ubifs_assert(c, inode->i_size == ui->data_len); 378 + ubifs_assert(c, ubifs_inode(host)->xattr_size > ui->data_len); 393 379 394 
380 mutex_lock(&ui->ui_mutex); 395 381 if (buf) { ··· 476 462 return err; 477 463 } 478 464 479 - ubifs_assert(written <= size); 465 + ubifs_assert(c, written <= size); 480 466 return written; 481 467 } 482 468 ··· 489 475 struct ubifs_budget_req req = { .dirtied_ino = 2, .mod_dent = 1, 490 476 .dirtied_ino_d = ALIGN(host_ui->data_len, 8) }; 491 477 492 - ubifs_assert(ui->data_len == inode->i_size); 478 + ubifs_assert(c, ui->data_len == inode->i_size); 493 479 494 480 err = ubifs_budget_space(c, &req); 495 481 if (err) 496 482 return err; 497 483 498 484 mutex_lock(&host_ui->ui_mutex); 485 + 486 + if (!host->i_nlink) { 487 + err = -ENOENT; 488 + goto out_noent; 489 + } 490 + 499 491 host->i_ctime = current_time(host); 500 492 host_ui->xattr_cnt -= 1; 501 493 host_ui->xattr_size -= CALC_DENT_SIZE(fname_len(nm)); ··· 521 501 host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm)); 522 502 host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len); 523 503 host_ui->xattr_names += fname_len(nm); 504 + out_noent: 524 505 mutex_unlock(&host_ui->ui_mutex); 525 506 ubifs_release_budget(c, &req); 526 507 make_bad_inode(inode); ··· 559 538 union ubifs_key key; 560 539 int err; 561 540 562 - ubifs_assert(inode_is_locked(host)); 541 + ubifs_assert(c, inode_is_locked(host)); 542 + 543 + if (!host->i_nlink) 544 + return -ENOENT; 563 545 564 546 if (fname_len(&nm) > UBIFS_MAX_NLEN) 565 547 return -ENAMETOOLONG; ··· 585 561 goto out_free; 586 562 } 587 563 588 - ubifs_assert(inode->i_nlink == 1); 564 + ubifs_assert(c, inode->i_nlink == 1); 589 565 clear_nlink(inode); 590 566 err = remove_xattr(c, host, inode, &nm); 591 567 if (err)
+16 -2
include/uapi/mtd/ubi-user.h
··· 285 285 __s8 padding[10]; 286 286 }; 287 287 288 + /* 289 + * UBI volume flags. 290 + * 291 + * @UBI_VOL_SKIP_CRC_CHECK_FLG: skip the CRC check done on a static volume at 292 + * open time. Only valid for static volumes and 293 + * should only be used if the volume user has a 294 + * way to verify data integrity 295 + */ 296 + enum { 297 + UBI_VOL_SKIP_CRC_CHECK_FLG = 0x1, 298 + }; 299 + 300 + #define UBI_VOL_VALID_FLGS (UBI_VOL_SKIP_CRC_CHECK_FLG) 301 + 288 302 /** 289 303 * struct ubi_mkvol_req - volume description data structure used in 290 304 * volume creation requests. ··· 306 292 * @alignment: volume alignment 307 293 * @bytes: volume size in bytes 308 294 * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) 309 - * @padding1: reserved for future, not used, has to be zeroed 295 + * @flags: volume flags (%UBI_VOL_SKIP_CRC_CHECK_FLG) 310 296 * @name_len: volume name length 311 297 * @padding2: reserved for future, not used, has to be zeroed 312 298 * @name: volume name ··· 335 321 __s32 alignment; 336 322 __s64 bytes; 337 323 __s8 vol_type; 338 - __s8 padding1; 324 + __u8 flags; 339 325 __s16 name_len; 340 326 __s8 padding2[4]; 341 327 char name[UBI_MAX_VOLUME_NAME + 1];