Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'ntfs3_for_6.4' of https://github.com/Paragon-Software-Group/linux-ntfs3

Pull ntfs3 updates from Konstantin Komarov:
"New code:

- add missed "nocase" in ntfs_show_options

- extend information on failures/errors

- small optimizations

Fixes:

- some logic errors

- some dead code was removed

- code is refactored and reformatted according to the new version of
clang-format

Code removal:

- 'noacsrules' option.

Currently, this option does not work properly, and its use leads to
unstable results. If we figure out how to implement it without
errors, we will add it later

- writepage"

* tag 'ntfs3_for_6.4' of https://github.com/Paragon-Software-Group/linux-ntfs3: (30 commits)
fs/ntfs3: Fix root inode checking
fs/ntfs3: Print details about mount fails
fs/ntfs3: Add missed "nocase" in ntfs_show_options
fs/ntfs3: Code formatting and refactoring
fs/ntfs3: Changed ntfs_get_acl() to use dentry
fs/ntfs3: Remove field sbi->used.bitmap.set_tail
fs/ntfs3: Undo critial modificatins to keep directory consistency
fs/ntfs3: Undo endian changes
fs/ntfs3: Optimization in ntfs_set_state()
fs/ntfs3: Fix ntfs_create_inode()
fs/ntfs3: Remove noacsrules
fs/ntfs3: Use bh_read to simplify code
fs/ntfs3: Fix a possible null-pointer dereference in ni_clear()
fs/ntfs3: Refactoring of various minor issues
fs/ntfs3: Restore overflow checking for attr size in mi_enum_attr
fs/ntfs3: Check for extremely large size of $AttrDef
fs/ntfs3: Improved checking of attribute's name length
fs/ntfs3: Add null pointer checks
fs/ntfs3: fix spelling mistake "attibute" -> "attribute"
fs/ntfs3: Add length check in indx_get_root
...

+538 -467
-11
Documentation/filesystems/ntfs3.rst
··· 61 61 directories, fmask applies only to files and dmask only to directories. 62 62 * - fmask= 63 63 64 - * - noacsrules 65 - - "No access rules" mount option sets access rights for files/folders to 66 - 777 and owner/group to root. This mount option absorbs all other 67 - permissions. 68 - 69 - - Permissions change for files/folders will be reported as successful, 70 - but they will remain 777. 71 - 72 - - Owner/group change will be reported as successful, but they will stay 73 - as root. 74 - 75 64 * - nohidden 76 65 - Files with the Windows-specific HIDDEN (FILE_ATTRIBUTE_HIDDEN) attribute 77 66 will not be shown under Linux.
+8 -9
fs/ntfs3/attrib.c
··· 405 405 int err = 0; 406 406 struct ntfs_sb_info *sbi = ni->mi.sbi; 407 407 u8 cluster_bits = sbi->cluster_bits; 408 - bool is_mft = 409 - ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len; 408 + bool is_mft = ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && 409 + !name_len; 410 410 u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp; 411 411 struct ATTRIB *attr = NULL, *attr_b; 412 412 struct ATTR_LIST_ENTRY *le, *le_b; ··· 531 531 pre_alloc = 0; 532 532 if (type == ATTR_DATA && !name_len && 533 533 sbi->options->prealloc) { 534 - pre_alloc = 535 - bytes_to_cluster( 536 - sbi, 537 - get_pre_allocated(new_size)) - 538 - new_alen; 534 + pre_alloc = bytes_to_cluster( 535 + sbi, get_pre_allocated( 536 + new_size)) - 537 + new_alen; 539 538 } 540 539 541 540 /* Get the last LCN to allocate from. */ ··· 572 573 err = attr_allocate_clusters( 573 574 sbi, run, vcn, lcn, to_allocate, &pre_alloc, 574 575 is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen, 575 - is_mft ? 0 576 - : (sbi->record_size - 576 + is_mft ? 0 : 577 + (sbi->record_size - 577 578 le32_to_cpu(rec->used) + 8) / 578 579 3 + 579 580 1,
+13 -12
fs/ntfs3/bitmap.c
··· 40 40 41 41 int __init ntfs3_init_bitmap(void) 42 42 { 43 - ntfs_enode_cachep = 44 - kmem_cache_create("ntfs3_enode_cache", sizeof(struct e_node), 0, 45 - SLAB_RECLAIM_ACCOUNT, NULL); 43 + ntfs_enode_cachep = kmem_cache_create("ntfs3_enode_cache", 44 + sizeof(struct e_node), 0, 45 + SLAB_RECLAIM_ACCOUNT, NULL); 46 46 return ntfs_enode_cachep ? 0 : -ENOMEM; 47 47 } 48 48 ··· 286 286 if (wnd->uptodated != 1) { 287 287 /* Check bits before 'bit'. */ 288 288 ib = wnd->zone_bit == wnd->zone_end || 289 - bit < wnd->zone_end 290 - ? 0 291 - : wnd->zone_end; 289 + bit < wnd->zone_end ? 290 + 0 : 291 + wnd->zone_end; 292 292 293 293 while (bit > ib && wnd_is_free_hlp(wnd, bit - 1, 1)) { 294 294 bit -= 1; ··· 297 297 298 298 /* Check bits after 'end_in'. */ 299 299 ib = wnd->zone_bit == wnd->zone_end || 300 - end_in > wnd->zone_bit 301 - ? wnd->nbits 302 - : wnd->zone_bit; 300 + end_in > wnd->zone_bit ? 301 + wnd->nbits : 302 + wnd->zone_bit; 303 303 304 304 while (end_in < ib && wnd_is_free_hlp(wnd, end_in, 1)) { 305 305 end_in += 1; ··· 417 417 return; 418 418 n3 = rb_first(&wnd->count_tree); 419 419 wnd->extent_max = 420 - n3 ? rb_entry(n3, struct e_node, count.node)->count.key 421 - : 0; 420 + n3 ? rb_entry(n3, struct e_node, count.node)->count.key : 421 + 0; 422 422 return; 423 423 } 424 424 ··· 658 658 if (!wnd->bits_last) 659 659 wnd->bits_last = wbits; 660 660 661 - wnd->free_bits = kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS | __GFP_NOWARN); 661 + wnd->free_bits = 662 + kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS | __GFP_NOWARN); 662 663 if (!wnd->free_bits) 663 664 return -ENOMEM; 664 665
+18 -32
fs/ntfs3/file.c
··· 22 22 { 23 23 struct fstrim_range __user *user_range; 24 24 struct fstrim_range range; 25 + struct block_device *dev; 25 26 int err; 26 27 27 28 if (!capable(CAP_SYS_ADMIN)) 28 29 return -EPERM; 29 30 30 - if (!bdev_max_discard_sectors(sbi->sb->s_bdev)) 31 + dev = sbi->sb->s_bdev; 32 + if (!bdev_max_discard_sectors(dev)) 31 33 return -EOPNOTSUPP; 32 34 33 35 user_range = (struct fstrim_range __user *)arg; 34 36 if (copy_from_user(&range, user_range, sizeof(range))) 35 37 return -EFAULT; 36 38 37 - range.minlen = max_t(u32, range.minlen, 38 - bdev_discard_granularity(sbi->sb->s_bdev)); 39 + range.minlen = max_t(u32, range.minlen, bdev_discard_granularity(dev)); 39 40 40 41 err = ntfs_trim_fs(sbi, &range); 41 42 if (err < 0) ··· 191 190 192 191 for (; idx < idx_end; idx += 1, from = 0) { 193 192 page_off = (loff_t)idx << PAGE_SHIFT; 194 - to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off) 195 - : PAGE_SIZE; 193 + to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off) : 194 + PAGE_SIZE; 196 195 iblock = page_off >> inode->i_blkbits; 197 196 198 197 page = find_or_create_page(mapping, idx, ··· 224 223 set_buffer_uptodate(bh); 225 224 226 225 if (!buffer_uptodate(bh)) { 227 - lock_buffer(bh); 228 - bh->b_end_io = end_buffer_read_sync; 229 - get_bh(bh); 230 - submit_bh(REQ_OP_READ, bh); 231 - 232 - wait_on_buffer(bh); 233 - if (!buffer_uptodate(bh)) { 226 + err = bh_read(bh, 0); 227 + if (err < 0) { 234 228 unlock_page(page); 235 229 put_page(page); 236 - err = -EIO; 237 230 goto out; 238 231 } 239 232 } ··· 565 570 ni_unlock(ni); 566 571 } else { 567 572 /* Check new size. */ 573 + u8 cluster_bits = sbi->cluster_bits; 568 574 569 575 /* generic/213: expected -ENOSPC instead of -EFBIG. 
*/ 570 576 if (!is_supported_holes) { 571 577 loff_t to_alloc = new_size - inode_get_bytes(inode); 572 578 573 579 if (to_alloc > 0 && 574 - (to_alloc >> sbi->cluster_bits) > 580 + (to_alloc >> cluster_bits) > 575 581 wnd_zeroes(&sbi->used.bitmap)) { 576 582 err = -ENOSPC; 577 583 goto out; ··· 593 597 } 594 598 595 599 if (is_supported_holes) { 596 - CLST vcn = vbo >> sbi->cluster_bits; 600 + CLST vcn = vbo >> cluster_bits; 597 601 CLST cend = bytes_to_cluster(sbi, end); 598 602 CLST cend_v = bytes_to_cluster(sbi, ni->i_valid); 599 603 CLST lcn, clen; ··· 656 660 int ntfs3_setattr(struct mnt_idmap *idmap, struct dentry *dentry, 657 661 struct iattr *attr) 658 662 { 659 - struct super_block *sb = dentry->d_sb; 660 - struct ntfs_sb_info *sbi = sb->s_fs_info; 661 663 struct inode *inode = d_inode(dentry); 662 664 struct ntfs_inode *ni = ntfs_i(inode); 663 665 u32 ia_valid = attr->ia_valid; 664 666 umode_t mode = inode->i_mode; 665 667 int err; 666 - 667 - if (sbi->options->noacsrules) { 668 - /* "No access rules" - Force any changes of time etc. */ 669 - attr->ia_valid |= ATTR_FORCE; 670 - /* and disable for editing some attributes. */ 671 - attr->ia_valid &= ~(ATTR_UID | ATTR_GID | ATTR_MODE); 672 - ia_valid = attr->ia_valid; 673 - } 674 668 675 669 err = setattr_prepare(idmap, dentry, attr); 676 670 if (err) ··· 705 719 } 706 720 707 721 if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE)) 708 - ntfs_save_wsl_perm(inode); 722 + ntfs_save_wsl_perm(inode, NULL); 709 723 mark_inode_dirty(inode); 710 724 out: 711 725 return err; ··· 1051 1065 if (ret) 1052 1066 goto out; 1053 1067 1054 - ret = is_compressed(ni) ? ntfs_compress_write(iocb, from) 1055 - : __generic_file_write_iter(iocb, from); 1068 + ret = is_compressed(ni) ? ntfs_compress_write(iocb, from) : 1069 + __generic_file_write_iter(iocb, from); 1056 1070 1057 1071 out: 1058 1072 inode_unlock(inode); ··· 1104 1118 int err = 0; 1105 1119 1106 1120 /* If we are last writer on the inode, drop the block reservation. 
*/ 1107 - if (sbi->options->prealloc && ((file->f_mode & FMODE_WRITE) && 1108 - atomic_read(&inode->i_writecount) == 1)) { 1121 + if (sbi->options->prealloc && 1122 + ((file->f_mode & FMODE_WRITE) && 1123 + atomic_read(&inode->i_writecount) == 1)) { 1109 1124 ni_lock(ni); 1110 1125 down_write(&ni->file.run_lock); 1111 1126 ··· 1146 1159 .getattr = ntfs_getattr, 1147 1160 .setattr = ntfs3_setattr, 1148 1161 .listxattr = ntfs_listxattr, 1149 - .permission = ntfs_permission, 1150 - .get_inode_acl = ntfs_get_acl, 1162 + .get_acl = ntfs_get_acl, 1151 1163 .set_acl = ntfs_set_acl, 1152 1164 .fiemap = ntfs_fiemap, 1153 1165 };
+23 -23
fs/ntfs3/frecord.c
··· 76 76 const struct ATTRIB *attr; 77 77 78 78 attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL); 79 - return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO)) 80 - : NULL; 79 + return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO)) : 80 + NULL; 81 81 } 82 82 83 83 /* ··· 91 91 92 92 attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL); 93 93 94 - return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO5)) 95 - : NULL; 94 + return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO5)) : 95 + NULL; 96 96 } 97 97 98 98 /* ··· 102 102 { 103 103 struct rb_node *node; 104 104 105 - if (!ni->vfs_inode.i_nlink && is_rec_inuse(ni->mi.mrec)) 105 + if (!ni->vfs_inode.i_nlink && ni->mi.mrec && is_rec_inuse(ni->mi.mrec)) 106 106 ni_delete_all(ni); 107 107 108 108 al_destroy(ni); ··· 1439 1439 int err; 1440 1440 CLST plen; 1441 1441 struct ATTRIB *attr; 1442 - bool is_ext = 1443 - (flags & (ATTR_FLAG_SPARSED | ATTR_FLAG_COMPRESSED)) && !svcn; 1442 + bool is_ext = (flags & (ATTR_FLAG_SPARSED | ATTR_FLAG_COMPRESSED)) && 1443 + !svcn; 1444 1444 u32 name_size = ALIGN(name_len * sizeof(short), 8); 1445 1445 u32 name_off = is_ext ? SIZEOF_NONRESIDENT_EX : SIZEOF_NONRESIDENT; 1446 1446 u32 run_off = name_off + name_size; ··· 1645 1645 { 1646 1646 struct ATTRIB *attr = NULL; 1647 1647 struct ATTR_FILE_NAME *fname; 1648 - struct le_str *fns; 1648 + struct le_str *fns; 1649 1649 1650 1650 if (le) 1651 1651 *le = NULL; ··· 1756 1756 } 1757 1757 1758 1758 /* Resize nonresident empty attribute in-place only. */ 1759 - new_asize = (new_aflags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED)) 1760 - ? (SIZEOF_NONRESIDENT_EX + 8) 1761 - : (SIZEOF_NONRESIDENT + 8); 1759 + new_asize = (new_aflags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED)) ? 
1760 + (SIZEOF_NONRESIDENT_EX + 8) : 1761 + (SIZEOF_NONRESIDENT + 8); 1762 1762 1763 1763 if (!mi_resize_attr(mi, attr, new_asize - le32_to_cpu(attr->size))) 1764 1764 return -EOPNOTSUPP; ··· 2965 2965 { 2966 2966 struct ntfs_sb_info *sbi = ni->mi.sbi; 2967 2967 struct ATTRIB *attr; 2968 - u16 de_key_size = de2 ? le16_to_cpu(de2->key_size) : 0; 2968 + u16 de_key_size; 2969 2969 2970 2970 switch (undo_step) { 2971 2971 case 4: 2972 + de_key_size = le16_to_cpu(de2->key_size); 2972 2973 if (ni_insert_resident(ni, de_key_size, ATTR_NAME, NULL, 0, 2973 - &attr, NULL, NULL)) { 2974 + &attr, NULL, NULL)) 2974 2975 return false; 2975 - } 2976 2976 memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), de2 + 1, de_key_size); 2977 2977 2978 2978 mi_get_ref(&ni->mi, &de2->ref); ··· 2981 2981 de2->flags = 0; 2982 2982 de2->res = 0; 2983 2983 2984 - if (indx_insert_entry(&dir_ni->dir, dir_ni, de2, sbi, NULL, 2985 - 1)) { 2984 + if (indx_insert_entry(&dir_ni->dir, dir_ni, de2, sbi, NULL, 1)) 2986 2985 return false; 2987 - } 2988 2986 fallthrough; 2989 2987 2990 2988 case 2: 2991 2989 de_key_size = le16_to_cpu(de->key_size); 2992 2990 2993 2991 if (ni_insert_resident(ni, de_key_size, ATTR_NAME, NULL, 0, 2994 - &attr, NULL, NULL)) { 2992 + &attr, NULL, NULL)) 2995 2993 return false; 2996 - } 2997 2994 2998 2995 memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), de + 1, de_key_size); 2999 2996 mi_get_ref(&ni->mi, &de->ref); ··· 3159 3162 u64 data_size = le64_to_cpu(attr->nres.data_size); 3160 3163 __le64 valid_le; 3161 3164 3162 - dup->alloc_size = is_attr_ext(attr) 3163 - ? attr->nres.total_size 3164 - : attr->nres.alloc_size; 3165 + dup->alloc_size = is_attr_ext(attr) ? 
3166 + attr->nres.total_size : 3167 + attr->nres.alloc_size; 3165 3168 dup->data_size = attr->nres.data_size; 3166 3169 3167 3170 if (new_valid > data_size) ··· 3254 3257 mark_inode_dirty_sync(inode); 3255 3258 return 0; 3256 3259 } 3260 + 3261 + if (!ni->mi.mrec) 3262 + goto out; 3257 3263 3258 3264 if (is_rec_inuse(ni->mi.mrec) && 3259 3265 !(sbi->flags & NTFS_FLAGS_LOG_REPLAYING) && inode->i_nlink) { ··· 3360 3360 ni_unlock(ni); 3361 3361 3362 3362 if (err) { 3363 - ntfs_err(sb, "%s r=%lx failed, %d.", hint, inode->i_ino, err); 3363 + ntfs_inode_err(inode, "%s failed, %d.", hint, err); 3364 3364 ntfs_set_state(sbi, NTFS_DIRTY_ERROR); 3365 3365 return err; 3366 3366 }
+44 -39
fs/ntfs3/fslog.c
··· 827 827 828 828 memcpy(rt + 1, tbl + 1, esize * used); 829 829 830 - rt->free_goal = free_goal == ~0u 831 - ? cpu_to_le32(~0u) 832 - : cpu_to_le32(sizeof(struct RESTART_TABLE) + 833 - free_goal * esize); 830 + rt->free_goal = free_goal == ~0u ? 831 + cpu_to_le32(~0u) : 832 + cpu_to_le32(sizeof(struct RESTART_TABLE) + 833 + free_goal * esize); 834 834 835 835 if (tbl->first_free) { 836 836 rt->first_free = tbl->first_free; ··· 1089 1089 (lsn < (lsn_to_vbo(log, h_lsn) & ~log->page_mask) ? 1 : 0)) 1090 1090 << log->file_data_bits) + 1091 1091 ((((is_log_record_end(hdr) && 1092 - h_lsn <= le64_to_cpu(hdr->record_hdr.last_end_lsn)) 1093 - ? le16_to_cpu(hdr->record_hdr.next_record_off) 1094 - : log->page_size) + 1092 + h_lsn <= le64_to_cpu(hdr->record_hdr.last_end_lsn)) ? 1093 + le16_to_cpu(hdr->record_hdr.next_record_off) : 1094 + log->page_size) + 1095 1095 lsn) >> 1096 1096 3); 1097 1097 ··· 1298 1298 if (!log->clst_per_page) 1299 1299 log->clst_per_page = 1; 1300 1300 1301 - log->first_page = major_ver >= 2 1302 - ? 0x22 * page_size 1303 - : ((sys_page_size << 1) + (page_size << 1)); 1301 + log->first_page = major_ver >= 2 ? 1302 + 0x22 * page_size : 1303 + ((sys_page_size << 1) + (page_size << 1)); 1304 1304 log->major_ver = major_ver; 1305 1305 log->minor_ver = minor_ver; 1306 1306 } ··· 1512 1512 * have to compute the free range. 1513 1513 * If there is no oldest lsn then start at the first page of the file. 1514 1514 */ 1515 - oldest_off = (log->l_flags & NTFSLOG_NO_OLDEST_LSN) 1516 - ? log->first_page 1517 - : (log->oldest_lsn_off & ~log->sys_page_mask); 1515 + oldest_off = (log->l_flags & NTFSLOG_NO_OLDEST_LSN) ? 1516 + log->first_page : 1517 + (log->oldest_lsn_off & ~log->sys_page_mask); 1518 1518 1519 1519 /* 1520 1520 * We will use the next log page offset to compute the next free page. 1521 1521 * If we are going to reuse this page go to the next page. 1522 1522 * If we are at the first page then use the end of the file. 
1523 1523 */ 1524 - next_free_off = (log->l_flags & NTFSLOG_REUSE_TAIL) 1525 - ? log->next_page + log->page_size 1526 - : log->next_page == log->first_page 1527 - ? log->l_size 1528 - : log->next_page; 1524 + next_free_off = (log->l_flags & NTFSLOG_REUSE_TAIL) ? 1525 + log->next_page + log->page_size : 1526 + log->next_page == log->first_page ? log->l_size : 1527 + log->next_page; 1529 1528 1530 1529 /* If the two offsets are the same then there is no available space. */ 1531 1530 if (oldest_off == next_free_off) ··· 1534 1535 * this range from the total available pages. 1535 1536 */ 1536 1537 free_bytes = 1537 - oldest_off < next_free_off 1538 - ? log->total_avail_pages - (next_free_off - oldest_off) 1539 - : oldest_off - next_free_off; 1538 + oldest_off < next_free_off ? 1539 + log->total_avail_pages - (next_free_off - oldest_off) : 1540 + oldest_off - next_free_off; 1540 1541 1541 1542 free_bytes >>= log->page_bits; 1542 1543 return free_bytes * log->reserved; ··· 1670 1671 } 1671 1672 1672 1673 best_lsn1 = first_tail ? base_lsn(log, first_tail, first_file_off) : 0; 1673 - best_lsn2 = 1674 - second_tail ? base_lsn(log, second_tail, second_file_off) : 0; 1674 + best_lsn2 = second_tail ? base_lsn(log, second_tail, second_file_off) : 1675 + 0; 1675 1676 1676 1677 if (first_tail && second_tail) { 1677 1678 if (best_lsn1 > best_lsn2) { ··· 1766 1767 1767 1768 page_cnt = page_pos = 1; 1768 1769 1769 - curpage_off = seq_base == log->seq_num ? min(log->next_page, page_off) 1770 - : log->next_page; 1770 + curpage_off = seq_base == log->seq_num ? min(log->next_page, page_off) : 1771 + log->next_page; 1771 1772 1772 1773 wrapped_file = 1773 1774 curpage_off == log->first_page && ··· 1825 1826 le64_to_cpu(cur_page->record_hdr.last_end_lsn) && 1826 1827 ((lsn_cur >> log->file_data_bits) + 1827 1828 ((curpage_off < 1828 - (lsn_to_vbo(log, lsn_cur) & ~log->page_mask)) 1829 - ? 1 1830 - : 0)) != expected_seq) { 1829 + (lsn_to_vbo(log, lsn_cur) & ~log->page_mask)) ? 
1830 + 1 : 1831 + 0)) != expected_seq) { 1831 1832 goto check_tail; 1832 1833 } 1833 1834 ··· 2574 2575 return find_log_rec(log, *lsn, lcb); 2575 2576 } 2576 2577 2577 - static inline bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes) 2578 + bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes) 2578 2579 { 2579 2580 __le16 mask; 2580 2581 u32 min_de, de_off, used, total; ··· 2641 2642 { 2642 2643 bool ret; 2643 2644 const struct INDEX_ROOT *root = resident_data(attr); 2644 - u8 index_bits = le32_to_cpu(root->index_block_size) >= sbi->cluster_size 2645 - ? sbi->cluster_bits 2646 - : SECTOR_SHIFT; 2645 + u8 index_bits = le32_to_cpu(root->index_block_size) >= 2646 + sbi->cluster_size ? 2647 + sbi->cluster_bits : 2648 + SECTOR_SHIFT; 2647 2649 u8 block_clst = root->index_block_clst; 2648 2650 2649 2651 if (le32_to_cpu(attr->res.data_size) < sizeof(struct INDEX_ROOT) || ··· 3683 3683 3684 3684 if (a_dirty) { 3685 3685 attr = oa->attr; 3686 - err = ntfs_sb_write_run(sbi, oa->run1, vbo, buffer_le, bytes, 0); 3686 + err = ntfs_sb_write_run(sbi, oa->run1, vbo, buffer_le, bytes, 3687 + 0); 3687 3688 if (err) 3688 3689 goto out; 3689 3690 } ··· 3769 3768 if (!log) 3770 3769 return -ENOMEM; 3771 3770 3772 - memset(&rst_info, 0, sizeof(struct restart_info)); 3773 - 3774 3771 log->ni = ni; 3775 3772 log->l_size = l_size; 3776 3773 log->one_page_buf = kmalloc(page_size, GFP_NOFS); 3774 + 3777 3775 if (!log->one_page_buf) { 3778 3776 err = -ENOMEM; 3779 3777 goto out; ··· 3783 3783 log->page_bits = blksize_bits(page_size); 3784 3784 3785 3785 /* Look for a restart area on the disk. */ 3786 + memset(&rst_info, 0, sizeof(struct restart_info)); 3786 3787 err = log_read_rst(log, l_size, true, &rst_info); 3787 3788 if (err) 3788 3789 goto out; ··· 3860 3859 log->init_ra = !!rst_info.vbo; 3861 3860 3862 3861 /* If we have a valid page then grab a pointer to the restart area. */ 3863 - ra2 = rst_info.valid_page 3864 - ? 
Add2Ptr(rst_info.r_page, 3865 - le16_to_cpu(rst_info.r_page->ra_off)) 3866 - : NULL; 3862 + ra2 = rst_info.valid_page ? 3863 + Add2Ptr(rst_info.r_page, 3864 + le16_to_cpu(rst_info.r_page->ra_off)) : 3865 + NULL; 3867 3866 3868 3867 if (rst_info.chkdsk_was_run || 3869 3868 (ra2 && ra2->client_idx[1] == LFS_NO_CLIENT_LE)) { ··· 4257 4256 rec_len -= t32; 4258 4257 4259 4258 attr_names = kmemdup(Add2Ptr(lrh, t32), rec_len, GFP_NOFS); 4259 + if (!attr_names) { 4260 + err = -ENOMEM; 4261 + goto out; 4262 + } 4260 4263 4261 4264 lcb_put(lcb); 4262 4265 lcb = NULL;
+47 -39
fs/ntfs3/fsntfs.c
··· 172 172 u16 sample, fo, fn; 173 173 174 174 fo = le16_to_cpu(rhdr->fix_off); 175 - fn = simple ? ((bytes >> SECTOR_SHIFT) + 1) 176 - : le16_to_cpu(rhdr->fix_num); 175 + fn = simple ? ((bytes >> SECTOR_SHIFT) + 1) : 176 + le16_to_cpu(rhdr->fix_num); 177 177 178 178 /* Check errors. */ 179 179 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- || ··· 223 223 inode = ntfs_iget5(sb, &ref, &NAME_EXTEND); 224 224 if (IS_ERR(inode)) { 225 225 err = PTR_ERR(inode); 226 - ntfs_err(sb, "Failed to load $Extend."); 226 + ntfs_err(sb, "Failed to load $Extend (%d).", err); 227 227 inode = NULL; 228 228 goto out; 229 229 } ··· 282 282 283 283 /* Check for 4GB. */ 284 284 if (ni->vfs_inode.i_size >= 0x100000000ull) { 285 - ntfs_err(sb, "\x24LogFile is too big"); 285 + ntfs_err(sb, "\x24LogFile is large than 4G."); 286 286 err = -EINVAL; 287 287 goto out; 288 288 } ··· 646 646 NULL, 0, NULL, NULL)) 647 647 goto next; 648 648 649 - __clear_bit_le(ir - MFT_REC_RESERVED, 649 + __clear_bit(ir - MFT_REC_RESERVED, 650 650 &sbi->mft.reserved_bitmap); 651 651 } 652 652 } 653 653 654 654 /* Scan 5 bits for zero. 
Bit 0 == MFT_REC_RESERVED */ 655 - zbit = find_next_zero_bit_le(&sbi->mft.reserved_bitmap, 655 + zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap, 656 656 MFT_REC_FREE, MFT_REC_RESERVED); 657 657 if (zbit >= MFT_REC_FREE) { 658 658 sbi->mft.next_reserved = MFT_REC_FREE; ··· 720 720 if (*rno >= MFT_REC_FREE) 721 721 wnd_set_used(wnd, *rno, 1); 722 722 else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) 723 - __set_bit_le(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap); 723 + __set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap); 724 724 725 725 out: 726 726 if (!mft) ··· 748 748 else 749 749 wnd_set_free(wnd, rno, 1); 750 750 } else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) { 751 - __clear_bit_le(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap); 751 + __clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap); 752 752 } 753 753 754 754 if (rno < wnd_zone_bit(wnd)) ··· 846 846 { 847 847 int err; 848 848 struct super_block *sb = sbi->sb; 849 - u32 blocksize; 849 + u32 blocksize, bytes; 850 850 sector_t block1, block2; 851 - u32 bytes; 852 851 853 - if (!sb) 852 + /* 853 + * sb can be NULL here. In this case sbi->flags should be 0 too. 854 + */ 855 + if (!sb || !(sbi->flags & NTFS_FLAGS_MFTMIRR)) 854 856 return; 855 857 856 858 blocksize = sb->s_blocksize; 857 - 858 - if (!(sbi->flags & NTFS_FLAGS_MFTMIRR)) 859 - return; 860 - 861 859 bytes = sbi->mft.recs_mirr << sbi->record_bits; 862 860 block1 = sbi->mft.lbo >> sb->s_blocksize_bits; 863 861 block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits; ··· 923 925 struct VOLUME_INFO *info; 924 926 struct mft_inode *mi; 925 927 struct ntfs_inode *ni; 928 + __le16 info_flags; 926 929 927 930 /* 928 931 * Do not change state if fs was real_dirty. 
··· 956 957 goto out; 957 958 } 958 959 960 + info_flags = info->flags; 961 + 959 962 switch (dirty) { 960 963 case NTFS_DIRTY_ERROR: 961 964 ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors"); ··· 971 970 break; 972 971 } 973 972 /* Cache current volume flags. */ 974 - sbi->volume.flags = info->flags; 975 - mi->dirty = true; 973 + if (info_flags != info->flags) { 974 + sbi->volume.flags = info->flags; 975 + mi->dirty = true; 976 + } 976 977 err = 0; 977 978 978 979 out: ··· 1686 1683 1687 1684 out: 1688 1685 if (err) { 1686 + make_bad_inode(inode); 1689 1687 iput(inode); 1690 1688 ni = ERR_PTR(err); 1691 1689 } ··· 1863 1859 inode = ntfs_iget5(sb, &ref, &NAME_SECURE); 1864 1860 if (IS_ERR(inode)) { 1865 1861 err = PTR_ERR(inode); 1866 - ntfs_err(sb, "Failed to load $Secure."); 1862 + ntfs_err(sb, "Failed to load $Secure (%d).", err); 1867 1863 inode = NULL; 1868 1864 goto out; 1869 1865 } ··· 1874 1870 1875 1871 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME, 1876 1872 ARRAY_SIZE(SDH_NAME), NULL, NULL); 1877 - if (!attr) { 1878 - err = -EINVAL; 1879 - goto out; 1880 - } 1881 - 1882 - root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT)); 1883 - if (root_sdh->type != ATTR_ZERO || 1873 + if (!attr || 1874 + !(root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) || 1875 + root_sdh->type != ATTR_ZERO || 1884 1876 root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH || 1885 - offsetof(struct INDEX_ROOT, ihdr) + root_sdh->ihdr.used > attr->res.data_size) { 1877 + offsetof(struct INDEX_ROOT, ihdr) + 1878 + le32_to_cpu(root_sdh->ihdr.used) > 1879 + le32_to_cpu(attr->res.data_size)) { 1880 + ntfs_err(sb, "$Secure::$SDH is corrupted."); 1886 1881 err = -EINVAL; 1887 1882 goto out; 1888 1883 } 1889 1884 1890 1885 err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH); 1891 - if (err) 1892 - goto out; 1893 - 1894 - attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME, 1895 - ARRAY_SIZE(SII_NAME), NULL, NULL); 1896 - if (!attr) { 1897 - err 
= -EINVAL; 1886 + if (err) { 1887 + ntfs_err(sb, "Failed to initialize $Secure::$SDH (%d).", err); 1898 1888 goto out; 1899 1889 } 1900 1890 1901 - root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT)); 1902 - if (root_sii->type != ATTR_ZERO || 1891 + attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME, 1892 + ARRAY_SIZE(SII_NAME), NULL, NULL); 1893 + if (!attr || 1894 + !(root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) || 1895 + root_sii->type != ATTR_ZERO || 1903 1896 root_sii->rule != NTFS_COLLATION_TYPE_UINT || 1904 - offsetof(struct INDEX_ROOT, ihdr) + root_sii->ihdr.used > attr->res.data_size) { 1897 + offsetof(struct INDEX_ROOT, ihdr) + 1898 + le32_to_cpu(root_sii->ihdr.used) > 1899 + le32_to_cpu(attr->res.data_size)) { 1900 + ntfs_err(sb, "$Secure::$SII is corrupted."); 1905 1901 err = -EINVAL; 1906 1902 goto out; 1907 1903 } 1908 1904 1909 1905 err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII); 1910 - if (err) 1906 + if (err) { 1907 + ntfs_err(sb, "Failed to initialize $Secure::$SII (%d).", err); 1911 1908 goto out; 1909 + } 1912 1910 1913 1911 fnd_sii = fnd_get(); 1914 1912 if (!fnd_sii) { ··· 2600 2594 if (len == 4 || (len > 4 && le16_to_cpu(name[4]) == '.')) { 2601 2595 port_digit = le16_to_cpu(name[3]); 2602 2596 if (port_digit >= '1' && port_digit <= '9') 2603 - if (!ntfs_cmp_names(name, 3, COM_NAME, 3, upcase, false) || 2604 - !ntfs_cmp_names(name, 3, LPT_NAME, 3, upcase, false)) 2597 + if (!ntfs_cmp_names(name, 3, COM_NAME, 3, upcase, 2598 + false) || 2599 + !ntfs_cmp_names(name, 3, LPT_NAME, 3, upcase, 2600 + false)) 2605 2601 return true; 2606 2602 } 2607 2603
+56 -25
fs/ntfs3/index.c
··· 431 431 if (vbo + blocksize > data_size) 432 432 nbits = 8 * (data_size - vbo); 433 433 434 - ok = nbits > from ? (*fn)((ulong *)bh->b_data, from, nbits, ret) 435 - : false; 434 + ok = nbits > from ? 435 + (*fn)((ulong *)bh->b_data, from, nbits, ret) : 436 + false; 436 437 put_bh(bh); 437 438 438 439 if (ok) { ··· 726 725 u32 e_size, e_key_len; 727 726 u32 end = le32_to_cpu(hdr->used); 728 727 u32 off = le32_to_cpu(hdr->de_off); 728 + u32 total = le32_to_cpu(hdr->total); 729 729 u16 offs[128]; 730 730 731 731 fill_table: 732 + if (end > total) 733 + return NULL; 734 + 732 735 if (off + sizeof(struct NTFS_DE) > end) 733 736 return NULL; 734 737 ··· 765 760 return NULL; 766 761 767 762 max_idx = 0; 768 - table_size = min(table_size * 2, 769 - (int)ARRAY_SIZE(offs)); 763 + table_size = min(table_size * 2, (int)ARRAY_SIZE(offs)); 770 764 goto fill_table; 771 765 } 772 766 } else if (diff2 < 0) { ··· 847 843 u16 esize = le16_to_cpu(re->size); 848 844 u32 off = PtrOffset(hdr, re); 849 845 int bytes = used - (off + esize); 846 + 847 + /* check INDEX_HDR valid before using INDEX_HDR */ 848 + if (!check_index_header(hdr, le32_to_cpu(hdr->total))) 849 + return NULL; 850 850 851 851 if (off >= used || esize < sizeof(struct NTFS_DE) || 852 852 bytes < sizeof(struct NTFS_DE)) ··· 994 986 struct ATTR_LIST_ENTRY *le = NULL; 995 987 struct ATTRIB *a; 996 988 const struct INDEX_NAMES *in = &s_index_names[indx->type]; 989 + struct INDEX_ROOT *root; 997 990 998 991 a = ni_find_attr(ni, NULL, &le, ATTR_ROOT, in->name, in->name_len, NULL, 999 992 mi); ··· 1004 995 if (attr) 1005 996 *attr = a; 1006 997 1007 - return resident_data_ex(a, sizeof(struct INDEX_ROOT)); 998 + root = resident_data_ex(a, sizeof(struct INDEX_ROOT)); 999 + 1000 + /* length check */ 1001 + if (root && 1002 + offsetof(struct INDEX_ROOT, ihdr) + le32_to_cpu(root->ihdr.used) > 1003 + le32_to_cpu(a->res.data_size)) { 1004 + return NULL; 1005 + } 1006 + 1007 + return root; 1008 1008 } 1009 1009 1010 1010 static int 
indx_write(struct ntfs_index *indx, struct ntfs_inode *ni, ··· 1103 1085 } 1104 1086 1105 1087 /* check for index header length */ 1106 - if (offsetof(struct INDEX_BUFFER, ihdr) + ib->ihdr.used > bytes) { 1088 + if (offsetof(struct INDEX_BUFFER, ihdr) + le32_to_cpu(ib->ihdr.used) > 1089 + bytes) { 1107 1090 err = -EINVAL; 1108 1091 goto out; 1109 1092 } ··· 1170 1151 1171 1152 /* Read next level. */ 1172 1153 err = indx_read(indx, ni, de_get_vbn(e), &node); 1173 - if (err) 1154 + if (err) { 1155 + /* io error? */ 1174 1156 return err; 1157 + } 1175 1158 1176 1159 /* Lookup entry that is <= to the search value. */ 1177 1160 e = hdr_find_e(indx, &node->index->ihdr, key, key_len, ctx, ··· 1675 1654 mi->dirty = true; 1676 1655 1677 1656 /* Create alloc and bitmap attributes (if not). */ 1678 - err = run_is_empty(&indx->alloc_run) 1679 - ? indx_create_allocate(indx, ni, &new_vbn) 1680 - : indx_add_allocate(indx, ni, &new_vbn); 1657 + err = run_is_empty(&indx->alloc_run) ? 1658 + indx_create_allocate(indx, ni, &new_vbn) : 1659 + indx_add_allocate(indx, ni, &new_vbn); 1681 1660 1682 1661 /* Layout of record may be changed, so rescan root. */ 1683 1662 root = indx_get_root(indx, ni, &attr, &mi); ··· 1780 1759 struct indx_node *n1 = fnd->nodes[level]; 1781 1760 struct INDEX_HDR *hdr1 = &n1->index->ihdr; 1782 1761 struct INDEX_HDR *hdr2; 1783 - u32 to_copy, used; 1762 + u32 to_copy, used, used1; 1784 1763 CLST new_vbn; 1785 1764 __le64 t_vbn, *sub_vbn; 1786 1765 u16 sp_size; 1766 + void *hdr1_saved = NULL; 1787 1767 1788 1768 /* Try the most easy case. */ 1789 1769 e = fnd->level - 1 == level ? 
fnd->de[level] : NULL; ··· 1816 1794 if (!up_e) 1817 1795 return -ENOMEM; 1818 1796 memcpy(up_e, sp, sp_size); 1797 + 1798 + used1 = le32_to_cpu(hdr1->used); 1799 + hdr1_saved = kmemdup(hdr1, used1, GFP_NOFS); 1800 + if (!hdr1_saved) { 1801 + err = -ENOMEM; 1802 + goto out; 1803 + } 1819 1804 1820 1805 if (!hdr1->flags) { 1821 1806 up_e->flags |= NTFS_IE_HAS_SUBNODES; ··· 1856 1827 hdr_insert_head(hdr2, de_t, to_copy); 1857 1828 1858 1829 /* Remove all entries (sp including) from hdr1. */ 1859 - used = le32_to_cpu(hdr1->used) - to_copy - sp_size; 1830 + used = used1 - to_copy - sp_size; 1860 1831 memmove(de_t, Add2Ptr(sp, sp_size), used - le32_to_cpu(hdr1->de_off)); 1861 1832 hdr1->used = cpu_to_le32(used); 1862 1833 ··· 1867 1838 hdr_insert_de(indx, 1868 1839 (*indx->cmp)(new_de + 1, le16_to_cpu(new_de->key_size), 1869 1840 up_e + 1, le16_to_cpu(up_e->key_size), 1870 - ctx) < 0 1871 - ? hdr2 1872 - : hdr1, 1841 + ctx) < 0 ? 1842 + hdr2 : 1843 + hdr1, 1873 1844 new_de, NULL, ctx); 1874 1845 1875 1846 indx_mark_used(indx, ni, new_vbn >> indx->idx2vbn_bits); ··· 1886 1857 if (!level) { 1887 1858 /* Insert in root. */ 1888 1859 err = indx_insert_into_root(indx, ni, up_e, NULL, ctx, fnd, 0); 1889 - if (err) 1890 - goto out; 1891 1860 } else { 1892 1861 /* 1893 1862 * The target buffer's parent is another index buffer. ··· 1893 1866 */ 1894 1867 err = indx_insert_into_buffer(indx, ni, root, up_e, ctx, 1895 1868 level - 1, fnd); 1896 - if (err) 1897 - goto out; 1869 + } 1870 + 1871 + if (err) { 1872 + /* 1873 + * Undo critical operations. 
1874 + */ 1875 + indx_mark_free(indx, ni, new_vbn >> indx->idx2vbn_bits); 1876 + memcpy(hdr1, hdr1_saved, used1); 1877 + indx_write(indx, ni, n1, 0); 1898 1878 } 1899 1879 1900 1880 out: 1901 1881 kfree(up_e); 1882 + kfree(hdr1_saved); 1902 1883 1903 1884 return err; 1904 1885 } ··· 1965 1930 */ 1966 1931 err = indx_insert_into_root(indx, ni, new_de, fnd->root_de, ctx, 1967 1932 fnd, undo); 1968 - if (err) 1969 - goto out; 1970 1933 } else { 1971 1934 /* 1972 1935 * Found a leaf buffer, so we'll insert the new entry into it. 1973 1936 */ 1974 1937 err = indx_insert_into_buffer(indx, ni, root, new_de, ctx, 1975 1938 fnd->level - 1, fnd); 1976 - if (err) 1977 - goto out; 1978 1939 } 1979 1940 1980 1941 out: ··· 2339 2308 err = level ? indx_insert_into_buffer(indx, ni, root, 2340 2309 re, ctx, 2341 2310 fnd->level - 1, 2342 - fnd) 2343 - : indx_insert_into_root(indx, ni, re, e, 2311 + fnd) : 2312 + indx_insert_into_root(indx, ni, re, e, 2344 2313 ctx, fnd, 0); 2345 2314 kfree(re); 2346 2315
+75 -59
fs/ntfs3/inode.c
··· 100 100 /* Record should contain $I30 root. */ 101 101 is_dir = rec->flags & RECORD_FLAG_DIR; 102 102 103 + /* MFT_REC_MFT is not a dir */ 104 + if (is_dir && ino == MFT_REC_MFT) { 105 + err = -EINVAL; 106 + goto out; 107 + } 108 + 103 109 inode->i_generation = le16_to_cpu(rec->seq); 104 110 105 111 /* Enumerate all struct Attributes MFT. */ ··· 137 131 rsize = attr->non_res ? 0 : le32_to_cpu(attr->res.data_size); 138 132 asize = le32_to_cpu(attr->size); 139 133 140 - if (le16_to_cpu(attr->name_off) + attr->name_len > asize) 134 + /* 135 + * Really this check was done in 'ni_enum_attr_ex' -> ... 'mi_enum_attr'. 136 + * There not critical to check this case again 137 + */ 138 + if (attr->name_len && 139 + sizeof(short) * attr->name_len + le16_to_cpu(attr->name_off) > 140 + asize) 141 141 goto out; 142 142 143 143 if (attr->non_res) { ··· 262 250 if (!attr->nres.alloc_size) 263 251 goto next_attr; 264 252 265 - run = ino == MFT_REC_BITMAP ? &sbi->used.bitmap.run 266 - : &ni->file.run; 253 + run = ino == MFT_REC_BITMAP ? &sbi->used.bitmap.run : 254 + &ni->file.run; 267 255 break; 268 256 269 257 case ATTR_ROOT: ··· 271 259 goto out; 272 260 273 261 root = Add2Ptr(attr, roff); 274 - is_root = true; 275 262 276 263 if (attr->name_len != ARRAY_SIZE(I30_NAME) || 277 264 memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME))) ··· 283 272 if (!is_dir) 284 273 goto next_attr; 285 274 275 + is_root = true; 286 276 ni->ni_flags |= NI_FLAG_DIR; 287 277 288 278 err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30); 289 279 if (err) 290 280 goto out; 291 281 292 - mode = sb->s_root 293 - ? (S_IFDIR | (0777 & sbi->options->fs_dmask_inv)) 294 - : (S_IFDIR | 0777); 282 + mode = sb->s_root ? 
283 + (S_IFDIR | (0777 & sbi->options->fs_dmask_inv)) : 284 + (S_IFDIR | 0777); 295 285 goto next_attr; 296 286 297 287 case ATTR_ALLOC: ··· 449 437 ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY; 450 438 inode->i_op = &ntfs_file_inode_operations; 451 439 inode->i_fop = &ntfs_file_operations; 452 - inode->i_mapping->a_ops = 453 - is_compressed(ni) ? &ntfs_aops_cmpr : &ntfs_aops; 440 + inode->i_mapping->a_ops = is_compressed(ni) ? &ntfs_aops_cmpr : 441 + &ntfs_aops; 454 442 if (ino != MFT_REC_MFT) 455 443 init_rwsem(&ni->file.run_lock); 456 444 } else if (S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode) || ··· 648 636 bh->b_size = block_size; 649 637 off = vbo & (PAGE_SIZE - 1); 650 638 set_bh_page(bh, page, off); 639 + 651 640 err = bh_read(bh, 0); 652 641 if (err < 0) 653 642 goto out; ··· 786 773 } 787 774 788 775 ret = blockdev_direct_IO(iocb, inode, iter, 789 - wr ? ntfs_get_block_direct_IO_W 790 - : ntfs_get_block_direct_IO_R); 776 + wr ? ntfs_get_block_direct_IO_W : 777 + ntfs_get_block_direct_IO_R); 791 778 792 779 if (ret > 0) 793 780 end = vbo + ret; ··· 846 833 } 847 834 848 835 static int ntfs_resident_writepage(struct folio *folio, 849 - struct writeback_control *wbc, void *data) 836 + struct writeback_control *wbc, void *data) 850 837 { 851 838 struct address_space *mapping = data; 852 839 struct ntfs_inode *ni = ntfs_i(mapping->host); ··· 887 874 888 875 *pagep = NULL; 889 876 if (is_resident(ni)) { 890 - struct page *page = grab_cache_page_write_begin( 891 - mapping, pos >> PAGE_SHIFT); 877 + struct page *page = 878 + grab_cache_page_write_begin(mapping, pos >> PAGE_SHIFT); 892 879 893 880 if (!page) { 894 881 err = -ENOMEM; ··· 920 907 /* 921 908 * ntfs_write_end - Address_space_operations::write_end. 
922 909 */ 923 - int ntfs_write_end(struct file *file, struct address_space *mapping, 924 - loff_t pos, u32 len, u32 copied, struct page *page, 925 - void *fsdata) 910 + int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, 911 + u32 len, u32 copied, struct page *page, void *fsdata) 926 912 { 927 913 struct inode *inode = mapping->host; 928 914 struct ntfs_inode *ni = ntfs_i(inode); ··· 1319 1307 inode_init_owner(idmap, inode, dir, mode); 1320 1308 mode = inode->i_mode; 1321 1309 1322 - inode->i_atime = inode->i_mtime = inode->i_ctime = ni->i_crtime = 1323 - current_time(inode); 1310 + ni->i_crtime = current_time(inode); 1324 1311 1325 1312 rec = ni->mi.mrec; 1326 1313 rec->hard_links = cpu_to_le16(1); ··· 1360 1349 attr->res.data_size = cpu_to_le32(dsize); 1361 1350 1362 1351 std5->cr_time = std5->m_time = std5->c_time = std5->a_time = 1363 - kernel2nt(&inode->i_atime); 1352 + kernel2nt(&ni->i_crtime); 1364 1353 1365 - ni->std_fa = fa; 1366 - std5->fa = fa; 1354 + std5->fa = ni->std_fa = fa; 1367 1355 1368 1356 attr = Add2Ptr(attr, asize); 1369 1357 ··· 1561 1551 } 1562 1552 1563 1553 asize = SIZEOF_NONRESIDENT + ALIGN(err, 8); 1554 + /* Write non resident data. */ 1555 + err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rp, 1556 + nsize, 0); 1557 + if (err) 1558 + goto out5; 1564 1559 } else { 1565 1560 attr->res.data_off = SIZEOF_RESIDENT_LE; 1566 1561 attr->res.data_size = cpu_to_le32(nsize); 1567 1562 memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), rp, nsize); 1568 - nsize = 0; 1569 1563 } 1570 1564 /* Size of symlink equals the length of input string. */ 1571 1565 inode->i_size = size; ··· 1590 1576 rec->used = cpu_to_le32(PtrOffset(rec, attr) + 8); 1591 1577 rec->next_attr_id = cpu_to_le16(aid); 1592 1578 1593 - /* Step 2: Add new name in index. */ 1594 - err = indx_insert_entry(&dir_ni->dir, dir_ni, new_de, sbi, fnd, 0); 1595 - if (err) 1596 - goto out6; 1597 - 1598 - /* Unlock parent directory before ntfs_init_acl. 
*/ 1599 - if (!fnd) 1600 - ni_unlock(dir_ni); 1601 - 1602 1579 inode->i_generation = le16_to_cpu(rec->seq); 1603 - 1604 - dir->i_mtime = dir->i_ctime = inode->i_atime; 1605 1580 1606 1581 if (S_ISDIR(mode)) { 1607 1582 inode->i_op = &ntfs_dir_inode_operations; ··· 1604 1601 } else if (S_ISREG(mode)) { 1605 1602 inode->i_op = &ntfs_file_inode_operations; 1606 1603 inode->i_fop = &ntfs_file_operations; 1607 - inode->i_mapping->a_ops = 1608 - is_compressed(ni) ? &ntfs_aops_cmpr : &ntfs_aops; 1604 + inode->i_mapping->a_ops = is_compressed(ni) ? &ntfs_aops_cmpr : 1605 + &ntfs_aops; 1609 1606 init_rwsem(&ni->file.run_lock); 1610 1607 } else { 1611 1608 inode->i_op = &ntfs_special_inode_operations; ··· 1616 1613 if (!S_ISLNK(mode) && (sb->s_flags & SB_POSIXACL)) { 1617 1614 err = ntfs_init_acl(idmap, inode, dir); 1618 1615 if (err) 1619 - goto out7; 1616 + goto out5; 1620 1617 } else 1621 1618 #endif 1622 1619 { 1623 1620 inode->i_flags |= S_NOSEC; 1624 1621 } 1625 1622 1626 - /* Write non resident data. */ 1627 - if (nsize) { 1628 - err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rp, nsize, 0); 1629 - if (err) 1630 - goto out7; 1623 + /* 1624 + * ntfs_init_acl and ntfs_save_wsl_perm update extended attribute. 1625 + * The packed size of extended attribute is stored in direntry too. 1626 + * 'fname' here points to inside new_de. 1627 + */ 1628 + ntfs_save_wsl_perm(inode, &fname->dup.ea_size); 1629 + 1630 + /* 1631 + * update ea_size in file_name attribute too. 1632 + * Use ni_find_attr cause layout of MFT record may be changed 1633 + * in ntfs_init_acl and ntfs_save_wsl_perm. 
1634 + */ 1635 + attr = ni_find_attr(ni, NULL, NULL, ATTR_NAME, NULL, 0, NULL, NULL); 1636 + if (attr) { 1637 + struct ATTR_FILE_NAME *fn; 1638 + 1639 + fn = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME); 1640 + if (fn) 1641 + fn->dup.ea_size = fname->dup.ea_size; 1631 1642 } 1643 + 1644 + /* We do not need to update parent directory later */ 1645 + ni->ni_flags &= ~NI_FLAG_UPDATE_PARENT; 1646 + 1647 + /* Step 2: Add new name in index. */ 1648 + err = indx_insert_entry(&dir_ni->dir, dir_ni, new_de, sbi, fnd, 0); 1649 + if (err) 1650 + goto out6; 1632 1651 1633 1652 /* 1634 1653 * Call 'd_instantiate' after inode->i_op is set ··· 1658 1633 */ 1659 1634 d_instantiate(dentry, inode); 1660 1635 1661 - ntfs_save_wsl_perm(inode); 1636 + /* Set original time. inode times (i_ctime) may be changed in ntfs_init_acl. */ 1637 + inode->i_atime = inode->i_mtime = inode->i_ctime = dir->i_mtime = 1638 + dir->i_ctime = ni->i_crtime; 1639 + 1662 1640 mark_inode_dirty(dir); 1663 1641 mark_inode_dirty(inode); 1664 1642 1665 1643 /* Normal exit. */ 1666 1644 goto out2; 1667 1645 1668 - out7: 1669 - 1670 - /* Undo 'indx_insert_entry'. */ 1671 - if (!fnd) 1672 - ni_lock_dir(dir_ni); 1673 - indx_delete_entry(&dir_ni->dir, dir_ni, new_de + 1, 1674 - le16_to_cpu(new_de->key_size), sbi); 1675 - /* ni_unlock(dir_ni); will be called later. 
*/ 1676 1646 out6: 1677 1647 if (rp_inserted) 1678 1648 ntfs_remove_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref); ··· 1689 1669 kfree(rp); 1690 1670 1691 1671 out1: 1692 - if (err) { 1693 - if (!fnd) 1694 - ni_unlock(dir_ni); 1672 + if (!fnd) 1673 + ni_unlock(dir_ni); 1674 + 1675 + if (err) 1695 1676 return ERR_PTR(err); 1696 - } 1697 1677 1698 1678 unlock_new_inode(inode); 1699 1679 ··· 1789 1769 void ntfs_evict_inode(struct inode *inode) 1790 1770 { 1791 1771 truncate_inode_pages_final(&inode->i_data); 1792 - 1793 - if (inode->i_nlink) 1794 - _ni_write_inode(inode, inode_needs_sync(inode)); 1795 1772 1796 1773 invalidate_inode_buffers(inode); 1797 1774 clear_inode(inode); ··· 2074 2057 .get_link = ntfs_get_link, 2075 2058 .setattr = ntfs3_setattr, 2076 2059 .listxattr = ntfs_listxattr, 2077 - .permission = ntfs_permission, 2078 2060 }; 2079 2061 2080 2062 const struct address_space_operations ntfs_aops = {
+5 -5
fs/ntfs3/lznt.c
··· 296 296 */ 297 297 struct lznt *get_lznt_ctx(int level) 298 298 { 299 - struct lznt *r = kzalloc(level ? offsetof(struct lznt, hash) 300 - : sizeof(struct lznt), 299 + struct lznt *r = kzalloc(level ? offsetof(struct lznt, hash) : 300 + sizeof(struct lznt), 301 301 GFP_NOFS); 302 302 303 303 if (r) ··· 392 392 unc_use = err; 393 393 } else { 394 394 /* This chunk does not contain compressed data. */ 395 - unc_use = unc_chunk + LZNT_CHUNK_SIZE > unc_end 396 - ? unc_end - unc_chunk 397 - : LZNT_CHUNK_SIZE; 395 + unc_use = unc_chunk + LZNT_CHUNK_SIZE > unc_end ? 396 + unc_end - unc_chunk : 397 + LZNT_CHUNK_SIZE; 398 398 399 399 if (cmpr_chunk + sizeof(chunk_hdr) + unc_use > 400 400 cmpr_end) {
+14 -5
fs/ntfs3/namei.c
··· 88 88 __putname(uni); 89 89 } 90 90 91 + /* 92 + * Check for a null pointer 93 + * If the MFT record of ntfs inode is not a base record, inode->i_op can be NULL. 94 + * This causes null pointer dereference in d_splice_alias(). 95 + */ 96 + if (!IS_ERR_OR_NULL(inode) && !inode->i_op) { 97 + iput(inode); 98 + inode = ERR_PTR(-EINVAL); 99 + } 100 + 91 101 return d_splice_alias(inode, dentry); 92 102 } 93 103 ··· 433 423 434 424 inode = ntfs_create_inode(&nop_mnt_idmap, dir, dentry, uni, mode, 0, 435 425 NULL, 0, fnd); 436 - err = IS_ERR(inode) ? PTR_ERR(inode) 437 - : finish_open(file, dentry, ntfs_file_open); 426 + err = IS_ERR(inode) ? PTR_ERR(inode) : 427 + finish_open(file, dentry, ntfs_file_open); 438 428 dput(d); 439 429 440 430 out2: ··· 607 597 .rmdir = ntfs_rmdir, 608 598 .mknod = ntfs_mknod, 609 599 .rename = ntfs_rename, 610 - .permission = ntfs_permission, 611 - .get_inode_acl = ntfs_get_acl, 600 + .get_acl = ntfs_get_acl, 612 601 .set_acl = ntfs_set_acl, 613 602 .setattr = ntfs3_setattr, 614 603 .getattr = ntfs_getattr, ··· 620 611 .setattr = ntfs3_setattr, 621 612 .getattr = ntfs_getattr, 622 613 .listxattr = ntfs_listxattr, 623 - .get_inode_acl = ntfs_get_acl, 614 + .get_acl = ntfs_get_acl, 624 615 .set_acl = ntfs_set_acl, 625 616 }; 626 617
-3
fs/ntfs3/ntfs.h
··· 435 435 return attr->non_res ? le64_to_cpu(attr->nres.svcn) : 0; 436 436 } 437 437 438 - /* The size of resident attribute by its resident size. */ 439 - #define BYTES_PER_RESIDENT(b) (0x18 + (b)) 440 - 441 438 static_assert(sizeof(struct ATTRIB) == 0x48); 442 439 static_assert(sizeof(((struct ATTRIB *)NULL)->res) == 0x08); 443 440 static_assert(sizeof(((struct ATTRIB *)NULL)->nres) == 0x38);
+8 -11
fs/ntfs3/ntfs_fs.h
··· 100 100 unsigned hide_dot_files : 1; /* Set hidden flag on dot files. */ 101 101 unsigned windows_names : 1; /* Disallow names forbidden by Windows. */ 102 102 unsigned force : 1; /* RW mount dirty volume. */ 103 - unsigned noacsrules : 1; /* Exclude acs rules. */ 104 103 unsigned prealloc : 1; /* Preallocate space when file is growing. */ 105 104 unsigned nocase : 1; /* case insensitive. */ 106 105 }; ··· 163 164 size_t zone_bit; 164 165 size_t zone_end; 165 166 166 - bool set_tail; // Not necessary in driver. 167 167 bool inited; 168 168 }; 169 169 ··· 338 340 }; 339 341 340 342 /* 341 - * sturct ntfs_inode 343 + * struct ntfs_inode 342 344 * 343 345 * Ntfs inode - extends linux inode. consists of one or more MFT inodes. 344 346 */ ··· 579 581 bool ni_is_dirty(struct inode *inode); 580 582 581 583 /* Globals from fslog.c */ 584 + bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes); 582 585 int log_replay(struct ntfs_inode *ni, bool *initialized); 583 586 584 587 /* Globals from fsntfs.c */ ··· 699 700 struct buffer_head *bh_result, int create); 700 701 int ntfs_write_begin(struct file *file, struct address_space *mapping, 701 702 loff_t pos, u32 len, struct page **pagep, void **fsdata); 702 - int ntfs_write_end(struct file *file, struct address_space *mapping, 703 - loff_t pos, u32 len, u32 copied, struct page *page, 704 - void *fsdata); 703 + int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, 704 + u32 len, u32 copied, struct page *page, void *fsdata); 705 705 int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc); 706 706 int ntfs_sync_inode(struct inode *inode); 707 707 int ntfs_flush_inodes(struct super_block *sb, struct inode *i1, ··· 856 858 857 859 /* globals from xattr.c */ 858 860 #ifdef CONFIG_NTFS3_FS_POSIX_ACL 859 - struct posix_acl *ntfs_get_acl(struct inode *inode, int type, bool rcu); 861 + struct posix_acl *ntfs_get_acl(struct mnt_idmap *idmap, 862 + struct dentry *dentry, int 
type); 860 863 int ntfs_set_acl(struct mnt_idmap *idmap, struct dentry *dentry, 861 864 struct posix_acl *acl, int type); 862 865 int ntfs_init_acl(struct mnt_idmap *idmap, struct inode *inode, 863 - struct inode *dir); 866 + struct inode *dir); 864 867 #else 865 868 #define ntfs_get_acl NULL 866 869 #define ntfs_set_acl NULL 867 870 #endif 868 871 869 872 int ntfs_acl_chmod(struct mnt_idmap *idmap, struct dentry *dentry); 870 - int ntfs_permission(struct mnt_idmap *idmap, struct inode *inode, 871 - int mask); 872 873 ssize_t ntfs_listxattr(struct dentry *dentry, char *buffer, size_t size); 873 874 extern const struct xattr_handler *ntfs_xattr_handlers[]; 874 875 875 - int ntfs_save_wsl_perm(struct inode *inode); 876 + int ntfs_save_wsl_perm(struct inode *inode, __le16 *ea_size); 876 877 void ntfs_get_wsl_perm(struct inode *inode); 877 878 878 879 /* globals from lznt.c */
+8 -7
fs/ntfs3/record.c
··· 221 221 } 222 222 223 223 if (off + asize < off) { 224 - /* overflow check */ 224 + /* Overflow check. */ 225 225 return NULL; 226 226 } 227 227 ··· 247 247 if ((t32 & 0xf) || (t32 > 0x100)) 248 248 return NULL; 249 249 250 - /* Check boundary. */ 251 - if (off + asize > used) 250 + /* Check overflow and boundary. */ 251 + if (off + asize < off || off + asize > used) 252 252 return NULL; 253 253 254 254 /* Check size of attribute. */ ··· 419 419 struct ntfs_sb_info *sbi = mi->sbi; 420 420 u32 used = le32_to_cpu(rec->used); 421 421 const u16 *upcase = sbi->upcase; 422 - int diff; 423 422 424 423 /* Can we insert mi attribute? */ 425 - if (used + asize > mi->sbi->record_size) 424 + if (used + asize > sbi->record_size) 426 425 return NULL; 427 426 428 427 /* ··· 430 431 */ 431 432 attr = NULL; 432 433 while ((attr = mi_enum_attr(mi, attr))) { 433 - diff = compare_attr(attr, type, name, name_len, upcase); 434 + int diff = compare_attr(attr, type, name, name_len, upcase); 434 435 435 436 if (diff < 0) 436 437 continue; ··· 441 442 } 442 443 443 444 if (!attr) { 444 - tail = 8; /* Not used, just to suppress warning. */ 445 + /* Append. */ 446 + tail = 8; 445 447 attr = Add2Ptr(rec, used - 8); 446 448 } else { 449 + /* Insert before 'attr'. */ 447 450 tail = used - PtrOffset(rec, attr); 448 451 } 449 452
+3 -3
fs/ntfs3/run.c
··· 433 433 should_add_tail = Tovcn < r->len; 434 434 435 435 if (should_add_tail) { 436 - tail_lcn = r->lcn == SPARSE_LCN 437 - ? SPARSE_LCN 438 - : (r->lcn + Tovcn); 436 + tail_lcn = r->lcn == SPARSE_LCN ? 437 + SPARSE_LCN : 438 + (r->lcn + Tovcn); 439 439 tail_vcn = r->vcn + Tovcn; 440 440 tail_len = r->len - Tovcn; 441 441 }
+190 -140
fs/ntfs3/super.c
··· 39 39 * To mount large volumes as ntfs one should use large cluster size (up to 2M) 40 40 * The maximum volume size in this case is 2^32 * 2^21 = 2^53 = 8P 41 41 * 42 - * ntfs limits, cluster size is 2M (2^31) 42 + * ntfs limits, cluster size is 2M (2^21) 43 43 * ----------------------------------------------------------------------------- 44 - * | < 8P, 2^54 | < 2^32 | yes | yes | yes | yes | yes | 45 - * | > 8P, 2^54 | > 2^32 | no | no | yes | yes | yes | 44 + * | < 8P, 2^53 | < 2^32 | yes | yes | yes | yes | yes | 45 + * | > 8P, 2^53 | > 2^32 | no | no | yes | yes | yes | 46 46 * ----------------------------------------------------------|------------------ 47 47 * 48 48 */ ··· 115 115 return; 116 116 117 117 /* Use static allocated buffer, if possible. */ 118 - name = atomic_dec_and_test(&s_name_buf_cnt) 119 - ? s_name_buf 120 - : kmalloc(sizeof(s_name_buf), GFP_NOFS); 118 + name = atomic_dec_and_test(&s_name_buf_cnt) ? 119 + s_name_buf : 120 + kmalloc(sizeof(s_name_buf), GFP_NOFS); 121 121 122 122 if (name) { 123 123 struct dentry *de = d_find_alias(inode); ··· 253 253 Opt_acl, 254 254 Opt_iocharset, 255 255 Opt_prealloc, 256 - Opt_noacsrules, 257 256 Opt_nocase, 258 257 Opt_err, 259 258 }; ··· 270 271 fsparam_flag_no("hidden", Opt_nohidden), 271 272 fsparam_flag_no("hide_dot_files", Opt_hide_dot_files), 272 273 fsparam_flag_no("windows_names", Opt_windows_names), 273 - fsparam_flag_no("acl", Opt_acl), 274 274 fsparam_flag_no("showmeta", Opt_showmeta), 275 - fsparam_flag_no("prealloc", Opt_prealloc), 276 - fsparam_flag_no("acsrules", Opt_noacsrules), 277 - fsparam_flag_no("nocase", Opt_nocase), 275 + fsparam_flag_no("acl", Opt_acl), 278 276 fsparam_string("iocharset", Opt_iocharset), 277 + fsparam_flag_no("prealloc", Opt_prealloc), 278 + fsparam_flag_no("nocase", Opt_nocase), 279 279 {} 280 280 }; 281 281 ··· 364 366 case Opt_windows_names: 365 367 opts->windows_names = result.negated ? 
0 : 1; 366 368 break; 369 + case Opt_showmeta: 370 + opts->showmeta = result.negated ? 0 : 1; 371 + break; 367 372 case Opt_acl: 368 373 if (!result.negated) 369 374 #ifdef CONFIG_NTFS3_FS_POSIX_ACL 370 375 fc->sb_flags |= SB_POSIXACL; 371 376 #else 372 - return invalf(fc, "ntfs3: Support for ACL not compiled in!"); 377 + return invalf( 378 + fc, "ntfs3: Support for ACL not compiled in!"); 373 379 #endif 374 380 else 375 381 fc->sb_flags &= ~SB_POSIXACL; 376 - break; 377 - case Opt_showmeta: 378 - opts->showmeta = result.negated ? 0 : 1; 379 382 break; 380 383 case Opt_iocharset: 381 384 kfree(opts->nls_name); ··· 385 386 break; 386 387 case Opt_prealloc: 387 388 opts->prealloc = result.negated ? 0 : 1; 388 - break; 389 - case Opt_noacsrules: 390 - opts->noacsrules = result.negated ? 1 : 0; 391 389 break; 392 390 case Opt_nocase: 393 391 opts->nocase = result.negated ? 1 : 0; ··· 405 409 406 410 ro_rw = sb_rdonly(sb) && !(fc->sb_flags & SB_RDONLY); 407 411 if (ro_rw && (sbi->flags & NTFS_FLAGS_NEED_REPLAY)) { 408 - errorf(fc, "ntfs3: Couldn't remount rw because journal is not replayed. Please umount/remount instead\n"); 412 + errorf(fc, 413 + "ntfs3: Couldn't remount rw because journal is not replayed. 
Please umount/remount instead\n"); 409 414 return -EINVAL; 410 415 } 411 416 412 417 new_opts->nls = ntfs_load_nls(new_opts->nls_name); 413 418 if (IS_ERR(new_opts->nls)) { 414 419 new_opts->nls = NULL; 415 - errorf(fc, "ntfs3: Cannot load iocharset %s", new_opts->nls_name); 420 + errorf(fc, "ntfs3: Cannot load iocharset %s", 421 + new_opts->nls_name); 416 422 return -EINVAL; 417 423 } 418 424 if (new_opts->nls != sbi->options->nls) 419 - return invalf(fc, "ntfs3: Cannot use different iocharset when remounting!"); 425 + return invalf( 426 + fc, 427 + "ntfs3: Cannot use different iocharset when remounting!"); 420 428 421 429 sync_filesystem(sb); 422 430 423 431 if (ro_rw && (sbi->volume.flags & VOLUME_FLAG_DIRTY) && 424 432 !new_opts->force) { 425 - errorf(fc, "ntfs3: Volume is dirty and \"force\" flag is not set!"); 433 + errorf(fc, 434 + "ntfs3: Volume is dirty and \"force\" flag is not set!"); 426 435 return -EINVAL; 427 436 } 428 437 ··· 545 544 struct ntfs_mount_options *opts = sbi->options; 546 545 struct user_namespace *user_ns = seq_user_ns(m); 547 546 548 - seq_printf(m, ",uid=%u", 549 - from_kuid_munged(user_ns, opts->fs_uid)); 550 - seq_printf(m, ",gid=%u", 551 - from_kgid_munged(user_ns, opts->fs_gid)); 552 - if (opts->fmask) 553 - seq_printf(m, ",fmask=%04o", opts->fs_fmask_inv ^ 0xffff); 547 + seq_printf(m, ",uid=%u", from_kuid_munged(user_ns, opts->fs_uid)); 548 + seq_printf(m, ",gid=%u", from_kgid_munged(user_ns, opts->fs_gid)); 554 549 if (opts->dmask) 555 550 seq_printf(m, ",dmask=%04o", opts->fs_dmask_inv ^ 0xffff); 556 - if (opts->nls) 557 - seq_printf(m, ",iocharset=%s", opts->nls->charset); 558 - else 559 - seq_puts(m, ",iocharset=utf8"); 551 + if (opts->fmask) 552 + seq_printf(m, ",fmask=%04o", opts->fs_fmask_inv ^ 0xffff); 560 553 if (opts->sys_immutable) 561 554 seq_puts(m, ",sys_immutable"); 562 555 if (opts->discard) 563 556 seq_puts(m, ",discard"); 564 - if (opts->sparse) 565 - seq_puts(m, ",sparse"); 566 - if (opts->showmeta) 567 - 
seq_puts(m, ",showmeta"); 568 - if (opts->nohidden) 569 - seq_puts(m, ",nohidden"); 570 - if (opts->windows_names) 571 - seq_puts(m, ",windows_names"); 572 - if (opts->hide_dot_files) 573 - seq_puts(m, ",hide_dot_files"); 574 557 if (opts->force) 575 558 seq_puts(m, ",force"); 576 - if (opts->noacsrules) 577 - seq_puts(m, ",noacsrules"); 578 - if (opts->prealloc) 579 - seq_puts(m, ",prealloc"); 559 + if (opts->sparse) 560 + seq_puts(m, ",sparse"); 561 + if (opts->nohidden) 562 + seq_puts(m, ",nohidden"); 563 + if (opts->hide_dot_files) 564 + seq_puts(m, ",hide_dot_files"); 565 + if (opts->windows_names) 566 + seq_puts(m, ",windows_names"); 567 + if (opts->showmeta) 568 + seq_puts(m, ",showmeta"); 580 569 if (sb->s_flags & SB_POSIXACL) 581 570 seq_puts(m, ",acl"); 571 + if (opts->nls) 572 + seq_printf(m, ",iocharset=%s", opts->nls->charset); 573 + else 574 + seq_puts(m, ",iocharset=utf8"); 575 + if (opts->prealloc) 576 + seq_puts(m, ",prealloc"); 577 + if (opts->nocase) 578 + seq_puts(m, ",nocase"); 582 579 583 580 return 0; 584 581 } ··· 705 706 if (boot->sectors_per_clusters <= 0x80) 706 707 return boot->sectors_per_clusters; 707 708 if (boot->sectors_per_clusters >= 0xf4) /* limit shift to 2MB max */ 708 - return 1U << -(s8)boot->sectors_per_clusters; 709 + return 1U << (-(s8)boot->sectors_per_clusters); 709 710 return -EINVAL; 710 711 } 711 712 ··· 723 724 struct buffer_head *bh; 724 725 struct MFT_REC *rec; 725 726 u16 fn, ao; 727 + u8 cluster_bits; 726 728 727 729 sbi->volume.blocks = dev_size >> PAGE_SHIFT; 728 730 ··· 734 734 err = -EINVAL; 735 735 boot = (struct NTFS_BOOT *)bh->b_data; 736 736 737 - if (memcmp(boot->system_id, "NTFS ", sizeof("NTFS ") - 1)) 737 + if (memcmp(boot->system_id, "NTFS ", sizeof("NTFS ") - 1)) { 738 + ntfs_err(sb, "Boot's signature is not NTFS."); 738 739 goto out; 740 + } 739 741 740 742 /* 0x55AA is not mandaroty. 
Thanks Maxim Suhanov*/ 741 743 /*if (0x55 != boot->boot_magic[0] || 0xAA != boot->boot_magic[1]) 742 744 * goto out; 743 745 */ 744 746 745 - boot_sector_size = (u32)boot->bytes_per_sector[1] << 8; 746 - if (boot->bytes_per_sector[0] || boot_sector_size < SECTOR_SIZE || 747 + boot_sector_size = ((u32)boot->bytes_per_sector[1] << 8) | 748 + boot->bytes_per_sector[0]; 749 + if (boot_sector_size < SECTOR_SIZE || 747 750 !is_power_of_2(boot_sector_size)) { 751 + ntfs_err(sb, "Invalid bytes per sector %u.", boot_sector_size); 748 752 goto out; 749 753 } 750 754 751 755 /* cluster size: 512, 1K, 2K, 4K, ... 2M */ 752 756 sct_per_clst = true_sectors_per_clst(boot); 753 - if ((int)sct_per_clst < 0) 757 + if ((int)sct_per_clst < 0 || !is_power_of_2(sct_per_clst)) { 758 + ntfs_err(sb, "Invalid sectors per cluster %u.", sct_per_clst); 754 759 goto out; 755 - if (!is_power_of_2(sct_per_clst)) 756 - goto out; 760 + } 761 + 762 + sbi->cluster_size = boot_sector_size * sct_per_clst; 763 + sbi->cluster_bits = cluster_bits = blksize_bits(sbi->cluster_size); 764 + sbi->cluster_mask = sbi->cluster_size - 1; 765 + sbi->cluster_mask_inv = ~(u64)sbi->cluster_mask; 757 766 758 767 mlcn = le64_to_cpu(boot->mft_clst); 759 768 mlcn2 = le64_to_cpu(boot->mft2_clst); 760 769 sectors = le64_to_cpu(boot->sectors_per_volume); 761 770 762 - if (mlcn * sct_per_clst >= sectors) 763 - goto out; 764 - 765 - if (mlcn2 * sct_per_clst >= sectors) 766 - goto out; 767 - 768 - /* Check MFT record size. */ 769 - if ((boot->record_size < 0 && 770 - SECTOR_SIZE > (2U << (-boot->record_size))) || 771 - (boot->record_size >= 0 && !is_power_of_2(boot->record_size))) { 771 + if (mlcn * sct_per_clst >= sectors || mlcn2 * sct_per_clst >= sectors) { 772 + ntfs_err( 773 + sb, 774 + "Start of MFT 0x%llx (0x%llx) is out of volume 0x%llx.", 775 + mlcn, mlcn2, sectors); 772 776 goto out; 773 777 } 774 778 779 + sbi->record_size = record_size = 780 + boot->record_size < 0 ? 
1 << (-boot->record_size) : 781 + (u32)boot->record_size << cluster_bits; 782 + sbi->record_bits = blksize_bits(record_size); 783 + sbi->attr_size_tr = (5 * record_size >> 4); // ~320 bytes 784 + 785 + /* Check MFT record size. */ 786 + if (record_size < SECTOR_SIZE || !is_power_of_2(record_size)) { 787 + ntfs_err(sb, "Invalid bytes per MFT record %u (%d).", 788 + record_size, boot->record_size); 789 + goto out; 790 + } 791 + 792 + if (record_size > MAXIMUM_BYTES_PER_MFT) { 793 + ntfs_err(sb, "Unsupported bytes per MFT record %u.", 794 + record_size); 795 + goto out; 796 + } 797 + 798 + sbi->index_size = boot->index_size < 0 ? 799 + 1u << (-boot->index_size) : 800 + (u32)boot->index_size << cluster_bits; 801 + 775 802 /* Check index record size. */ 776 - if ((boot->index_size < 0 && 777 - SECTOR_SIZE > (2U << (-boot->index_size))) || 778 - (boot->index_size >= 0 && !is_power_of_2(boot->index_size))) { 803 + if (sbi->index_size < SECTOR_SIZE || !is_power_of_2(sbi->index_size)) { 804 + ntfs_err(sb, "Invalid bytes per index %u(%d).", sbi->index_size, 805 + boot->index_size); 806 + goto out; 807 + } 808 + 809 + if (sbi->index_size > MAXIMUM_BYTES_PER_INDEX) { 810 + ntfs_err(sb, "Unsupported bytes per index %u.", 811 + sbi->index_size); 779 812 goto out; 780 813 } 781 814 ··· 824 791 if (boot_sector_size != sector_size) { 825 792 ntfs_warn( 826 793 sb, 827 - "Different NTFS' sector size (%u) and media sector size (%u)", 794 + "Different NTFS sector size (%u) and media sector size (%u).", 828 795 boot_sector_size, sector_size); 829 796 dev_size += sector_size - 1; 830 797 } 831 798 832 - sbi->cluster_size = boot_sector_size * sct_per_clst; 833 - sbi->cluster_bits = blksize_bits(sbi->cluster_size); 834 - 835 - sbi->mft.lbo = mlcn << sbi->cluster_bits; 836 - sbi->mft.lbo2 = mlcn2 << sbi->cluster_bits; 799 + sbi->mft.lbo = mlcn << cluster_bits; 800 + sbi->mft.lbo2 = mlcn2 << cluster_bits; 837 801 838 802 /* Compare boot's cluster and sector. 
*/ 839 - if (sbi->cluster_size < boot_sector_size) 803 + if (sbi->cluster_size < boot_sector_size) { 804 + ntfs_err(sb, "Invalid bytes per cluster (%u).", 805 + sbi->cluster_size); 840 806 goto out; 807 + } 841 808 842 809 /* Compare boot's cluster and media sector. */ 843 810 if (sbi->cluster_size < sector_size) { 844 811 /* No way to use ntfs_get_block in this case. */ 845 812 ntfs_err( 846 813 sb, 847 - "Failed to mount 'cause NTFS's cluster size (%u) is less than media sector size (%u)", 814 + "Failed to mount 'cause NTFS's cluster size (%u) is less than media sector size (%u).", 848 815 sbi->cluster_size, sector_size); 849 816 goto out; 850 817 } 851 - 852 - sbi->cluster_mask = sbi->cluster_size - 1; 853 - sbi->cluster_mask_inv = ~(u64)sbi->cluster_mask; 854 - sbi->record_size = record_size = boot->record_size < 0 855 - ? 1 << (-boot->record_size) 856 - : (u32)boot->record_size 857 - << sbi->cluster_bits; 858 - 859 - if (record_size > MAXIMUM_BYTES_PER_MFT || record_size < SECTOR_SIZE) 860 - goto out; 861 - 862 - sbi->record_bits = blksize_bits(record_size); 863 - sbi->attr_size_tr = (5 * record_size >> 4); // ~320 bytes 864 818 865 819 sbi->max_bytes_per_attr = 866 820 record_size - ALIGN(MFTRECORD_FIXUP_OFFSET_1, 8) - 867 821 ALIGN(((record_size >> SECTOR_SHIFT) * sizeof(short)), 8) - 868 822 ALIGN(sizeof(enum ATTR_TYPE), 8); 869 - 870 - sbi->index_size = boot->index_size < 0 871 - ? 1u << (-boot->index_size) 872 - : (u32)boot->index_size << sbi->cluster_bits; 873 823 874 824 sbi->volume.ser_num = le64_to_cpu(boot->serial_num); 875 825 ··· 863 847 gb0 = format_size_gb(dev_size, &mb0); 864 848 ntfs_warn( 865 849 sb, 866 - "RAW NTFS volume: Filesystem size %u.%02u Gb > volume size %u.%02u Gb. Mount in read-only", 850 + "RAW NTFS volume: Filesystem size %u.%02u Gb > volume size %u.%02u Gb. 
Mount in read-only.", 867 851 gb, mb, gb0, mb0); 868 852 sb->s_flags |= SB_RDONLY; 869 853 } 870 854 871 - clusters = sbi->volume.size >> sbi->cluster_bits; 855 + clusters = sbi->volume.size >> cluster_bits; 872 856 #ifndef CONFIG_NTFS3_64BIT_CLUSTER 873 857 /* 32 bits per cluster. */ 874 858 if (clusters >> 32) { 875 859 ntfs_notice( 876 860 sb, 877 - "NTFS %u.%02u Gb is too big to use 32 bits per cluster", 861 + "NTFS %u.%02u Gb is too big to use 32 bits per cluster.", 878 862 gb, mb); 879 863 goto out; 880 864 } ··· 908 892 sbi->volume.blocks = sbi->volume.size >> sb->s_blocksize_bits; 909 893 910 894 /* Maximum size for normal files. */ 911 - sbi->maxbytes = (clusters << sbi->cluster_bits) - 1; 895 + sbi->maxbytes = (clusters << cluster_bits) - 1; 912 896 913 897 #ifdef CONFIG_NTFS3_64BIT_CLUSTER 914 - if (clusters >= (1ull << (64 - sbi->cluster_bits))) 898 + if (clusters >= (1ull << (64 - cluster_bits))) 915 899 sbi->maxbytes = -1; 916 900 sbi->maxbytes_sparse = -1; 917 901 sb->s_maxbytes = MAX_LFS_FILESIZE; 918 902 #else 919 903 /* Maximum size for sparse file. */ 920 - sbi->maxbytes_sparse = (1ull << (sbi->cluster_bits + 32)) - 1; 921 - sb->s_maxbytes = 0xFFFFFFFFull << sbi->cluster_bits; 904 + sbi->maxbytes_sparse = (1ull << (cluster_bits + 32)) - 1; 905 + sb->s_maxbytes = 0xFFFFFFFFull << cluster_bits; 922 906 #endif 923 907 924 908 /* ··· 926 910 * It would be nice if we are able to allocate 1/8 of 927 911 * total clusters for MFT but not more then 512 MB. 
928 912 */ 929 - sbi->zone_max = min_t(CLST, 0x20000000 >> sbi->cluster_bits, clusters >> 3); 913 + sbi->zone_max = min_t(CLST, 0x20000000 >> cluster_bits, clusters >> 3); 930 914 931 915 err = 0; 932 916 ··· 944 928 int err; 945 929 struct ntfs_sb_info *sbi = sb->s_fs_info; 946 930 struct block_device *bdev = sb->s_bdev; 931 + struct ntfs_mount_options *options; 947 932 struct inode *inode; 948 933 struct ntfs_inode *ni; 949 934 size_t i, tt, bad_len, bad_frags; ··· 959 942 ref.high = 0; 960 943 961 944 sbi->sb = sb; 962 - sbi->options = fc->fs_private; 945 + sbi->options = options = fc->fs_private; 963 946 fc->fs_private = NULL; 964 947 sb->s_flags |= SB_NODIRATIME; 965 948 sb->s_magic = 0x7366746e; // "ntfs" ··· 967 950 sb->s_export_op = &ntfs_export_ops; 968 951 sb->s_time_gran = NTFS_TIME_GRAN; // 100 nsec 969 952 sb->s_xattr = ntfs_xattr_handlers; 970 - sb->s_d_op = sbi->options->nocase ? &ntfs_dentry_ops : NULL; 953 + sb->s_d_op = options->nocase ? &ntfs_dentry_ops : NULL; 971 954 972 - sbi->options->nls = ntfs_load_nls(sbi->options->nls_name); 973 - if (IS_ERR(sbi->options->nls)) { 974 - sbi->options->nls = NULL; 975 - errorf(fc, "Cannot load nls %s", sbi->options->nls_name); 955 + options->nls = ntfs_load_nls(options->nls_name); 956 + if (IS_ERR(options->nls)) { 957 + options->nls = NULL; 958 + errorf(fc, "Cannot load nls %s", options->nls_name); 976 959 err = -EINVAL; 977 960 goto out; 978 961 } ··· 997 980 ref.seq = cpu_to_le16(MFT_REC_VOL); 998 981 inode = ntfs_iget5(sb, &ref, &NAME_VOLUME); 999 982 if (IS_ERR(inode)) { 1000 - ntfs_err(sb, "Failed to load $Volume."); 1001 983 err = PTR_ERR(inode); 984 + ntfs_err(sb, "Failed to load $Volume (%d).", err); 1002 985 goto out; 1003 986 } 1004 987 ··· 1024 1007 } 1025 1008 1026 1009 attr = ni_find_attr(ni, attr, NULL, ATTR_VOL_INFO, NULL, 0, NULL, NULL); 1027 - if (!attr || is_attr_ext(attr)) { 1028 - err = -EINVAL; 1029 - goto put_inode_out; 1030 - } 1031 - 1032 - info = resident_data_ex(attr, 
SIZEOF_ATTRIBUTE_VOLUME_INFO); 1033 - if (!info) { 1010 + if (!attr || is_attr_ext(attr) || 1011 + !(info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO))) { 1012 + ntfs_err(sb, "$Volume is corrupted."); 1034 1013 err = -EINVAL; 1035 1014 goto put_inode_out; 1036 1015 } ··· 1041 1028 ref.seq = cpu_to_le16(MFT_REC_MIRR); 1042 1029 inode = ntfs_iget5(sb, &ref, &NAME_MIRROR); 1043 1030 if (IS_ERR(inode)) { 1044 - ntfs_err(sb, "Failed to load $MFTMirr."); 1045 1031 err = PTR_ERR(inode); 1032 + ntfs_err(sb, "Failed to load $MFTMirr (%d).", err); 1046 1033 goto out; 1047 1034 } 1048 1035 1049 - sbi->mft.recs_mirr = 1050 - ntfs_up_cluster(sbi, inode->i_size) >> sbi->record_bits; 1036 + sbi->mft.recs_mirr = ntfs_up_cluster(sbi, inode->i_size) >> 1037 + sbi->record_bits; 1051 1038 1052 1039 iput(inode); 1053 1040 ··· 1056 1043 ref.seq = cpu_to_le16(MFT_REC_LOG); 1057 1044 inode = ntfs_iget5(sb, &ref, &NAME_LOGFILE); 1058 1045 if (IS_ERR(inode)) { 1059 - ntfs_err(sb, "Failed to load \x24LogFile."); 1060 1046 err = PTR_ERR(inode); 1047 + ntfs_err(sb, "Failed to load \x24LogFile (%d).", err); 1061 1048 goto out; 1062 1049 } 1063 1050 ··· 1077 1064 goto out; 1078 1065 } 1079 1066 } else if (sbi->volume.flags & VOLUME_FLAG_DIRTY) { 1080 - if (!sb_rdonly(sb) && !sbi->options->force) { 1067 + if (!sb_rdonly(sb) && !options->force) { 1081 1068 ntfs_warn( 1082 1069 sb, 1083 1070 "volume is dirty and \"force\" flag is not set!"); ··· 1092 1079 1093 1080 inode = ntfs_iget5(sb, &ref, &NAME_MFT); 1094 1081 if (IS_ERR(inode)) { 1095 - ntfs_err(sb, "Failed to load $MFT."); 1096 1082 err = PTR_ERR(inode); 1083 + ntfs_err(sb, "Failed to load $MFT (%d).", err); 1097 1084 goto out; 1098 1085 } 1099 1086 ··· 1108 1095 goto put_inode_out; 1109 1096 1110 1097 err = ni_load_all_mi(ni); 1111 - if (err) 1098 + if (err) { 1099 + ntfs_err(sb, "Failed to load $MFT's subrecords (%d).", err); 1112 1100 goto put_inode_out; 1101 + } 1113 1102 1114 1103 sbi->mft.ni = ni; 1115 1104 ··· 1120 1105 
ref.seq = cpu_to_le16(MFT_REC_BITMAP); 1121 1106 inode = ntfs_iget5(sb, &ref, &NAME_BITMAP); 1122 1107 if (IS_ERR(inode)) { 1123 - ntfs_err(sb, "Failed to load $Bitmap."); 1124 1108 err = PTR_ERR(inode); 1109 + ntfs_err(sb, "Failed to load $Bitmap (%d).", err); 1125 1110 goto out; 1126 1111 } 1127 1112 ··· 1135 1120 /* Check bitmap boundary. */ 1136 1121 tt = sbi->used.bitmap.nbits; 1137 1122 if (inode->i_size < bitmap_size(tt)) { 1123 + ntfs_err(sb, "$Bitmap is corrupted."); 1138 1124 err = -EINVAL; 1139 1125 goto put_inode_out; 1140 1126 } 1141 1127 1142 - /* Not necessary. */ 1143 - sbi->used.bitmap.set_tail = true; 1144 1128 err = wnd_init(&sbi->used.bitmap, sb, tt); 1145 - if (err) 1129 + if (err) { 1130 + ntfs_err(sb, "Failed to initialize $Bitmap (%d).", err); 1146 1131 goto put_inode_out; 1132 + } 1147 1133 1148 1134 iput(inode); 1149 1135 1150 1136 /* Compute the MFT zone. */ 1151 1137 err = ntfs_refresh_zone(sbi); 1152 - if (err) 1138 + if (err) { 1139 + ntfs_err(sb, "Failed to initialize MFT zone (%d).", err); 1153 1140 goto out; 1141 + } 1154 1142 1155 1143 /* Load $BadClus. */ 1156 1144 ref.low = cpu_to_le32(MFT_REC_BADCLUST); ··· 1198 1180 ref.seq = cpu_to_le16(MFT_REC_ATTR); 1199 1181 inode = ntfs_iget5(sb, &ref, &NAME_ATTRDEF); 1200 1182 if (IS_ERR(inode)) { 1201 - ntfs_err(sb, "Failed to load $AttrDef -> %d", err); 1202 1183 err = PTR_ERR(inode); 1184 + ntfs_err(sb, "Failed to load $AttrDef (%d)", err); 1203 1185 goto out; 1204 1186 } 1205 1187 1206 - if (inode->i_size < sizeof(struct ATTR_DEF_ENTRY)) { 1188 + /* 1189 + * Typical $AttrDef contains up to 20 entries. 1190 + * Check for extremely large/small size. 
1191 + */ 1192 + if (inode->i_size < sizeof(struct ATTR_DEF_ENTRY) || 1193 + inode->i_size > 100 * sizeof(struct ATTR_DEF_ENTRY)) { 1194 + ntfs_err(sb, "Looks like $AttrDef is corrupted (size=%llu).", 1195 + inode->i_size); 1207 1196 err = -EINVAL; 1208 1197 goto put_inode_out; 1209 1198 } 1199 + 1210 1200 bytes = inode->i_size; 1211 1201 sbi->def_table = t = kmalloc(bytes, GFP_NOFS | __GFP_NOWARN); 1212 1202 if (!t) { ··· 1228 1202 1229 1203 if (IS_ERR(page)) { 1230 1204 err = PTR_ERR(page); 1205 + ntfs_err(sb, "Failed to read $AttrDef (%d).", err); 1231 1206 goto put_inode_out; 1232 1207 } 1233 1208 memcpy(Add2Ptr(t, done), page_address(page), ··· 1236 1209 ntfs_unmap_page(page); 1237 1210 1238 1211 if (!idx && ATTR_STD != t->type) { 1212 + ntfs_err(sb, "$AttrDef is corrupted."); 1239 1213 err = -EINVAL; 1240 1214 goto put_inode_out; 1241 1215 } ··· 1271 1243 ref.seq = cpu_to_le16(MFT_REC_UPCASE); 1272 1244 inode = ntfs_iget5(sb, &ref, &NAME_UPCASE); 1273 1245 if (IS_ERR(inode)) { 1274 - ntfs_err(sb, "Failed to load $UpCase."); 1275 1246 err = PTR_ERR(inode); 1247 + ntfs_err(sb, "Failed to load $UpCase (%d).", err); 1276 1248 goto out; 1277 1249 } 1278 1250 1279 1251 if (inode->i_size != 0x10000 * sizeof(short)) { 1280 1252 err = -EINVAL; 1253 + ntfs_err(sb, "$UpCase is corrupted."); 1281 1254 goto put_inode_out; 1282 1255 } 1283 1256 ··· 1289 1260 1290 1261 if (IS_ERR(page)) { 1291 1262 err = PTR_ERR(page); 1263 + ntfs_err(sb, "Failed to read $UpCase (%d).", err); 1292 1264 goto put_inode_out; 1293 1265 } 1294 1266 ··· 1315 1285 if (is_ntfs3(sbi)) { 1316 1286 /* Load $Secure. */ 1317 1287 err = ntfs_security_init(sbi); 1318 - if (err) 1288 + if (err) { 1289 + ntfs_err(sb, "Failed to initialize $Secure (%d).", err); 1319 1290 goto out; 1291 + } 1320 1292 1321 1293 /* Load $Extend. 
*/ 1322 1294 err = ntfs_extend_init(sbi); 1323 - if (err) 1295 + if (err) { 1296 + ntfs_warn(sb, "Failed to initialize $Extend."); 1324 1297 goto load_root; 1298 + } 1325 1299 1326 - /* Load $Extend\$Reparse. */ 1300 + /* Load $Extend/$Reparse. */ 1327 1301 err = ntfs_reparse_init(sbi); 1328 - if (err) 1302 + if (err) { 1303 + ntfs_warn(sb, "Failed to initialize $Extend/$Reparse."); 1329 1304 goto load_root; 1305 + } 1330 1306 1331 - /* Load $Extend\$ObjId. */ 1307 + /* Load $Extend/$ObjId. */ 1332 1308 err = ntfs_objid_init(sbi); 1333 - if (err) 1309 + if (err) { 1310 + ntfs_warn(sb, "Failed to initialize $Extend/$ObjId."); 1334 1311 goto load_root; 1312 + } 1335 1313 } 1336 1314 1337 1315 load_root: ··· 1347 1309 ref.low = cpu_to_le32(MFT_REC_ROOT); 1348 1310 ref.seq = cpu_to_le16(MFT_REC_ROOT); 1349 1311 inode = ntfs_iget5(sb, &ref, &NAME_ROOT); 1350 - if (IS_ERR(inode) || !inode->i_op) { 1351 - ntfs_err(sb, "Failed to load root."); 1352 - err = IS_ERR(inode) ? PTR_ERR(inode) : -EINVAL; 1312 + if (IS_ERR(inode)) { 1313 + err = PTR_ERR(inode); 1314 + ntfs_err(sb, "Failed to load root (%d).", err); 1353 1315 goto out; 1316 + } 1317 + 1318 + /* 1319 + * Final check. Looks like this case should never occurs. 1320 + */ 1321 + if (!inode->i_op) { 1322 + err = -EINVAL; 1323 + ntfs_err(sb, "Failed to load root (%d).", err); 1324 + goto put_inode_out; 1354 1325 } 1355 1326 1356 1327 sb->s_root = d_make_root(inode); ··· 1481 1434 }; 1482 1435 1483 1436 /* 1484 - * ntfs_init_fs_context - Initialize spi and opts 1437 + * ntfs_init_fs_context - Initialize sbi and opts 1485 1438 * 1486 1439 * This will called when mount/remount. We will first initialize 1487 1440 * options so that if remount we can use just that. ··· 1554 1507 if (IS_ENABLED(CONFIG_NTFS3_FS_POSIX_ACL)) 1555 1508 pr_info("ntfs3: Enabled Linux POSIX ACLs support\n"); 1556 1509 if (IS_ENABLED(CONFIG_NTFS3_64BIT_CLUSTER)) 1557 - pr_notice("ntfs3: Warning: Activated 64 bits per cluster. 
Windows does not support this\n"); 1510 + pr_notice( 1511 + "ntfs3: Warning: Activated 64 bits per cluster. Windows does not support this\n"); 1558 1512 if (IS_ENABLED(CONFIG_NTFS3_LZX_XPRESS)) 1559 1513 pr_info("ntfs3: Read-only LZX/Xpress compression included\n"); 1560 1514 ··· 1598 1550 MODULE_INFO(behaviour, "Enabled Linux POSIX ACLs support"); 1599 1551 #endif 1600 1552 #ifdef CONFIG_NTFS3_64BIT_CLUSTER 1601 - MODULE_INFO(cluster, "Warning: Activated 64 bits per cluster. Windows does not support this"); 1553 + MODULE_INFO( 1554 + cluster, 1555 + "Warning: Activated 64 bits per cluster. Windows does not support this"); 1602 1556 #endif 1603 1557 #ifdef CONFIG_NTFS3_LZX_XPRESS 1604 1558 MODULE_INFO(compression, "Read-only lzx/xpress compression included");
+26 -44
fs/ntfs3/xattr.c
··· 23 23 24 24 static inline size_t unpacked_ea_size(const struct EA_FULL *ea) 25 25 { 26 - return ea->size ? le32_to_cpu(ea->size) 27 - : ALIGN(struct_size(ea, name, 26 + return ea->size ? le32_to_cpu(ea->size) : 27 + ALIGN(struct_size(ea, name, 28 28 1 + ea->name_len + 29 29 le16_to_cpu(ea->elength)), 30 30 4); ··· 296 296 297 297 static noinline int ntfs_set_ea(struct inode *inode, const char *name, 298 298 size_t name_len, const void *value, 299 - size_t val_size, int flags, bool locked) 299 + size_t val_size, int flags, bool locked, 300 + __le16 *ea_size) 300 301 { 301 302 struct ntfs_inode *ni = ntfs_i(inode); 302 303 struct ntfs_sb_info *sbi = ni->mi.sbi; ··· 411 410 412 411 /* 413 412 * 1. Check ea_info.size_pack for overflow. 414 - * 2. New attibute size must fit value from $AttrDef 413 + * 2. New attribute size must fit value from $AttrDef 415 414 */ 416 415 if (new_pack > 0xffff || size > sbi->ea_max_size) { 417 416 ntfs_inode_warn( ··· 505 504 506 505 if (ea_info.size_pack != size_pack) 507 506 ni->ni_flags |= NI_FLAG_UPDATE_PARENT; 507 + if (ea_size) 508 + *ea_size = ea_info.size_pack; 508 509 mark_inode_dirty(&ni->vfs_inode); 509 510 510 511 out: ··· 520 517 } 521 518 522 519 #ifdef CONFIG_NTFS3_FS_POSIX_ACL 523 - static struct posix_acl *ntfs_get_acl_ex(struct inode *inode, int type, 524 - int locked) 520 + 521 + /* 522 + * ntfs_get_acl - inode_operations::get_acl 523 + */ 524 + struct posix_acl *ntfs_get_acl(struct mnt_idmap *idmap, 525 + struct dentry *dentry, int type) 525 526 { 527 + struct inode *inode = d_inode(dentry); 526 528 struct ntfs_inode *ni = ntfs_i(inode); 527 529 const char *name; 528 530 size_t name_len; ··· 550 542 name_len = sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1; 551 543 } 552 544 553 - if (!locked) 554 - ni_lock(ni); 545 + ni_lock(ni); 555 546 556 547 err = ntfs_get_ea(inode, name, name_len, buf, PATH_MAX, &req); 557 548 558 - if (!locked) 559 - ni_unlock(ni); 549 + ni_unlock(ni); 560 550 561 551 /* Translate extended 
attribute to acl. */ 562 552 if (err >= 0) { ··· 571 565 __putname(buf); 572 566 573 567 return acl; 574 - } 575 - 576 - /* 577 - * ntfs_get_acl - inode_operations::get_acl 578 - */ 579 - struct posix_acl *ntfs_get_acl(struct inode *inode, int type, bool rcu) 580 - { 581 - if (rcu) 582 - return ERR_PTR(-ECHILD); 583 - 584 - return ntfs_get_acl_ex(inode, type, 0); 585 568 } 586 569 587 570 static noinline int ntfs_set_acl_ex(struct mnt_idmap *idmap, ··· 628 633 flags = 0; 629 634 } 630 635 631 - err = ntfs_set_ea(inode, name, name_len, value, size, flags, 0); 636 + err = ntfs_set_ea(inode, name, name_len, value, size, flags, 0, NULL); 632 637 if (err == -ENODATA && !size) 633 638 err = 0; /* Removing non existed xattr. */ 634 639 if (!err) { ··· 707 712 } 708 713 709 714 /* 710 - * ntfs_permission - inode_operations::permission 711 - */ 712 - int ntfs_permission(struct mnt_idmap *idmap, struct inode *inode, 713 - int mask) 714 - { 715 - if (ntfs_sb(inode->i_sb)->options->noacsrules) { 716 - /* "No access rules" mode - Allow all changes. */ 717 - return 0; 718 - } 719 - 720 - return generic_permission(idmap, inode, mask); 721 - } 722 - 723 - /* 724 715 * ntfs_listxattr - inode_operations::listxattr 725 716 */ 726 717 ssize_t ntfs_listxattr(struct dentry *dentry, char *buffer, size_t size) ··· 761 780 err = sizeof(u32); 762 781 *(u32 *)buffer = le32_to_cpu(ni->std_fa); 763 782 if (!strcmp(name, SYSTEM_NTFS_ATTRIB_BE)) 764 - *(u32 *)buffer = cpu_to_be32(*(u32 *)buffer); 783 + *(__be32 *)buffer = cpu_to_be32(*(u32 *)buffer); 765 784 } 766 785 goto out; 767 786 } ··· 838 857 if (size != sizeof(u32)) 839 858 goto out; 840 859 if (!strcmp(name, SYSTEM_NTFS_ATTRIB_BE)) 841 - new_fa = cpu_to_le32(be32_to_cpu(*(u32 *)value)); 860 + new_fa = cpu_to_le32(be32_to_cpu(*(__be32 *)value)); 842 861 else 843 862 new_fa = cpu_to_le32(*(u32 *)value); 844 863 ··· 918 937 } 919 938 920 939 /* Deal with NTFS extended attribute. 
*/ 921 - err = ntfs_set_ea(inode, name, strlen(name), value, size, flags, 0); 940 + err = ntfs_set_ea(inode, name, strlen(name), value, size, flags, 0, 941 + NULL); 922 942 923 943 out: 924 944 inode->i_ctime = current_time(inode); ··· 933 951 * 934 952 * save uid/gid/mode in xattr 935 953 */ 936 - int ntfs_save_wsl_perm(struct inode *inode) 954 + int ntfs_save_wsl_perm(struct inode *inode, __le16 *ea_size) 937 955 { 938 956 int err; 939 957 __le32 value; ··· 942 960 ni_lock(ni); 943 961 value = cpu_to_le32(i_uid_read(inode)); 944 962 err = ntfs_set_ea(inode, "$LXUID", sizeof("$LXUID") - 1, &value, 945 - sizeof(value), 0, true); /* true == already locked. */ 963 + sizeof(value), 0, true, ea_size); 946 964 if (err) 947 965 goto out; 948 966 949 967 value = cpu_to_le32(i_gid_read(inode)); 950 968 err = ntfs_set_ea(inode, "$LXGID", sizeof("$LXGID") - 1, &value, 951 - sizeof(value), 0, true); 969 + sizeof(value), 0, true, ea_size); 952 970 if (err) 953 971 goto out; 954 972 955 973 value = cpu_to_le32(inode->i_mode); 956 974 err = ntfs_set_ea(inode, "$LXMOD", sizeof("$LXMOD") - 1, &value, 957 - sizeof(value), 0, true); 975 + sizeof(value), 0, true, ea_size); 958 976 if (err) 959 977 goto out; 960 978 961 979 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 962 980 value = cpu_to_le32(inode->i_rdev); 963 981 err = ntfs_set_ea(inode, "$LXDEV", sizeof("$LXDEV") - 1, &value, 964 - sizeof(value), 0, true); 982 + sizeof(value), 0, true, ea_size); 965 983 if (err) 966 984 goto out; 967 985 }