Merge tag 'ntfs3_for_6.19' of https://github.com/Paragon-Software-Group/linux-ntfs3

Pull ntfs3 updates from Konstantin Komarov:
"New code:
- support for timestamps prior to the epoch
- do not overwrite uptodate pages
- disable readahead for compressed files
- set a dummy blocksize to read the boot block when mounting
- initialize run_lock when loading $Extend
- initialize allocated memory before use
- support for the NTFS3_IOC_SHUTDOWN ioctl
- check minimum alignment when performing direct I/O reads
- check for shutdown in fsync

Fixes:
- mount failure for sparse runs in run_unpack()
- use-after-free of sbi->options in cmp_fnames
- KMSAN uninit bug after failed mi_read in mi_format_new
- uninitialized buffer after allocation by __getname()
- KMSAN uninit-value in ni_create_attr_list
- double free of sbi->options->nls and ownership of fc->fs_private
- incorrect vcn adjustments in attr_collapse_range()
- mode update when ACL can be reduced to mode
- memory leaks when adding a sub-record

Changes:
- refactored code, updated terminology, fixed spelling
- no longer kmap pages in the (de)compression code
- failure paths after ntfs_look_free_mft() now put the mft_inode
- enable the "acl" and "prealloc" mount options by default

Replaced:
- memcpy with unsafe_memcpy() to avoid a memcpy size warning
- ntfs_bio_pages with the page cache for compressed files"

* tag 'ntfs3_for_6.19' of https://github.com/Paragon-Software-Group/linux-ntfs3: (26 commits)
fs/ntfs3: check for shutdown in fsync
fs/ntfs3: change the default mount options for "acl" and "prealloc"
fs/ntfs3: Prevent memory leaks in add sub record
fs/ntfs3: out1 also needs to put mi
fs/ntfs3: Fix spelling mistake "recommened" -> "recommended"
fs/ntfs3: update mode in xattr when ACL can be reduced to mode
fs/ntfs3: check minimum alignment for direct I/O
fs/ntfs3: implement NTFS3_IOC_SHUTDOWN ioctl
fs/ntfs3: correct attr_collapse_range when file is too fragmented
ntfs3: fix double free of sbi->options->nls and clarify ownership of fc->fs_private
fs/ntfs3: Initialize allocated memory before use
fs/ntfs3: remove ntfs_bio_pages and use page cache for compressed I/O
ntfs3: avoid memcpy size warning
fs/ntfs3: fix KMSAN uninit-value in ni_create_attr_list
ntfs3: init run lock for extend inode
ntfs: set dummy blocksize to read boot_block when mounting
fs/ntfs3: disable readahead for compressed files
ntfs3: Fix uninit buffer allocated by __getname()
ntfs3: fix uninit memory after failed mi_read in mi_format_new
ntfs3: fix use-after-free of sbi->options in cmp_fnames
...

+42 -46
fs/ntfs3/attrib.c
···
 		pgoff_t index = vbo[i] >> PAGE_SHIFT;
 
 		if (index != folio->index) {
-			struct page *page = &folio->page;
 			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
 			u64 to = min(from + PAGE_SIZE, wof_size);
···
 			if (err)
 				goto out1;
 
-			err = ntfs_bio_pages(sbi, run, &page, 1, from,
-					     to - from, REQ_OP_READ);
+			err = ntfs_read_run(sbi, run, addr, from, to - from);
 			if (err) {
 				folio->index = -1;
 				goto out1;
···
 	struct ATTRIB *attr = NULL, *attr_b;
 	struct ATTR_LIST_ENTRY *le, *le_b;
 	struct mft_inode *mi, *mi_b;
-	CLST svcn, evcn1, len, dealloc, alen;
+	CLST svcn, evcn1, len, dealloc, alen, done;
 	CLST vcn, end;
 	u64 valid_size, data_size, alloc_size, total_size;
 	u32 mask;
···
 	len = bytes >> sbi->cluster_bits;
 	end = vcn + len;
 	dealloc = 0;
+	done = 0;
 
 	svcn = le64_to_cpu(attr_b->nres.svcn);
 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
···
 		attr = attr_b;
 		le = le_b;
 		mi = mi_b;
-	} else if (!le_b) {
+		goto check_seg;
+	}
+
+	if (!le_b) {
 		err = -EINVAL;
 		goto out;
-	} else {
-		le = le_b;
-		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
-				    &mi);
-		if (!attr) {
-			err = -EINVAL;
-			goto out;
-		}
+	}
 
-		svcn = le64_to_cpu(attr->nres.svcn);
-		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
+	le = le_b;
+	attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn, &mi);
+	if (!attr) {
+		err = -EINVAL;
+		goto out;
 	}
 
 	for (;;) {
+		CLST vcn1, eat, next_svcn;
+
+		svcn = le64_to_cpu(attr->nres.svcn);
+		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
+
+check_seg:
 		if (svcn >= end) {
 			/* Shift VCN- */
 			attr->nres.svcn = cpu_to_le64(svcn - len);
···
 				ni->attr_list.dirty = true;
 			}
 			mi->dirty = true;
-		} else if (svcn < vcn || end < evcn1) {
-			CLST vcn1, eat, next_svcn;
+			goto next_attr;
+		}
 
+		run_truncate(run, 0);
+		err = attr_load_runs(attr, ni, run, &svcn);
+		if (err)
+			goto out;
+
+		vcn1 = vcn + done; /* original vcn in attr/run. */
+		eat = min(end, evcn1) - vcn1;
+
+		err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc, true);
+		if (err)
+			goto out;
+
+		if (svcn + eat < evcn1) {
 			/* Collapse a part of this attribute segment. */
-			err = attr_load_runs(attr, ni, run, &svcn);
-			if (err)
-				goto out;
-			vcn1 = max(vcn, svcn);
-			eat = min(end, evcn1) - vcn1;
 
-			err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
-						true);
-			if (err)
-				goto out;
-
-			if (!run_collapse_range(run, vcn1, eat)) {
+			if (!run_collapse_range(run, vcn1, eat, done)) {
 				err = -ENOMEM;
 				goto out;
 			}
···
 			if (svcn >= vcn) {
 				/* Shift VCN */
 				attr->nres.svcn = cpu_to_le64(vcn);
-				if (le) {
+				if (le && attr->nres.svcn != le->vcn) {
 					le->vcn = attr->nres.svcn;
 					ni->attr_list.dirty = true;
 				}
···
 				goto out;
 
 			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
-			if (next_svcn + eat < evcn1) {
+			if (next_svcn + eat + done < evcn1) {
 				err = ni_insert_nonresident(
 					ni, ATTR_DATA, NULL, 0, run, next_svcn,
 					evcn1 - eat - next_svcn, a_flags, &attr,
···
 
 			/* Free all allocated memory. */
 			run_truncate(run, 0);
+			done += eat;
 		} else {
 			u16 le_sz;
-			u16 roff = le16_to_cpu(attr->nres.run_off);
-
-			if (roff > le32_to_cpu(attr->size)) {
-				err = -EINVAL;
-				goto out;
-			}
-
-			run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
-				      evcn1 - 1, svcn, Add2Ptr(attr, roff),
-				      le32_to_cpu(attr->size) - roff);
 
 			/* Delete this attribute segment. */
 			mi_remove_attr(NULL, mi, attr);
···
 				goto out;
 			}
 
+			done += evcn1 - svcn;
 			if (evcn1 >= alen)
 				break;
···
 				err = -EINVAL;
 				goto out;
 			}
-			goto next_attr;
+			continue;
 		}
 		le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
 	}
 
+next_attr:
 	if (evcn1 >= alen)
 		break;
···
 			err = -EINVAL;
 			goto out;
 		}
-
-next_attr:
-		svcn = le64_to_cpu(attr->nres.svcn);
-		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
 	}
 
 	if (!attr_b) {
···
 	if (attr_load_runs(attr, ni, run, NULL))
 		goto bad_inode;
 
-	if (!run_collapse_range(run, vcn, len))
+	if (!run_collapse_range(run, vcn, len, 0))
 		goto bad_inode;
 
 	if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
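
The attr_collapse_range() rework above is driven by fallocate(2). As a reminder of the user-visible operation, a minimal userspace sketch (not part of this series; offsets must be aligned to the filesystem block size):

    /*
     * Userspace sketch of the operation attr_collapse_range() backs:
     * remove [off, off + len) from the middle of a file and shift the
     * tail down by len. Illustration only, not from this series.
     */
    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <linux/falloc.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            int fd;
            off_t off, len;

            if (argc != 4) {
                    fprintf(stderr, "usage: %s file off len\n", argv[0]);
                    return 1;
            }
            off = atoll(argv[2]);
            len = atoll(argv[3]);

            fd = open(argv[1], O_RDWR);
            if (fd < 0 || fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, off, len)) {
                    perror("collapse_range");
                    return 1;
            }
            close(fd);
            return 0;
    }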
+1 -2
fs/ntfs3/dir.c
···
 	 * It does additional locks/reads just to get the type of name.
 	 * Should we use additional mount option to enable branch below?
 	 */
-	if (fname->dup.extend_data &&
-	    ino != ni->mi.rno) {
+	if (fname->dup.extend_data && ino != ni->mi.rno) {
 		struct inode *inode = ntfs_iget5(sbi->sb, &e->ref, NULL);
 		if (!IS_ERR_OR_NULL(inode)) {
 			dt_type = fs_umode_to_dtype(inode->i_mode);
+93 -16
fs/ntfs3/file.c
···
 #include "ntfs.h"
 #include "ntfs_fs.h"
 
+/*
+ * cifs, btrfs, exfat, ext4, f2fs use this constant.
+ * Hope this value will become common to all fs.
+ */
+#define NTFS3_IOC_SHUTDOWN _IOR('X', 125, __u32)
+
 static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
 {
 	struct fstrim_range __user *user_range;
···
 
 static int ntfs_ioctl_set_volume_label(struct ntfs_sb_info *sbi, u8 __user *buf)
 {
-	u8 user[FSLABEL_MAX] = {0};
+	u8 user[FSLABEL_MAX] = { 0 };
 	int len;
 
 	if (!capable(CAP_SYS_ADMIN))
···
 }
 
 /*
+ * ntfs_force_shutdown - helper function. Called from ioctl
+ */
+static int ntfs_force_shutdown(struct super_block *sb, u32 flags)
+{
+	int err;
+	struct ntfs_sb_info *sbi = sb->s_fs_info;
+
+	if (unlikely(ntfs3_forced_shutdown(sb)))
+		return 0;
+
+	/* No additional options yet (flags). */
+	err = bdev_freeze(sb->s_bdev);
+	if (err)
+		return err;
+	set_bit(NTFS_FLAGS_SHUTDOWN_BIT, &sbi->flags);
+	bdev_thaw(sb->s_bdev);
+	return 0;
+}
+
+static int ntfs_ioctl_shutdown(struct super_block *sb, unsigned long arg)
+{
+	u32 flags;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (get_user(flags, (__u32 __user *)arg))
+		return -EFAULT;
+
+	return ntfs_force_shutdown(sb, flags);
+}
+
+/*
  * ntfs_ioctl - file_operations::unlocked_ioctl
  */
 long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
 {
 	struct inode *inode = file_inode(filp);
-	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
+	struct super_block *sb = inode->i_sb;
+	struct ntfs_sb_info *sbi = sb->s_fs_info;
 
 	/* Avoid any operation if inode is bad. */
 	if (unlikely(is_bad_ni(ntfs_i(inode))))
···
 		return ntfs_ioctl_get_volume_label(sbi, (u8 __user *)arg);
 	case FS_IOC_SETFSLABEL:
 		return ntfs_ioctl_set_volume_label(sbi, (u8 __user *)arg);
+	case NTFS3_IOC_SHUTDOWN:
+		return ntfs_ioctl_shutdown(sb, arg);
 	}
 	return -ENOTTY; /* Inappropriate ioctl for device. */
 }
···
 		return -EOPNOTSUPP;
 	}
 
-	if (is_compressed(ni) && rw) {
-		ntfs_inode_warn(inode, "mmap(write) compressed not supported");
-		return -EOPNOTSUPP;
+	if (is_compressed(ni)) {
+		if (rw) {
+			ntfs_inode_warn(inode,
+					"mmap(write) compressed not supported");
+			return -EOPNOTSUPP;
+		}
+		/* Turn off readahead for compressed files. */
+		file->f_ra.ra_pages = 0;
 	}
 
 	if (rw) {
···
 
 	if (dirty)
 		mark_inode_dirty(inode);
-
-	/*ntfs_flush_inodes(inode->i_sb, inode, NULL);*/
 
 	return 0;
 }
···
 	if (err)
 		return err;
 
-	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
-		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
-		return -EOPNOTSUPP;
+	if (is_compressed(ni)) {
+		if (iocb->ki_flags & IOCB_DIRECT) {
+			ntfs_inode_warn(
+				inode, "direct i/o + compressed not supported");
+			return -EOPNOTSUPP;
+		}
+		/* Turn off readahead for compressed files. */
+		file->f_ra.ra_pages = 0;
+	}
+
+	/* Check minimum alignment for dio. */
+	if (iocb->ki_flags & IOCB_DIRECT) {
+		struct super_block *sb = inode->i_sb;
+		struct ntfs_sb_info *sbi = sb->s_fs_info;
+		if ((iocb->ki_pos | iov_iter_alignment(iter)) &
+		    sbi->bdev_blocksize_mask) {
+			iocb->ki_flags &= ~IOCB_DIRECT;
+		}
 	}
 
 	return generic_file_read_iter(iocb, iter);
···
 	err = check_read_restriction(inode);
 	if (err)
 		return err;
+
+	if (is_compressed(ntfs_i(inode))) {
+		/* Turn off readahead for compressed files. */
+		in->f_ra.ra_pages = 0;
+	}
 
 	return filemap_splice_read(in, ppos, pipe, len, flags);
 }
···
 
 		if (!frame_uptodate && off) {
 			err = ni_read_frame(ni, frame_vbo, pages,
-					    pages_per_frame);
+					    pages_per_frame, 0);
 			if (err) {
 				for (ip = 0; ip < pages_per_frame; ip++) {
 					folio = page_folio(pages[ip]);
···
 
 			if (off || (to < i_size && (to & (frame_size - 1)))) {
 				err = ni_read_frame(ni, frame_vbo, pages,
-						    pages_per_frame);
+						    pages_per_frame, 0);
 				if (err) {
 					for (ip = 0; ip < pages_per_frame;
 					     ip++) {
···
 			size_t cp, tail = PAGE_SIZE - off;
 
 			folio = page_folio(pages[ip]);
-			cp = copy_folio_from_iter_atomic(folio, off,
-							 min(tail, bytes), from);
+			cp = copy_folio_from_iter_atomic(
+				folio, off, min(tail, bytes), from);
 			flush_dcache_folio(folio);
 
 			copied += cp;
···
 	if (sbi->options->prealloc &&
 	    ((file->f_mode & FMODE_WRITE) &&
 	     atomic_read(&inode->i_writecount) == 1)
-	/*
+	    /*
 	     * The only file when inode->i_fop = &ntfs_file_operations and
 	     * init_rwsem(&ni->file.run_lock) is not called explicitly is MFT.
 	     *
···
 	return iter_file_splice_write(pipe, file, ppos, len, flags);
 }
 
+/*
+ * ntfs_file_fsync - file_operations::fsync
+ */
+static int ntfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+{
+	struct inode *inode = file_inode(file);
+	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
+		return -EIO;
+
+	return generic_file_fsync(file, start, end, datasync);
+}
+
 // clang-format off
 const struct inode_operations ntfs_file_inode_operations = {
 	.getattr = ntfs_getattr,
···
 	.splice_write = ntfs_file_splice_write,
 	.mmap_prepare = ntfs_file_mmap_prepare,
 	.open = ntfs_file_open,
-	.fsync = generic_file_fsync,
+	.fsync = ntfs_file_fsync,
 	.fallocate = ntfs_fallocate,
 	.release = ntfs_file_release,
 };
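
Per the comment in the hunk above, the 'X'/125 request number is shared with cifs, btrfs, exfat, ext4, and f2fs. A minimal userspace sketch of driving the new ioctl (illustration only, not part of this series; CAP_SYS_ADMIN required, and the flags word is not yet interpreted):

    /*
     * Userspace sketch: request an ntfs3 shutdown on a mounted volume.
     * The request number mirrors the NTFS3_IOC_SHUTDOWN definition above.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>

    #define NTFS3_IOC_SHUTDOWN _IOR('X', 125, uint32_t)

    int ntfs3_shutdown(const char *mntpoint)
    {
            uint32_t flags = 0; /* no shutdown options defined yet */
            int fd = open(mntpoint, O_RDONLY);

            if (fd < 0 || ioctl(fd, NTFS3_IOC_SHUTDOWN, &flags) < 0) {
                    perror("NTFS3_IOC_SHUTDOWN");
                    return -1;
            }
            return close(fd);
    }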
+78 -141
fs/ntfs3/frecord.c
···
 
 	mi_get_ref(&ni->mi, &m->mrec->parent_ref);
 
-	ni_add_mi(ni, m);
-	*mi = m;
+	*mi = ni_ins_mi(ni, &ni->mi_tree, m->rno, &m->node);
+	if (*mi != m)
+		mi_put(m);
+
 	return true;
 }
···
 	 * Skip estimating exact memory requirement.
 	 * Looks like one record_size is always enough.
 	 */
-	le = kmalloc(al_aligned(rs), GFP_NOFS);
+	le = kzalloc(al_aligned(rs), GFP_NOFS);
 	if (!le)
 		return -ENOMEM;
···
 
 out2:
 	ni_remove_mi(ni, mi);
-	mi_put(mi);
 
 out1:
+	mi_put(mi);
 	ntfs_mark_rec_free(sbi, rno, is_mft);
 
 out:
···
 	return err;
 }
 
+static struct page *ntfs_lock_new_page(struct address_space *mapping,
+				       pgoff_t index, gfp_t gfp)
+{
+	struct folio *folio = __filemap_get_folio(mapping, index,
+			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
+	struct page *page;
+
+	if (IS_ERR(folio))
+		return ERR_CAST(folio);
+
+	if (!folio_test_uptodate(folio))
+		return folio_file_page(folio, index);
+
+	/* Use a temporary page to avoid data corruption */
+	folio_unlock(folio);
+	folio_put(folio);
+	page = alloc_page(gfp);
+	if (!page)
+		return ERR_PTR(-ENOMEM);
+	__SetPageLocked(page);
+	return page;
+}
+
 /*
  * ni_readpage_cmpr
  *
···
 		if (i == idx)
 			continue;
 
-		pg = find_or_create_page(mapping, index, gfp_mask);
-		if (!pg) {
-			err = -ENOMEM;
+		pg = ntfs_lock_new_page(mapping, index, gfp_mask);
+		if (IS_ERR(pg)) {
+			err = PTR_ERR(pg);
 			goto out1;
 		}
 		pages[i] = pg;
 	}
 
-	err = ni_read_frame(ni, frame_vbo, pages, pages_per_frame);
+	err = ni_read_frame(ni, frame_vbo, pages, pages_per_frame, 0);
 
 out1:
 	for (i = 0; i < pages_per_frame; i++) {
···
 	 */
 	index = 0;
 	for (vbo = 0; vbo < i_size; vbo += bytes) {
-		u32 nr_pages;
 		bool new;
 
-		if (vbo + frame_size > i_size) {
-			bytes = i_size - vbo;
-			nr_pages = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
-		} else {
-			nr_pages = pages_per_frame;
-			bytes = frame_size;
-		}
-
+		bytes = vbo + frame_size > i_size ? (i_size - vbo) : frame_size;
 		end = bytes_to_cluster(sbi, vbo + bytes);
 
 		for (vcn = vbo >> sbi->cluster_bits; vcn < end; vcn += clen) {
···
 		for (i = 0; i < pages_per_frame; i++, index++) {
 			struct page *pg;
 
-			pg = find_or_create_page(mapping, index, gfp_mask);
-			if (!pg) {
+			pg = ntfs_lock_new_page(mapping, index, gfp_mask);
+			if (IS_ERR(pg)) {
 				while (i--) {
 					unlock_page(pages[i]);
 					put_page(pages[i]);
 				}
-				err = -ENOMEM;
+				err = PTR_ERR(pg);
 				goto out;
 			}
 			pages[i] = pg;
 		}
 
-		err = ni_read_frame(ni, vbo, pages, pages_per_frame);
-
-		if (!err) {
-			down_read(&ni->file.run_lock);
-			err = ntfs_bio_pages(sbi, &ni->file.run, pages,
-					     nr_pages, vbo, bytes,
-					     REQ_OP_WRITE);
-			up_read(&ni->file.run_lock);
-		}
+		err = ni_read_frame(ni, vbo, pages, pages_per_frame, 1);
 
 		for (i = 0; i < pages_per_frame; i++) {
 			unlock_page(pages[i]);
···
  * Pages - Array of locked pages.
  */
 int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
-		  u32 pages_per_frame)
+		  u32 pages_per_frame, int copy)
 {
 	int err;
 	struct ntfs_sb_info *sbi = ni->mi.sbi;
 	u8 cluster_bits = sbi->cluster_bits;
 	char *frame_ondisk = NULL;
 	char *frame_mem = NULL;
-	struct page **pages_disk = NULL;
 	struct ATTR_LIST_ENTRY *le = NULL;
 	struct runs_tree *run = &ni->file.run;
 	u64 valid_size = ni->i_valid;
 	u64 vbo_disk;
 	size_t unc_size;
-	u32 frame_size, i, npages_disk, ondisk_size;
+	u32 frame_size, i, ondisk_size;
 	struct page *pg;
 	struct ATTRIB *attr;
 	CLST frame, clst_data;
···
 	 * To simplify decompress algorithm do vmap for source
 	 * and target pages.
 	 */
-	for (i = 0; i < pages_per_frame; i++)
-		kmap(pages[i]);
-
 	frame_size = pages_per_frame << PAGE_SHIFT;
 	frame_mem = vmap(pages, pages_per_frame, VM_MAP, PAGE_KERNEL);
 	if (!frame_mem) {
···
 		err = attr_wof_frame_info(ni, attr, run, frame64, frames,
 					  frame_bits, &ondisk_size, &vbo_data);
 		if (err)
-			goto out2;
+			goto out1;
 
 		if (frame64 == frames) {
 			unc_size = 1 + ((i_size - 1) & (frame_size - 1));
···
 
 		if (ondisk_size > frame_size) {
 			err = -EINVAL;
-			goto out2;
+			goto out1;
 		}
 
 		if (!attr->non_res) {
···
 				    ARRAY_SIZE(WOF_NAME), run, vbo_disk,
 				    vbo_data + ondisk_size);
 		if (err)
-			goto out2;
-		npages_disk = (ondisk_size + (vbo_disk & (PAGE_SIZE - 1)) +
-			       PAGE_SIZE - 1) >>
-			      PAGE_SHIFT;
+			goto out1;
 #endif
 	} else if (is_attr_compressed(attr)) {
 		/* LZNT compression. */
···
 		if (clst_data >= NTFS_LZNT_CLUSTERS) {
 			/* Frame is not compressed. */
 			down_read(&ni->file.run_lock);
-			err = ntfs_bio_pages(sbi, run, pages, pages_per_frame,
-					     frame_vbo, ondisk_size,
-					     REQ_OP_READ);
+			err = ntfs_read_run(sbi, run, frame_mem, frame_vbo,
+					    ondisk_size);
 			up_read(&ni->file.run_lock);
 			goto out1;
 		}
 		vbo_disk = frame_vbo;
-		npages_disk = (ondisk_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	} else {
 		__builtin_unreachable();
 		err = -EINVAL;
 		goto out1;
 	}
 
-	pages_disk = kcalloc(npages_disk, sizeof(*pages_disk), GFP_NOFS);
-	if (!pages_disk) {
+	/* Allocate memory to read compressed data to. */
+	frame_ondisk = kvmalloc(ondisk_size, GFP_KERNEL);
+	if (!frame_ondisk) {
 		err = -ENOMEM;
-		goto out2;
-	}
-
-	for (i = 0; i < npages_disk; i++) {
-		pg = alloc_page(GFP_KERNEL);
-		if (!pg) {
-			err = -ENOMEM;
-			goto out3;
-		}
-		pages_disk[i] = pg;
-		lock_page(pg);
-		kmap(pg);
+		goto out1;
 	}
 
 	/* Read 'ondisk_size' bytes from disk. */
 	down_read(&ni->file.run_lock);
-	err = ntfs_bio_pages(sbi, run, pages_disk, npages_disk, vbo_disk,
-			     ondisk_size, REQ_OP_READ);
+	err = ntfs_read_run(sbi, run, frame_ondisk, vbo_disk, ondisk_size);
 	up_read(&ni->file.run_lock);
 	if (err)
-		goto out3;
+		goto out2;
 
-	/*
-	 * To simplify decompress algorithm do vmap for source and target pages.
-	 */
-	frame_ondisk = vmap(pages_disk, npages_disk, VM_MAP, PAGE_KERNEL_RO);
-	if (!frame_ondisk) {
-		err = -ENOMEM;
-		goto out3;
-	}
-
-	/* Decompress: Frame_ondisk -> frame_mem. */
 #ifdef CONFIG_NTFS3_LZX_XPRESS
 	if (run != &ni->file.run) {
 		/* LZX or XPRESS */
-		err = decompress_lzx_xpress(
-			sbi, frame_ondisk + (vbo_disk & (PAGE_SIZE - 1)),
-			ondisk_size, frame_mem, unc_size, frame_size);
+		err = decompress_lzx_xpress(sbi, frame_ondisk, ondisk_size,
+					    frame_mem, unc_size, frame_size);
 	} else
 #endif
 	{
···
 		memset(frame_mem + ok, 0, frame_size - ok);
 	}
 
-	vunmap(frame_ondisk);
-
-out3:
-	for (i = 0; i < npages_disk; i++) {
-		pg = pages_disk[i];
-		if (pg) {
-			kunmap(pg);
-			unlock_page(pg);
-			put_page(pg);
-		}
-	}
-	kfree(pages_disk);
-
 out2:
+	kvfree(frame_ondisk);
+out1:
 #ifdef CONFIG_NTFS3_LZX_XPRESS
 	if (run != &ni->file.run)
 		run_free(run);
+	if (!err && copy) {
+		/* We are called from 'ni_decompress_file' */
+		/* Copy decompressed LZX or XPRESS data into new place. */
+		down_read(&ni->file.run_lock);
+		err = ntfs_write_run(sbi, &ni->file.run, frame_mem, frame_vbo,
+				     frame_size);
+		up_read(&ni->file.run_lock);
+	}
 #endif
-out1:
 	vunmap(frame_mem);
 out:
 	for (i = 0; i < pages_per_frame; i++) {
 		pg = pages[i];
-		kunmap(pg);
 		SetPageUptodate(pg);
 	}
···
 	u64 frame_vbo = folio_pos(folio);
 	CLST frame = frame_vbo >> frame_bits;
 	char *frame_ondisk = NULL;
-	struct page **pages_disk = NULL;
 	struct ATTR_LIST_ENTRY *le = NULL;
 	char *frame_mem;
 	struct ATTRIB *attr;
 	struct mft_inode *mi;
-	u32 i;
-	struct page *pg;
 	size_t compr_size, ondisk_size;
 	struct lznt *lznt;
···
 		goto out;
 	}
 
-	pages_disk = kcalloc(pages_per_frame, sizeof(struct page *), GFP_NOFS);
-	if (!pages_disk) {
+	/* Allocate memory to write compressed data to. */
+	frame_ondisk = kvmalloc(frame_size, GFP_KERNEL);
+	if (!frame_ondisk) {
 		err = -ENOMEM;
 		goto out;
 	}
-
-	for (i = 0; i < pages_per_frame; i++) {
-		pg = alloc_page(GFP_KERNEL);
-		if (!pg) {
-			err = -ENOMEM;
-			goto out1;
-		}
-		pages_disk[i] = pg;
-		lock_page(pg);
-		kmap(pg);
-	}
-
-	/* To simplify compress algorithm do vmap for source and target pages. */
-	frame_ondisk = vmap(pages_disk, pages_per_frame, VM_MAP, PAGE_KERNEL);
-	if (!frame_ondisk) {
-		err = -ENOMEM;
-		goto out1;
-	}
-
-	for (i = 0; i < pages_per_frame; i++)
-		kmap(pages[i]);
 
 	/* Map in-memory frame for read-only. */
 	frame_mem = vmap(pages, pages_per_frame, VM_MAP, PAGE_KERNEL_RO);
 	if (!frame_mem) {
 		err = -ENOMEM;
-		goto out2;
+		goto out1;
 	}
 
 	mutex_lock(&sbi->compress.mtx_lznt);
···
 	if (!lznt) {
 		mutex_unlock(&sbi->compress.mtx_lznt);
 		err = -ENOMEM;
-		goto out3;
+		goto out2;
 	}
 
 	sbi->compress.lznt = lznt;
···
 		goto out2;
 
 	down_read(&ni->file.run_lock);
-	err = ntfs_bio_pages(sbi, &ni->file.run,
-			     ondisk_size < frame_size ? pages_disk : pages,
-			     pages_per_frame, frame_vbo, ondisk_size,
-			     REQ_OP_WRITE);
+	err = ntfs_write_run(sbi, &ni->file.run,
+			     ondisk_size < frame_size ? frame_ondisk :
+							frame_mem,
+			     frame_vbo, ondisk_size);
 	up_read(&ni->file.run_lock);
 
-out3:
-	vunmap(frame_mem);
-
 out2:
-	for (i = 0; i < pages_per_frame; i++)
-		kunmap(pages[i]);
-
-	vunmap(frame_ondisk);
+	vunmap(frame_mem);
 out1:
-	for (i = 0; i < pages_per_frame; i++) {
-		pg = pages_disk[i];
-		if (pg) {
-			kunmap(pg);
-			unlock_page(pg);
-			put_page(pg);
-		}
-	}
-	kfree(pages_disk);
+	kvfree(frame_ondisk);
 out:
 	return err;
 }
···
 	err = ni_add_name(new_dir_ni, ni, new_de);
 	if (!err) {
 		err = ni_remove_name(dir_ni, ni, de, &de2, &undo);
-		WARN_ON(err && ni_remove_name(new_dir_ni, ni, new_de, &de2,
-			&undo));
+		WARN_ON(err &&
+			ni_remove_name(new_dir_ni, ni, new_de, &de2, &undo));
 	}
 
 	/*
···
 	if (attr) {
 		const struct REPARSE_POINT *rp;
 
-		rp = resident_data_ex(attr, sizeof(struct REPARSE_POINT));
+		rp = resident_data_ex(attr,
+				      sizeof(struct REPARSE_POINT));
 		/* If ATTR_REPARSE exists 'rp' can't be NULL. */
 		if (rp)
 			dup->extend_data = rp->ReparseTag;
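
Both ni_read_frame() and ni_write_frame() now stage the on-disk frame in a single kvmalloc() buffer and present the page-cache pages to the (de)compressor through one vmap(), instead of per-page kmap()/alloc_page() bookkeeping. The mapping pattern in isolation (kernel-side sketch, not a helper introduced by this series):

    /*
     * Kernel-side sketch of the mapping pattern used above: expose an
     * array of page-cache pages to the (de)compressor as one virtually
     * contiguous buffer. Illustration only.
     */
    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    static void *map_frame(struct page **pages, unsigned int nr_pages)
    {
            /* One contiguous kernel mapping over the whole frame. */
            return vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
    }

    static void unmap_frame(void *frame_mem)
    {
            vunmap(frame_mem);
    }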
+65 -71
fs/ntfs3/fsntfs.c
···
 	}
 	if (buffer_locked(bh))
 		__wait_on_buffer(bh);
-	set_buffer_uptodate(bh);
+
+	lock_buffer(bh);
+	if (!buffer_uptodate(bh))
+	{
+		memset(bh->b_data, 0, blocksize);
+		set_buffer_uptodate(bh);
+	}
+	unlock_buffer(bh);
 } else {
 	bh = ntfs_bread(sb, block);
 	if (!bh) {
···
 }
 
 /*
- * ntfs_bio_pages - Read/write pages from/to disk.
+ * ntfs_read_write_run - Read/Write disk's page cache.
  */
-int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
-		   struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
-		   enum req_op op)
+int ntfs_read_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
+			void *buf, u64 vbo, size_t bytes, int wr)
 {
-	int err = 0;
-	struct bio *new, *bio = NULL;
 	struct super_block *sb = sbi->sb;
-	struct block_device *bdev = sb->s_bdev;
-	struct page *page;
+	struct address_space *mapping = sb->s_bdev->bd_mapping;
 	u8 cluster_bits = sbi->cluster_bits;
-	CLST lcn, clen, vcn, vcn_next;
-	u32 add, off, page_idx;
+	CLST vcn_next, vcn = vbo >> cluster_bits;
+	CLST lcn, clen;
 	u64 lbo, len;
-	size_t run_idx;
-	struct blk_plug plug;
+	size_t idx;
+	u32 off, op;
+	struct folio *folio;
+	char *kaddr;
 
 	if (!bytes)
 		return 0;
 
-	blk_start_plug(&plug);
+	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
+		return -ENOENT;
 
-	/* Align vbo and bytes to be 512 bytes aligned. */
-	lbo = (vbo + bytes + 511) & ~511ull;
-	vbo = vbo & ~511ull;
-	bytes = lbo - vbo;
+	if (lcn == SPARSE_LCN)
+		return -EINVAL;
 
-	vcn = vbo >> cluster_bits;
-	if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
-		err = -ENOENT;
-		goto out;
-	}
 	off = vbo & sbi->cluster_mask;
-	page_idx = 0;
-	page = pages[0];
+	lbo = ((u64)lcn << cluster_bits) + off;
+	len = ((u64)clen << cluster_bits) - off;
 
 	for (;;) {
-		lbo = ((u64)lcn << cluster_bits) + off;
-		len = ((u64)clen << cluster_bits) - off;
-new_bio:
-		new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS);
-		if (bio) {
-			bio_chain(bio, new);
-			submit_bio(bio);
+		/* Read range [lbo, lbo+len). */
+		folio = read_mapping_folio(mapping, lbo >> PAGE_SHIFT, NULL);
+
+		if (IS_ERR(folio))
+			return PTR_ERR(folio);
+
+		off = offset_in_page(lbo);
+		op = PAGE_SIZE - off;
+
+		if (op > len)
+			op = len;
+		if (op > bytes)
+			op = bytes;
+
+		kaddr = kmap_local_folio(folio, 0);
+		if (wr) {
+			memcpy(kaddr + off, buf, op);
+			folio_mark_dirty(folio);
+		} else {
+			memcpy(buf, kaddr + off, op);
+			flush_dcache_folio(folio);
 		}
-		bio = new;
-		bio->bi_iter.bi_sector = lbo >> 9;
+		kunmap_local(kaddr);
+		folio_put(folio);
 
-		while (len) {
-			off = vbo & (PAGE_SIZE - 1);
-			add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;
+		bytes -= op;
+		if (!bytes)
+			return 0;
 
-			if (bio_add_page(bio, page, add, off) < add)
-				goto new_bio;
-
-			if (bytes <= add)
-				goto out;
-			bytes -= add;
-			vbo += add;
-
-			if (add + off == PAGE_SIZE) {
-				page_idx += 1;
-				if (WARN_ON(page_idx >= nr_pages)) {
-					err = -EINVAL;
-					goto out;
-				}
-				page = pages[page_idx];
-			}
-
-			if (len <= add)
-				break;
-			len -= add;
-			lbo += add;
+		buf += op;
+		len -= op;
+		if (len) {
+			/* next volume's page. */
+			lbo += op;
+			continue;
 		}
 
+		/* get next range. */
 		vcn_next = vcn + clen;
-		if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
+		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
 		    vcn != vcn_next) {
-			err = -ENOENT;
-			goto out;
+			return -ENOENT;
 		}
-		off = 0;
-	}
-out:
-	if (bio) {
-		if (!err)
-			err = submit_bio_wait(bio);
-		bio_put(bio);
-	}
-	blk_finish_plug(&plug);
 
-	return err;
+		if (lcn == SPARSE_LCN)
+			return -EINVAL;
+
+		lbo = ((u64)lcn << cluster_bits);
+		len = ((u64)clen << cluster_bits);
+	}
 }
 
 /*
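
Callers now pass a plain kernel buffer rather than a page array, and the two direction wrappers appear in the ntfs_fs.h hunk further down. A rough caller-side sketch (hypothetical helper, assuming the attribute's runs are loaded; not a function added by this series):

    /*
     * Hypothetical caller-side sketch: read a byte range of a
     * non-resident attribute through the block device's page cache.
     */
    #include "ntfs_fs.h"

    static int example_read_range(struct ntfs_sb_info *sbi,
                                  struct ntfs_inode *ni, u64 vbo, size_t bytes)
    {
            int err;
            void *buf = kvmalloc(bytes, GFP_KERNEL);

            if (!buf)
                    return -ENOMEM;

            down_read(&ni->file.run_lock); /* keep the runs stable */
            err = ntfs_read_run(sbi, &ni->file.run, buf, vbo, bytes);
            up_read(&ni->file.run_lock);

            kvfree(buf);
            return err;
    }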
+2 -1
fs/ntfs3/index.c
···
 	 * Undo critical operations.
 	 */
 	indx_mark_free(indx, ni, new_vbn >> indx->idx2vbn_bits);
-	memcpy(hdr1, hdr1_saved, used1);
+	unsafe_memcpy(hdr1, hdr1_saved, used1,
+		      "There are entries after the structure");
 	indx_write(indx, ni, n1, 0);
 }
 
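
unsafe_memcpy() is the kernel's FORTIFY_SOURCE escape hatch for copies that intentionally run past a destination's compile-time size; the string argument documents why the copy is safe. A schematic of the situation above (hypothetical struct, for illustration only):

    /*
     * Schematic of the change above with a hypothetical struct: the
     * index header is followed in the same buffer by its entries, so
     * restoring it copies more than sizeof(*dst). FORTIFY would warn
     * on a plain memcpy(); unsafe_memcpy() annotates the intent.
     */
    #include <linux/string.h>
    #include <linux/types.h>

    struct example_hdr {
            __le32 used; /* bytes used by the header plus trailing entries */
            /* index entries follow this header in memory */
    };

    static void restore_hdr(struct example_hdr *dst,
                            const struct example_hdr *saved, u32 used)
    {
            unsafe_memcpy(dst, saved, used,
                          "There are entries after the structure");
    }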
+15 -12
fs/ntfs3/inode.c
···
 		/* Records in $Extend are not a files or general directories. */
 		inode->i_op = &ntfs_file_inode_operations;
 		mode = S_IFREG;
+		init_rwsem(&ni->file.run_lock);
 	} else {
 		err = -EINVAL;
 		goto out;
···
 /*
  * ntfs_write_end - Address_space_operations::write_end.
  */
-int ntfs_write_end(const struct kiocb *iocb,
-		   struct address_space *mapping, loff_t pos,
-		   u32 len, u32 copied, struct folio *folio, void *fsdata)
+int ntfs_write_end(const struct kiocb *iocb, struct address_space *mapping,
+		   loff_t pos, u32 len, u32 copied, struct folio *folio,
+		   void *fsdata)
 {
 	struct inode *inode = mapping->host;
 	struct ntfs_inode *ni = ntfs_i(inode);
···
 	typeof(rp->SymbolicLinkReparseBuffer) *rs;
 	bool is_absolute;
 
-	is_absolute = (strlen(symname) > 1 && symname[1] == ':');
+	is_absolute = symname[0] && symname[1] == ':';
 
 	rp = kzalloc(ntfs_reparse_bytes(2 * size + 2, is_absolute), GFP_NOFS);
 	if (!rp)
···
 
 	/* PrintName + SubstituteName. */
 	rs->SubstituteNameOffset = cpu_to_le16(sizeof(short) * err);
-	rs->SubstituteNameLength = cpu_to_le16(sizeof(short) * err + (is_absolute ? 8 : 0));
+	rs->SubstituteNameLength =
+		cpu_to_le16(sizeof(short) * err + (is_absolute ? 8 : 0));
 	rs->PrintNameLength = rs->SubstituteNameOffset;
 
 	/*
 	 * TODO: Use relative path if possible to allow Windows to
 	 * parse this path.
-	 * 0-absolute path 1- relative path (SYMLINK_FLAG_RELATIVE).
+	 * 0-absolute path, 1- relative path (SYMLINK_FLAG_RELATIVE).
 	 */
 	rs->Flags = cpu_to_le32(is_absolute ? 0 : SYMLINK_FLAG_RELATIVE);
 
-	memmove(rp_name + err + (is_absolute ? 4 : 0), rp_name, sizeof(short) * err);
+	memmove(rp_name + err + (is_absolute ? 4 : 0), rp_name,
+		sizeof(short) * err);
 
 	if (is_absolute) {
 		/* Decorate SubstituteName. */
···
 		fa |= FILE_ATTRIBUTE_READONLY;
 
 	/* Allocate PATH_MAX bytes. */
-	new_de = __getname();
+	new_de = kmem_cache_zalloc(names_cachep, GFP_KERNEL);
 	if (!new_de) {
 		err = -ENOMEM;
 		goto out1;
···
 	 * Use ni_find_attr cause layout of MFT record may be changed
 	 * in ntfs_init_acl and ntfs_save_wsl_perm.
 	 */
-	attr = ni_find_attr(ni, NULL, NULL, ATTR_NAME, NULL, 0, NULL, NULL);
+	attr = ni_find_attr(ni, NULL, NULL, ATTR_NAME, NULL, 0, NULL,
+			    NULL);
 	if (attr) {
 		struct ATTR_FILE_NAME *fn;
···
 	struct NTFS_DE *de;
 
 	/* Allocate PATH_MAX bytes. */
-	de = __getname();
+	de = kmem_cache_zalloc(names_cachep, GFP_KERNEL);
 	if (!de)
 		return -ENOMEM;
···
 		return -EINVAL;
 
 	/* Allocate PATH_MAX bytes. */
-	de = __getname();
+	de = kmem_cache_zalloc(names_cachep, GFP_KERNEL);
 	if (!de)
 		return -ENOMEM;
···
 
 const struct address_space_operations ntfs_aops_cmpr = {
 	.read_folio = ntfs_read_folio,
-	.readahead = ntfs_readahead,
 	.dirty_folio = block_dirty_folio,
 	.direct_IO = ntfs_direct_IO,
 };
+3 -3
fs/ntfs3/namei.c
···
 }
 
 /*
- * ntfs_mkdir- inode_operations::mkdir
+ * ntfs_mkdir - inode_operations::mkdir
  */
 static struct dentry *ntfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
 				 struct dentry *dentry, umode_t mode)
 {
-	return ERR_PTR(ntfs_create_inode(idmap, dir, dentry, NULL, S_IFDIR | mode, 0,
-					 NULL, 0, NULL));
+	return ERR_PTR(ntfs_create_inode(idmap, dir, dentry, NULL,
+					 S_IFDIR | mode, 0, NULL, 0, NULL));
 }
 
 /*
+27 -13
fs/ntfs3/ntfs_fs.h
···
 
 	u32 discard_granularity;
 	u64 discard_granularity_mask_inv; // ~(discard_granularity_mask_inv-1)
+	u32 bdev_blocksize_mask; // bdev_logical_block_size(bdev) - 1;
 
 	u32 cluster_size; // bytes per cluster
 	u32 cluster_mask; // == cluster_size - 1
···
 int ni_readpage_cmpr(struct ntfs_inode *ni, struct folio *folio);
 int ni_decompress_file(struct ntfs_inode *ni);
 int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
-		  u32 pages_per_frame);
+		  u32 pages_per_frame, int copy);
 int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
 		   u32 pages_per_frame);
 int ni_remove_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
···
 		   struct NTFS_DE *de);
 
 int ni_rename(struct ntfs_inode *dir_ni, struct ntfs_inode *new_dir_ni,
-	      struct ntfs_inode *ni, struct NTFS_DE *de, struct NTFS_DE *new_de);
+	      struct ntfs_inode *ni, struct NTFS_DE *de,
+	      struct NTFS_DE *new_de);
 
 bool ni_is_dirty(struct inode *inode);
···
 		  u32 bytes, struct ntfs_buffers *nb);
 int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
 		  struct ntfs_buffers *nb, int sync);
-int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
-		   struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
-		   enum req_op op);
+int ntfs_read_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
+			void *buf, u64 vbo, size_t bytes, int wr);
+static inline int ntfs_read_run(struct ntfs_sb_info *sbi,
+				const struct runs_tree *run, void *buf, u64 vbo,
+				size_t bytes)
+{
+	return ntfs_read_write_run(sbi, run, buf, vbo, bytes, 0);
+}
+static inline int ntfs_write_run(struct ntfs_sb_info *sbi,
+				 const struct runs_tree *run, void *buf,
+				 u64 vbo, size_t bytes)
+{
+	return ntfs_read_write_run(sbi, run, buf, vbo, bytes, 1);
+}
+
 int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run);
 int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
 		    u64 vbo, u64 *lbo, u64 *bytes);
···
 int ntfs_get_block(struct inode *inode, sector_t vbn,
 		   struct buffer_head *bh_result, int create);
 int ntfs_write_begin(const struct kiocb *iocb, struct address_space *mapping,
-		     loff_t pos, u32 len, struct folio **foliop,
-		     void **fsdata);
+		     loff_t pos, u32 len, struct folio **foliop, void **fsdata);
 int ntfs_write_end(const struct kiocb *iocb, struct address_space *mapping,
 		   loff_t pos, u32 len, u32 copied, struct folio *folio,
 		   void *fsdata);
···
 		   struct ATTRIB *attr);
 bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes);
 int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
-		 struct runs_tree *run, CLST len);
+		 const struct runs_tree *run, CLST len);
 static inline bool mi_is_ref(const struct mft_inode *mi,
 			     const struct MFT_REF *ref)
 {
···
 void run_truncate_around(struct runs_tree *run, CLST vcn);
 bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
 		   bool is_mft);
-bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len);
+bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len, CLST sub);
 bool run_insert_range(struct runs_tree *run, CLST vcn, CLST len);
 bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
 		   CLST *lcn, CLST *len);
···
  */
 static inline void nt2kernel(const __le64 tm, struct timespec64 *ts)
 {
-	u64 t = le64_to_cpu(tm) - _100ns2seconds * SecondsToStartOf1970;
+	s32 t32;
+	/* use signed 64 bit to support timestamps prior to epoch. xfstest 258. */
+	s64 t = le64_to_cpu(tm) - _100ns2seconds * SecondsToStartOf1970;
 
-	// WARNING: do_div changes its first argument(!)
-	ts->tv_nsec = do_div(t, _100ns2seconds) * 100;
-	ts->tv_sec = t;
+	ts->tv_sec = div_s64_rem(t, _100ns2seconds, &t32);
+	ts->tv_nsec = t32 * 100;
 }
 
 static inline struct ntfs_sb_info *ntfs_sb(struct super_block *sb)
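
The nt2kernel() hunk is the pre-epoch timestamp fix: NTFS stores times as 100 ns ticks since 1601-01-01, so anything before 1970 goes negative after subtracting the epoch offset, and the old unsigned do_div() corrupted it. A standalone sketch of the same signed conversion (plain C stand-ins for the kernel constants; C's truncating `/` and `%` match div_s64_rem() semantics):

    /*
     * Standalone sketch of the signed NTFS -> Unix conversion above.
     * TICKS_PER_SEC and EPOCH_DIFF_SECS mirror the kernel's
     * _100ns2seconds and SecondsToStartOf1970 constants.
     */
    #include <stdint.h>

    #define TICKS_PER_SEC   10000000LL    /* 100 ns units per second */
    #define EPOCH_DIFF_SECS 11644473600LL /* 1601-01-01 .. 1970-01-01 */

    static void nt2unix(int64_t nt_time, int64_t *sec, int32_t *nsec)
    {
            /* May be negative for pre-1970 timestamps. */
            int64_t t = nt_time - TICKS_PER_SEC * EPOCH_DIFF_SECS;

            *sec = t / TICKS_PER_SEC;                   /* truncates toward zero */
            *nsec = (int32_t)(t % TICKS_PER_SEC) * 100; /* sign follows t */
    }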
+1 -1
fs/ntfs3/record.c
···
  * If failed record is not changed.
  */
 int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
-		 struct runs_tree *run, CLST len)
+		 const struct runs_tree *run, CLST len)
 {
 	int err = 0;
 	struct ntfs_sb_info *sbi = mi->sbi;
+14 -3
fs/ntfs3/run.c
···
  * Helper for attr_collapse_range(),
  * which is helper for fallocate(collapse_range).
  */
-bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
+bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len, CLST sub)
 {
 	size_t index, eat;
 	struct ntfs_run *r, *e, *eat_start, *eat_end;
···
 		/* Collapse a middle part of normal run, split. */
 		if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
 			return false;
-		return run_collapse_range(run, vcn, len);
+		return run_collapse_range(run, vcn, len, sub);
 	}
 
 	r += 1;
···
 	eat = eat_end - eat_start;
 	memmove(eat_start, eat_end, (e - eat_end) * sizeof(*r));
 	run->count -= eat;
+
+	if (sub) {
+		e -= eat;
+		for (r = run->runs; r < e; r++) {
+			r->vcn -= sub;
+		}
+	}
 
 	return true;
 }
···
 			if (!dlcn)
 				return -EINVAL;
 
-			if (check_add_overflow(prev_lcn, dlcn, &lcn))
+			/* Check special combination: 0 + SPARSE_LCN64. */
+			if (!prev_lcn && dlcn == SPARSE_LCN64) {
+				lcn = SPARSE_LCN64;
+			} else if (check_add_overflow(prev_lcn, dlcn, &lcn)) {
 				return -EINVAL;
+			}
 			prev_lcn = lcn;
 		} else {
 			/* The size of 'dlcn' can't be > 8. */
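
The new `sub` argument lets run_collapse_range() remove a range and, in the same pass, lower the starting VCN of every run left in the tree; attr_collapse_range() passes the clusters already collapsed from earlier attribute segments so a freshly loaded segment lands in the post-collapse VCN space. In miniature (toy types as stand-ins for struct ntfs_run/CLST, illustration only):

    /*
     * Toy model of the 'sub' shift added above: once the collapsed
     * entries are gone, the surviving runs' VCNs all drop by 'sub'.
     */
    #include <stddef.h>

    struct toy_run {
            unsigned long long vcn; /* first virtual cluster of the run */
            unsigned long long lcn; /* first logical (on-disk) cluster */
            unsigned long long len; /* length in clusters */
    };

    static void toy_shift_vcns(struct toy_run *runs, size_t count,
                               unsigned long long sub)
    {
            size_t i;

            for (i = 0; i < count; i++)
                    runs[i].vcn -= sub;
    }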
+68 -20
fs/ntfs3/super.c
···
 *	mi - MFT inode - One MFT record(usually 1024 bytes or 4K), consists of attributes.
 *	ni - NTFS inode - Extends linux inode. consists of one or more mft inodes.
 *	index - unit inside directory - 2K, 4K, <=page size, does not depend on cluster size.
+*	resident attribute - Attribute with content stored directly in the MFT record
+*	non-resident attribute - Attribute with content stored in clusters
+*	data_size - Size of attribute content in bytes. Equal to inode->i_size
+*	valid_size - Number of bytes written to the non-resident attribute
+*	allocated_size - Total size of clusters allocated for non-resident content
+*	total_size - Actual size of allocated clusters for sparse or compressed attributes
+*	  - Constraint: valid_size <= data_size <= allocated_size
 *
 * WSL - Windows Subsystem for Linux
 * https://docs.microsoft.com/en-us/windows/wsl/file-permissions
···
 	fsparam_flag("hide_dot_files", Opt_hide_dot_files),
 	fsparam_flag("windows_names", Opt_windows_names),
 	fsparam_flag("showmeta", Opt_showmeta),
-	fsparam_flag("acl", Opt_acl),
+	fsparam_flag_no("acl", Opt_acl),
 	fsparam_string("iocharset", Opt_iocharset),
-	fsparam_flag("prealloc", Opt_prealloc),
+	fsparam_flag_no("prealloc", Opt_prealloc),
 	fsparam_flag("nocase", Opt_nocase),
 	{}
 };
···
 /*
  * Load nls table or if @nls is utf8 then return NULL.
- *
- * It is good idea to use here "const char *nls".
- * But load_nls accepts "char*".
  */
-static struct nls_table *ntfs_load_nls(char *nls)
+static struct nls_table *ntfs_load_nls(const char *nls)
 {
 	struct nls_table *ret;
···
 		param->string = NULL;
 		break;
 	case Opt_prealloc:
-		opts->prealloc = 1;
+		opts->prealloc = !result.negated;
 		break;
 	case Opt_nocase:
 		opts->nocase = 1;
···
 	if (e) {
 		struct ntfs_sb_info *sbi = sb->s_fs_info;
 
-		proc_create_data("volinfo", 0444, e,
-				 &ntfs3_volinfo_fops, sb);
-		proc_create_data("label", 0644, e,
-				 &ntfs3_label_fops, sb);
+		proc_create_data("volinfo", 0444, e, &ntfs3_volinfo_fops, sb);
+		proc_create_data("label", 0644, e, &ntfs3_label_fops, sb);
 		sbi->procdir = e;
 	}
 }
···
 	}
 }
 #else
-static void ntfs_create_procdir(struct super_block *sb) {}
-static void ntfs_remove_procdir(struct super_block *sb) {}
-static void ntfs_create_proc_root(void) {}
-static void ntfs_remove_proc_root(void) {}
+// clang-format off
+static void ntfs_create_procdir(struct super_block *sb){}
+static void ntfs_remove_procdir(struct super_block *sb){}
+static void ntfs_create_proc_root(void){}
+static void ntfs_remove_proc_root(void){}
+// clang-format on
 #endif
 
 static struct kmem_cache *ntfs_inode_cachep;
···
 
 	/* Mark rw ntfs as clear, if possible. */
 	ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);
+
+	if (sbi->options) {
+		unload_nls(sbi->options->nls);
+		kfree(sbi->options->nls_name);
+		kfree(sbi->options);
+		sbi->options = NULL;
+	}
+
 	ntfs3_put_sbi(sbi);
 }
···
 
 	sbi->volume.blocks = dev_size >> PAGE_SHIFT;
 
+	/* Set dummy blocksize to read boot_block. */
+	if (!sb_min_blocksize(sb, PAGE_SIZE)) {
+		return -EINVAL;
+	}
+
 read_boot:
 	bh = ntfs_bread(sb, boot_block);
 	if (!bh)
···
 		dev_size += sector_size - 1;
 	}
 
+	sbi->bdev_blocksize_mask = max(boot_sector_size, sector_size) - 1;
 	sbi->mft.lbo = mlcn << cluster_bits;
 	sbi->mft.lbo2 = mlcn2 << cluster_bits;
···
 	int err;
 	struct ntfs_sb_info *sbi = sb->s_fs_info;
 	struct block_device *bdev = sb->s_bdev;
-	struct ntfs_mount_options *options;
+	struct ntfs_mount_options *fc_opts;
+	struct ntfs_mount_options *options = NULL;
 	struct inode *inode;
 	struct ntfs_inode *ni;
 	size_t i, tt, bad_len, bad_frags;
···
 	ref.high = 0;
 
 	sbi->sb = sb;
-	sbi->options = options = fc->fs_private;
+	fc_opts = fc->fs_private;
+	if (!fc_opts) {
+		errorf(fc, "missing mount options");
+		return -EINVAL;
+	}
+	options = kmemdup(fc_opts, sizeof(*fc_opts), GFP_KERNEL);
+	if (!options)
+		return -ENOMEM;
+
+	if (fc_opts->nls_name) {
+		options->nls_name = kstrdup(fc_opts->nls_name, GFP_KERNEL);
+		if (!options->nls_name) {
+			kfree(options);
+			return -ENOMEM;
+		}
+	}
+	sbi->options = options;
 	fc->fs_private = NULL;
 	sb->s_flags |= SB_NODIRATIME;
 	sb->s_magic = 0x7366746e; // "ntfs"
···
 	sb->s_export_op = &ntfs_export_ops;
 	sb->s_time_gran = NTFS_TIME_GRAN; // 100 nsec
 	sb->s_xattr = ntfs_xattr_handlers;
-	if (options->nocase)
-		set_default_d_op(sb, &ntfs_dentry_ops);
+	set_default_d_op(sb, options->nocase ? &ntfs_dentry_ops : NULL);
 
 	options->nls = ntfs_load_nls(options->nls_name);
 	if (IS_ERR(options->nls)) {
···
 	sbi->volume.ni = ni;
 	if (info->flags & VOLUME_FLAG_DIRTY) {
 		sbi->volume.real_dirty = true;
-		ntfs_info(sb, "It is recommened to use chkdsk.");
+		ntfs_info(sb, "It is recommended to use chkdsk.");
 	}
 
 	/* Load $MFTMirr to estimate recs_mirr. */
···
 put_inode_out:
 	iput(inode);
 out:
+	/* sbi->options == options */
+	if (options) {
+		unload_nls(options->nls);
+		kfree(options->nls_name);
+		kfree(options);
+		sbi->options = NULL;
+	}
+
 	ntfs3_put_sbi(sbi);
 	kfree(boot2);
-	ntfs3_put_sbi(sbi);
 	return err;
 }
···
 	opts->fs_gid = current_gid();
 	opts->fs_fmask_inv = ~current_umask();
 	opts->fs_dmask_inv = ~current_umask();
+	opts->prealloc = 1;
+
+#ifdef CONFIG_NTFS3_FS_POSIX_ACL
+	/* Set the default value 'acl' */
+	fc->sb_flags |= SB_POSIXACL;
+#endif
 
 	if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE)
 		goto ok;
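
With fsparam_flag_no(), the parser now also accepts "noacl" and "noprealloc", which matters since both defaults flipped to on. A userspace sketch using the new mount API (illustration only; glibc >= 2.36 provides the fsopen()/fsconfig() wrappers):

    /*
     * Sketch: mount ntfs3 with a negated option via the new mount API.
     * "prealloc" now defaults to on, so "noprealloc" disables it.
     * Error handling trimmed for brevity.
     */
    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <sys/mount.h>

    int mount_ntfs3(const char *dev, const char *dir)
    {
            int fsfd = fsopen("ntfs3", FSOPEN_CLOEXEC);
            int mfd;

            if (fsfd < 0)
                    return -1;
            fsconfig(fsfd, FSCONFIG_SET_STRING, "source", dev, 0);
            fsconfig(fsfd, FSCONFIG_SET_FLAG, "noprealloc", NULL, 0);
            fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
            mfd = fsmount(fsfd, FSMOUNT_CLOEXEC, 0);
            if (mfd < 0)
                    return -1;
            return move_mount(mfd, "", AT_FDCWD, dir, MOVE_MOUNT_F_EMPTY_PATH);
    }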
+14 -4
fs/ntfs3/xattr.c
···
 	err = ntfs_set_ea(inode, name, name_len, value, size, flags, 0, NULL);
 	if (err == -ENODATA && !size)
 		err = 0; /* Removing non existed xattr. */
-	if (!err) {
-		set_cached_acl(inode, type, acl);
+	if (err)
+		goto out;
+
+	if (inode->i_mode != mode) {
+		umode_t old_mode = inode->i_mode;
 		inode->i_mode = mode;
-		inode_set_ctime_current(inode);
-		mark_inode_dirty(inode);
+		err = ntfs_save_wsl_perm(inode, NULL);
+		if (err) {
+			inode->i_mode = old_mode;
+			goto out;
+		}
+		inode->i_mode = mode;
 	}
+	set_cached_acl(inode, type, acl);
+	inode_set_ctime_current(inode);
+	mark_inode_dirty(inode);
 
 out:
 	kfree(value);