Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

fs/ntfs3: correct attr_collapse_range when the file is too fragmented

Fix incorrect VCN adjustments in attr_collapse_range() that caused
filesystem errors or corruption on very fragmented NTFS files when
performing collapse-range operations.

Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>

+53 -48
+41 -43
fs/ntfs3/attrib.c
··· 1860 1860 struct ATTRIB *attr = NULL, *attr_b; 1861 1861 struct ATTR_LIST_ENTRY *le, *le_b; 1862 1862 struct mft_inode *mi, *mi_b; 1863 - CLST svcn, evcn1, len, dealloc, alen; 1863 + CLST svcn, evcn1, len, dealloc, alen, done; 1864 1864 CLST vcn, end; 1865 1865 u64 valid_size, data_size, alloc_size, total_size; 1866 1866 u32 mask; ··· 1923 1923 len = bytes >> sbi->cluster_bits; 1924 1924 end = vcn + len; 1925 1925 dealloc = 0; 1926 + done = 0; 1926 1927 1927 1928 svcn = le64_to_cpu(attr_b->nres.svcn); 1928 1929 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1; ··· 1932 1931 attr = attr_b; 1933 1932 le = le_b; 1934 1933 mi = mi_b; 1935 - } else if (!le_b) { 1934 + goto check_seg; 1935 + } 1936 + 1937 + if (!le_b) { 1936 1938 err = -EINVAL; 1937 1939 goto out; 1938 - } else { 1939 - le = le_b; 1940 - attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn, 1941 - &mi); 1942 - if (!attr) { 1943 - err = -EINVAL; 1944 - goto out; 1945 - } 1940 + } 1946 1941 1947 - svcn = le64_to_cpu(attr->nres.svcn); 1948 - evcn1 = le64_to_cpu(attr->nres.evcn) + 1; 1942 + le = le_b; 1943 + attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn, &mi); 1944 + if (!attr) { 1945 + err = -EINVAL; 1946 + goto out; 1949 1947 } 1950 1948 1951 1949 for (;;) { 1950 + CLST vcn1, eat, next_svcn; 1951 + 1952 + svcn = le64_to_cpu(attr->nres.svcn); 1953 + evcn1 = le64_to_cpu(attr->nres.evcn) + 1; 1954 + 1955 + check_seg: 1952 1956 if (svcn >= end) { 1953 1957 /* Shift VCN- */ 1954 1958 attr->nres.svcn = cpu_to_le64(svcn - len); ··· 1963 1957 ni->attr_list.dirty = true; 1964 1958 } 1965 1959 mi->dirty = true; 1966 - } else if (svcn < vcn || end < evcn1) { 1967 - CLST vcn1, eat, next_svcn; 1960 + goto next_attr; 1961 + } 1968 1962 1963 + run_truncate(run, 0); 1964 + err = attr_load_runs(attr, ni, run, &svcn); 1965 + if (err) 1966 + goto out; 1967 + 1968 + vcn1 = vcn + done; /* original vcn in attr/run. 
*/ 1969 + eat = min(end, evcn1) - vcn1; 1970 + 1971 + err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc, true); 1972 + if (err) 1973 + goto out; 1974 + 1975 + if (svcn + eat < evcn1) { 1969 1976 /* Collapse a part of this attribute segment. */ 1970 - err = attr_load_runs(attr, ni, run, &svcn); 1971 - if (err) 1972 - goto out; 1973 - vcn1 = max(vcn, svcn); 1974 - eat = min(end, evcn1) - vcn1; 1975 1977 1976 - err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc, 1977 - true); 1978 - if (err) 1979 - goto out; 1980 - 1981 - if (!run_collapse_range(run, vcn1, eat)) { 1978 + if (!run_collapse_range(run, vcn1, eat, done)) { 1982 1979 err = -ENOMEM; 1983 1980 goto out; 1984 1981 } ··· 1989 1980 if (svcn >= vcn) { 1990 1981 /* Shift VCN */ 1991 1982 attr->nres.svcn = cpu_to_le64(vcn); 1992 - if (le) { 1983 + if (le && attr->nres.svcn != le->vcn) { 1993 1984 le->vcn = attr->nres.svcn; 1994 1985 ni->attr_list.dirty = true; 1995 1986 } ··· 2000 1991 goto out; 2001 1992 2002 1993 next_svcn = le64_to_cpu(attr->nres.evcn) + 1; 2003 - if (next_svcn + eat < evcn1) { 1994 + if (next_svcn + eat + done < evcn1) { 2004 1995 err = ni_insert_nonresident( 2005 1996 ni, ATTR_DATA, NULL, 0, run, next_svcn, 2006 1997 evcn1 - eat - next_svcn, a_flags, &attr, ··· 2014 2005 2015 2006 /* Free all allocated memory. */ 2016 2007 run_truncate(run, 0); 2008 + done += eat; 2017 2009 } else { 2018 2010 u16 le_sz; 2019 - u16 roff = le16_to_cpu(attr->nres.run_off); 2020 - 2021 - if (roff > le32_to_cpu(attr->size)) { 2022 - err = -EINVAL; 2023 - goto out; 2024 - } 2025 - 2026 - run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, 2027 - evcn1 - 1, svcn, Add2Ptr(attr, roff), 2028 - le32_to_cpu(attr->size) - roff); 2029 2011 2030 2012 /* Delete this attribute segment. 
*/ 2031 2013 mi_remove_attr(NULL, mi, attr); ··· 2029 2029 goto out; 2030 2030 } 2031 2031 2032 + done += evcn1 - svcn; 2032 2033 if (evcn1 >= alen) 2033 2034 break; 2034 2035 ··· 2047 2046 err = -EINVAL; 2048 2047 goto out; 2049 2048 } 2050 - goto next_attr; 2049 + continue; 2051 2050 } 2052 2051 le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz); 2053 2052 } 2054 2053 2054 + next_attr: 2055 2055 if (evcn1 >= alen) 2056 2056 break; 2057 2057 ··· 2061 2059 err = -EINVAL; 2062 2060 goto out; 2063 2061 } 2064 - 2065 - next_attr: 2066 - svcn = le64_to_cpu(attr->nres.svcn); 2067 - evcn1 = le64_to_cpu(attr->nres.evcn) + 1; 2068 2062 } 2069 2063 2070 2064 if (!attr_b) { ··· 2550 2552 if (attr_load_runs(attr, ni, run, NULL)) 2551 2553 goto bad_inode; 2552 2554 2553 - if (!run_collapse_range(run, vcn, len)) 2555 + if (!run_collapse_range(run, vcn, len, 0)) 2554 2556 goto bad_inode; 2555 2557 2556 2558 if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
+2 -2
fs/ntfs3/ntfs_fs.h
··· 777 777 struct ATTRIB *attr); 778 778 bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes); 779 779 int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr, 780 - struct runs_tree *run, CLST len); 780 + const struct runs_tree *run, CLST len); 781 781 static inline bool mi_is_ref(const struct mft_inode *mi, 782 782 const struct MFT_REF *ref) 783 783 { ··· 812 812 void run_truncate_around(struct runs_tree *run, CLST vcn); 813 813 bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len, 814 814 bool is_mft); 815 - bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len); 815 + bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len, CLST sub); 816 816 bool run_insert_range(struct runs_tree *run, CLST vcn, CLST len); 817 817 bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn, 818 818 CLST *lcn, CLST *len);
+1 -1
fs/ntfs3/record.c
··· 621 621 * If failed record is not changed. 622 622 */ 623 623 int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr, 624 - struct runs_tree *run, CLST len) 624 + const struct runs_tree *run, CLST len) 625 625 { 626 626 int err = 0; 627 627 struct ntfs_sb_info *sbi = mi->sbi;
+9 -2
fs/ntfs3/run.c
··· 487 487 * Helper for attr_collapse_range(), 488 488 * which is helper for fallocate(collapse_range). 489 489 */ 490 - bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len) 490 + bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len, CLST sub) 491 491 { 492 492 size_t index, eat; 493 493 struct ntfs_run *r, *e, *eat_start, *eat_end; ··· 511 511 /* Collapse a middle part of normal run, split. */ 512 512 if (!run_add_entry(run, vcn, SPARSE_LCN, len, false)) 513 513 return false; 514 - return run_collapse_range(run, vcn, len); 514 + return run_collapse_range(run, vcn, len, sub); 515 515 } 516 516 517 517 r += 1; ··· 544 544 eat = eat_end - eat_start; 545 545 memmove(eat_start, eat_end, (e - eat_end) * sizeof(*r)); 546 546 run->count -= eat; 547 + 548 + if (sub) { 549 + e -= eat; 550 + for (r = run->runs; r < e; r++) { 551 + r->vcn -= sub; 552 + } 553 + } 547 554 548 555 return true; 549 556 }