Merge tag 'vfs-6.19-rc5.fixes' of gitolite.kernel.org:pub/scm/linux/kernel/git/vfs/vfs

Pull vfs fixes from Christian Brauner:

- Remove incorrect __user annotation from struct xattr_args::value

- Documentation fix: Add missing kernel-doc description for the @isnew
parameter in ilookup5_nowait() to silence Sphinx warnings

- Documentation fix: Fix kernel-doc comment for __start_dirop() - the
function name in the comment was wrong and the @state parameter was
undocumented

- Replace dynamic folio_batch allocation with stack allocation in
iomap_zero_range(). The dynamic allocation was problematic for the
ext4-on-iomap work (it didn't handle allocation failure properly) and
triggered lockdep complaints. A new iomap flag (IOMAP_F_FOLIO_BATCH)
now controls batch usage instead

- Re-add #ifdef guards around the PIDFD_GET_<ns-type>_NAMESPACE ioctls.
When a namespace type is disabled, ns->ops is NULL, which causes
crashes during inode eviction when the fd is closed. The ifdefs were
removed in a recent simplification but are still needed

- Fix a race in netfs where a folio could be unlocked before the
trailing zeroes (for an EOF that falls within the folio) were written

- Split out a dedicated lease_dispose_list() helper since lease code
paths always know they're disposing of leases. Removes unnecessary
runtime flag checks and prepares for upcoming lease_manager
enhancements

- Fix userland delegation requests succeeding despite conflicting
opens. Previously, FL_LAYOUT and FL_DELEG leases bypassed conflict
checks (a hack for nfsd). Adds a new ->lm_open_conflict() lease_manager
operation so userland delegations get proper conflict checking while
nfsd can continue its own conflict handling

- Fix LOOKUP_CACHED path lookups incorrectly falling through to the
slow path. After legitimize_links() calls were conditionally elided
(skipped when there are no links), unlazying no longer reliably failed
for LOOKUP_CACHED; it must always fail with LOOKUP_CACHED regardless
of whether there are any links. The flag is now checked at the two
callsites, try_to_unlazy() and try_to_unlazy_next(), before calling
legitimize_links()

- Fix a potential use-after-free in media fd allocation in
media_request_alloc(): the fd was published before the request was
fully initialized, so a racing close() could free it while it was
still being set up

- Fix mismatched API calls in ecryptfs_mknod(): it called
end_removing() instead of end_creating() after
ecryptfs_start_creating_dentry()

- Fix a dentry reference count leak in ecryptfs_mkdir(): a dget() of
the lower parent dir was added but never dput()'d, causing a BUG
during lower filesystem unmount due to the still-in-use dentry

* tag 'vfs-6.19-rc5.fixes' of gitolite.kernel.org:pub/scm/linux/kernel/git/vfs/vfs:
pidfs: protect PIDFD_GET_* ioctls() via ifdef
ecryptfs: Release lower parent dentry after creating dir
ecryptfs: Fix improper mknod pairing of start_creating()/end_removing()
get rid of bogus __user in struct xattr_args::value
VFS: fix __start_dirop() kernel-doc warnings
fs: Describe @isnew parameter in ilookup5_nowait()
fs: make sure to fail try_to_unlazy() and try_to_unlazy_next() for LOOKUP_CACHED
netfs: Fix early read unlock of page with EOF in middle
filelock: allow lease_managers to dictate what qualifies as a conflict
filelock: add lease_dispose_list() helper
iomap: replace folio_batch allocation with stack allocation
media: mc: fix potential use-after-free in media_request_alloc()

Changed files: +196 -97
Documentation/filesystems/locking.rst | +1

···
 lm_breaker_owns_lease: yes           no                no
 lm_lock_expirable      yes           no                no
 lm_expire_lock         no            no                yes
+lm_open_conflict       yes           no                no
 ====================== ============= ================= =========

 buffer_head
drivers/media/mc/mc-request.c | +3 -3

···
 
 	fd_prepare_file(fdf)->private_data = req;
 
-	*alloc_fd = fd_publish(fdf);
-
 	snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
-		 atomic_inc_return(&mdev->request_id), *alloc_fd);
+		 atomic_inc_return(&mdev->request_id), fd_prepare_fd(fdf));
 	dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);
+
+	*alloc_fd = fd_publish(fdf);
 
 	return 0;
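The ordering is the whole fix here: fd_publish() is the point after which
userspace owns the fd. An annotated sketch of the removed (broken) ordering,
reusing only the names visible in the hunk above:

    /* Broken ordering (pre-fix), annotated: */
    *alloc_fd = fd_publish(fdf);	/* fd now visible to userspace */

    /*
     * RACE WINDOW: another thread can close(*alloc_fd) here, dropping
     * the last reference and freeing 'req' ...
     */
    snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
             atomic_inc_return(&mdev->request_id), *alloc_fd);

    /*
     * ... making the snprintf() above a potential use-after-free. The
     * fix reads the fd number via fd_prepare_fd() while the fd is still
     * private and calls fd_publish() only as the final step.
     */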
fs/ecryptfs/inode.c | +2 -1

···
 	fsstack_copy_inode_size(dir, lower_dir);
 	set_nlink(dir, lower_dir->i_nlink);
 out:
+	dput(lower_dir_dentry);
 	end_creating(lower_dentry);
 	if (d_really_is_negative(dentry))
 		d_drop(dentry);
···
 	fsstack_copy_attr_times(dir, lower_dir);
 	fsstack_copy_inode_size(dir, lower_dir);
 out:
-	end_removing(lower_dentry);
+	end_creating(lower_dentry);
 	if (d_really_is_negative(dentry))
 		d_drop(dentry);
 	return rc;
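The mkdir leak follows the generic refcount balance rule; a minimal sketch
with a hypothetical work helper (do_lower_mkdir() is a stand-in, not the
actual ecryptfs code):

    static int mkdir_sketch(struct dentry *lower_dentry)
    {
            /* Pin the lower parent for the duration of the operation. */
            struct dentry *lower_dir_dentry = dget(lower_dentry->d_parent);
            int rc;

            rc = do_lower_mkdir(lower_dir_dentry, lower_dentry); /* hypothetical */

            /*
             * Every exit path must drop the reference taken above,
             * otherwise the dentry stays pinned and unmounting the lower
             * filesystem hits BUG() on the still-in-use dentry.
             */
            dput(lower_dir_dentry);
            return rc;
    }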
fs/inode.c | +3

···
  * @hashval: hash value (usually inode number) to search for
  * @test: callback used for comparisons between inodes
  * @data: opaque data pointer to pass to @test
+ * @isnew: return argument telling whether I_NEW was set when
+ *	   the inode was found in hash (the caller needs to
+ *	   wait for I_NEW to clear)
  *
  * Search for the inode specified by @hashval and @data in the inode cache.
  * If the inode is in the cache, the inode is returned with an incremented
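For context, a usage sketch of the contract being documented; the
five-argument call form and the wait step are inferred from the parameter
description above, so treat the details as illustrative:

    bool isnew;
    struct inode *inode = ilookup5_nowait(sb, hashval, test, data, &isnew);

    /*
     * The _nowait variant returns without waiting: if the inode was
     * found while another task is still initialising it, I_NEW is set
     * and @isnew reports that, so a sleeping caller must wait before
     * touching the inode.
     */
    if (inode && isnew)
            wait_on_inode(inode);	/* blocks until I_NEW clears */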
fs/iomap/buffered-io.c | +35 -15

···
 	if (!mapping_large_folio_support(iter->inode->i_mapping))
 		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
 
-	if (iter->fbatch) {
+	if (iter->iomap.flags & IOMAP_F_FOLIO_BATCH) {
 		struct folio *folio = folio_batch_next(iter->fbatch);
 
 		if (!folio)
···
 	 * process so return and let the caller iterate and refill the batch.
 	 */
 	if (!folio) {
-		WARN_ON_ONCE(!iter->fbatch);
+		WARN_ON_ONCE(!(iter->iomap.flags & IOMAP_F_FOLIO_BATCH));
 		return 0;
 	}
···
 	return status;
 }
 
-loff_t
+/**
+ * iomap_fill_dirty_folios - fill a folio batch with dirty folios
+ * @iter: Iteration structure
+ * @start: Start offset of range. Updated based on lookup progress.
+ * @end: End offset of range
+ * @iomap_flags: Flags to set on the associated iomap to track the batch.
+ *
+ * Returns the folio count directly. Also returns the associated control flag
+ * if the batch lookup is performed and the expected offset of a subsequent
+ * lookup via out params. The caller is responsible to set the flag on the
+ * associated iomap.
+ */
+unsigned int
 iomap_fill_dirty_folios(
 	struct iomap_iter *iter,
-	loff_t offset,
-	loff_t length)
+	loff_t *start,
+	loff_t end,
+	unsigned int *iomap_flags)
 {
 	struct address_space *mapping = iter->inode->i_mapping;
-	pgoff_t start = offset >> PAGE_SHIFT;
-	pgoff_t end = (offset + length - 1) >> PAGE_SHIFT;
+	pgoff_t pstart = *start >> PAGE_SHIFT;
+	pgoff_t pend = (end - 1) >> PAGE_SHIFT;
+	unsigned int count;
 
-	iter->fbatch = kmalloc(sizeof(struct folio_batch), GFP_KERNEL);
-	if (!iter->fbatch)
-		return offset + length;
-	folio_batch_init(iter->fbatch);
+	if (!iter->fbatch) {
+		*start = end;
+		return 0;
+	}
 
-	filemap_get_folios_dirty(mapping, &start, end, iter->fbatch);
-	return (start << PAGE_SHIFT);
+	count = filemap_get_folios_dirty(mapping, &pstart, pend, iter->fbatch);
+	*start = (pstart << PAGE_SHIFT);
+	*iomap_flags |= IOMAP_F_FOLIO_BATCH;
+	return count;
 }
 EXPORT_SYMBOL_GPL(iomap_fill_dirty_folios);
···
 		const struct iomap_ops *ops,
 		const struct iomap_write_ops *write_ops, void *private)
 {
+	struct folio_batch fbatch;
 	struct iomap_iter iter = {
 		.inode = inode,
 		.pos = pos,
 		.len = len,
 		.flags = IOMAP_ZERO,
 		.private = private,
+		.fbatch = &fbatch,
 	};
 	struct address_space *mapping = inode->i_mapping;
 	int ret;
 	bool range_dirty;
+
+	folio_batch_init(&fbatch);
 
 	/*
 	 * To avoid an unconditional flush, check pagecache state and only flush
···
 	while ((ret = iomap_iter(&iter, ops)) > 0) {
 		const struct iomap *srcmap = iomap_iter_srcmap(&iter);
 
-		if (WARN_ON_ONCE(iter.fbatch &&
+		if (WARN_ON_ONCE((iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
 				 srcmap->type != IOMAP_UNWRITTEN))
 			return -EIO;
 
-		if (!iter.fbatch &&
+		if (!(iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
 		    (srcmap->type == IOMAP_HOLE ||
 		     srcmap->type == IOMAP_UNWRITTEN)) {
 			s64 status;
fs/iomap/iter.c | +3 -3

···
 
 static inline void iomap_iter_reset_iomap(struct iomap_iter *iter)
 {
-	if (iter->fbatch) {
+	if (iter->iomap.flags & IOMAP_F_FOLIO_BATCH) {
 		folio_batch_release(iter->fbatch);
-		kfree(iter->fbatch);
-		iter->fbatch = NULL;
+		folio_batch_reinit(iter->fbatch);
+		iter->iomap.flags &= ~IOMAP_F_FOLIO_BATCH;
 	}
 
 	iter->status = 0;
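Combined with the iomap_zero_range() hunk above, the batch lifecycle becomes:
one stack batch initialised up front, then released and reinitialised per
iteration instead of freed. A condensed sketch (the loop condition is a
hypothetical stand-in for the iomap_iter() loop):

    struct folio_batch fbatch;

    folio_batch_init(&fbatch);	/* on the stack: no allocation to fail */

    while (more_mappings_to_process()) {	/* hypothetical */
            /*
             * iomap_fill_dirty_folios() may populate the batch and set
             * IOMAP_F_FOLIO_BATCH; the reset path then drops the folio
             * references and resets the count for the next fill.
             */
            folio_batch_release(&fbatch);
            folio_batch_reinit(&fbatch);
    }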
fs/locks.c | +61 -58

···
 	while (!list_empty(dispose)) {
 		flc = list_first_entry(dispose, struct file_lock_core, flc_list);
 		list_del_init(&flc->flc_list);
-		if (flc->flc_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
-			locks_free_lease(file_lease(flc));
-		else
-			locks_free_lock(file_lock(flc));
+		locks_free_lock(file_lock(flc));
+	}
+}
+
+static void
+lease_dispose_list(struct list_head *dispose)
+{
+	struct file_lock_core *flc;
+
+	while (!list_empty(dispose)) {
+		flc = list_first_entry(dispose, struct file_lock_core, flc_list);
+		list_del_init(&flc->flc_list);
+		locks_free_lease(file_lease(flc));
 	}
 }
···
 		__f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
 }
 
+/**
+ * lease_open_conflict - see if the given file points to an inode that has
+ *			 an existing open that would conflict with the
+ *			 desired lease.
+ * @filp: file to check
+ * @arg: type of lease that we're trying to acquire
+ *
+ * Check to see if there's an existing open fd on this file that would
+ * conflict with the lease we're trying to set.
+ */
+static int
+lease_open_conflict(struct file *filp, const int arg)
+{
+	struct inode *inode = file_inode(filp);
+	int self_wcount = 0, self_rcount = 0;
+
+	if (arg == F_RDLCK)
+		return inode_is_open_for_write(inode) ? -EAGAIN : 0;
+	else if (arg != F_WRLCK)
+		return 0;
+
+	/*
+	 * Make sure that only read/write count is from lease requestor.
+	 * Note that this will result in denying write leases when i_writecount
+	 * is negative, which is what we want. (We shouldn't grant write leases
+	 * on files open for execution.)
+	 */
+	if (filp->f_mode & FMODE_WRITE)
+		self_wcount = 1;
+	else if (filp->f_mode & FMODE_READ)
+		self_rcount = 1;
+
+	if (atomic_read(&inode->i_writecount) != self_wcount ||
+	    atomic_read(&inode->i_readcount) != self_rcount)
+		return -EAGAIN;
+
+	return 0;
+}
+
 static const struct lease_manager_operations lease_manager_ops = {
 	.lm_break = lease_break_callback,
 	.lm_change = lease_modify,
 	.lm_setup = lease_setup,
+	.lm_open_conflict = lease_open_conflict,
 };
 
 /*
···
 	spin_unlock(&ctx->flc_lock);
 	percpu_up_read(&file_rwsem);
 
-	locks_dispose_list(&dispose);
+	lease_dispose_list(&dispose);
 	error = wait_event_interruptible_timeout(new_fl->c.flc_wait,
 					list_empty(&new_fl->c.flc_blocked_member),
 					break_time);
···
 out:
 	spin_unlock(&ctx->flc_lock);
 	percpu_up_read(&file_rwsem);
-	locks_dispose_list(&dispose);
+	lease_dispose_list(&dispose);
 free_lock:
 	locks_free_lease(new_fl);
 	return error;
···
 		spin_unlock(&ctx->flc_lock);
 		percpu_up_read(&file_rwsem);
 
-		locks_dispose_list(&dispose);
+		lease_dispose_list(&dispose);
 	}
 	return type;
 }
···
 	if (deleg->d_flags != 0 || deleg->__pad != 0)
 		return -EINVAL;
 	deleg->d_type = __fcntl_getlease(filp, FL_DELEG);
-	return 0;
-}
-
-/**
- * check_conflicting_open - see if the given file points to an inode that has
- *			    an existing open that would conflict with the
- *			    desired lease.
- * @filp: file to check
- * @arg: type of lease that we're trying to acquire
- * @flags: current lock flags
- *
- * Check to see if there's an existing open fd on this file that would
- * conflict with the lease we're trying to set.
- */
-static int
-check_conflicting_open(struct file *filp, const int arg, int flags)
-{
-	struct inode *inode = file_inode(filp);
-	int self_wcount = 0, self_rcount = 0;
-
-	if (flags & FL_LAYOUT)
-		return 0;
-	if (flags & FL_DELEG)
-		/* We leave these checks to the caller */
-		return 0;
-
-	if (arg == F_RDLCK)
-		return inode_is_open_for_write(inode) ? -EAGAIN : 0;
-	else if (arg != F_WRLCK)
-		return 0;
-
-	/*
-	 * Make sure that only read/write count is from lease requestor.
-	 * Note that this will result in denying write leases when i_writecount
-	 * is negative, which is what we want. (We shouldn't grant write leases
-	 * on files open for execution.)
-	 */
-	if (filp->f_mode & FMODE_WRITE)
-		self_wcount = 1;
-	else if (filp->f_mode & FMODE_READ)
-		self_rcount = 1;
-
-	if (atomic_read(&inode->i_writecount) != self_wcount ||
-	    atomic_read(&inode->i_readcount) != self_rcount)
-		return -EAGAIN;
-
 	return 0;
 }
···
 	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	time_out_leases(inode, &dispose);
-	error = check_conflicting_open(filp, arg, lease->c.flc_flags);
+	error = lease->fl_lmops->lm_open_conflict(filp, arg);
 	if (error)
 		goto out;
···
 	 * precedes these checks.
 	 */
 	smp_mb();
-	error = check_conflicting_open(filp, arg, lease->c.flc_flags);
+	error = lease->fl_lmops->lm_open_conflict(filp, arg);
 	if (error) {
 		locks_unlink_lock_ctx(&lease->c);
 		goto out;
···
 out:
 	spin_unlock(&ctx->flc_lock);
 	percpu_up_read(&file_rwsem);
-	locks_dispose_list(&dispose);
+	lease_dispose_list(&dispose);
 	if (is_deleg)
 		inode_unlock(inode);
 	if (!error && !my_fl)
···
 	error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
 	spin_unlock(&ctx->flc_lock);
 	percpu_up_read(&file_rwsem);
-	locks_dispose_list(&dispose);
+	lease_dispose_list(&dispose);
 	return error;
 }
···
 	spin_unlock(&ctx->flc_lock);
 	percpu_up_read(&file_rwsem);
 
-	locks_dispose_list(&dispose);
+	lease_dispose_list(&dispose);
 }
 
 /*
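A worked example of the open-count arithmetic in lease_open_conflict() above,
for a write-lease request on a file the requester opened read-only:

    /*
     * F_SETLEASE with F_WRLCK on an fd opened O_RDONLY, while one
     * other process also has the file open for reading:
     *
     *   self_wcount = 0, self_rcount = 1   (requester's own open)
     *   i_writecount == 0                  (matches self_wcount)
     *   i_readcount  == 2                  (requester + other reader)
     *
     * i_readcount != self_rcount  =>  -EAGAIN: conflicting open.
     * Once the other reader closes its fd, i_readcount == 1 and the
     * write lease can be granted.
     */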
fs/namei.c | +15 -6

···
 static bool legitimize_links(struct nameidata *nd)
 {
 	int i;
-	if (unlikely(nd->flags & LOOKUP_CACHED)) {
-		drop_links(nd);
-		nd->depth = 0;
-		return false;
-	}
+
+	VFS_BUG_ON(nd->flags & LOOKUP_CACHED);
+
 	for (i = 0; i < nd->depth; i++) {
 		struct saved *last = nd->stack + i;
 		if (unlikely(!legitimize_path(nd, &last->link, last->seq))) {
···
 
 	BUG_ON(!(nd->flags & LOOKUP_RCU));
 
+	if (unlikely(nd->flags & LOOKUP_CACHED)) {
+		drop_links(nd);
+		nd->depth = 0;
+		goto out1;
+	}
 	if (unlikely(nd->depth && !legitimize_links(nd)))
 		goto out1;
 	if (unlikely(!legitimize_path(nd, &nd->path, nd->seq)))
···
 	int res;
 	BUG_ON(!(nd->flags & LOOKUP_RCU));
 
+	if (unlikely(nd->flags & LOOKUP_CACHED)) {
+		drop_links(nd);
+		nd->depth = 0;
+		goto out2;
+	}
 	if (unlikely(nd->depth && !legitimize_links(nd)))
 		goto out2;
 	res = __legitimize_mnt(nd->path.mnt, nd->m_seq);
···
 }
 
 /**
- * start_dirop - begin a create or remove dirop, performing locking and lookup
+ * __start_dirop - begin a create or remove dirop, performing locking and lookup
  * @parent: the dentry of the parent in which the operation will occur
  * @name: a qstr holding the name within that parent
  * @lookup_flags: intent and other lookup flags.
+ * @state: task state bitmask
  *
  * The lookup is performed and necessary locks are taken so that, on success,
  * the returned dentry can be operated on safely.
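The user-visible contract being restored here is that of openat2(2) with
RESOLVE_CACHED, which maps to LOOKUP_CACHED internally: the lookup must fail
with -EAGAIN rather than drop to the blocking slow path. A minimal userspace
sketch:

    #include <errno.h>
    #include <fcntl.h>
    #include <linux/openat2.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int open_cached(const char *path)
    {
            struct open_how how = {
                    .flags   = O_RDONLY,
                    .resolve = RESOLVE_CACHED,	/* dcache only, never block */
            };
            int fd = syscall(SYS_openat2, AT_FDCWD, path, &how, sizeof(how));

            /* Not fully resolvable from the dcache: the caller should
             * retry without RESOLVE_CACHED from a context that may block. */
            if (fd < 0 && errno == EAGAIN)
                    return -EAGAIN;
            return fd;
    }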
fs/netfs/read_collect.c | +1 -1

···
 	rreq->front_folio_order = order;
 	fsize = PAGE_SIZE << order;
 	fpos = folio_pos(folio);
-	fend = umin(fpos + fsize, rreq->i_size);
+	fend = fpos + fsize;
 
 	trace_netfs_collect_folio(rreq, folio, fend, collected_to);
fs/nfsd/nfs4layouts.c | +21 -2

···
 	return lease_modify(onlist, arg, dispose);
 }
 
+/**
+ * nfsd4_layout_lm_open_conflict - see if the given file points to an inode that has
+ *				   an existing open that would conflict with the
+ *				   desired lease.
+ * @filp: file to check
+ * @arg: type of lease that we're trying to acquire
+ *
+ * The kernel will call into this operation to determine whether there
+ * are conflicting opens that may prevent the layout from being granted.
+ * For nfsd, that check is done at a higher level, so this trivially
+ * returns 0.
+ */
+static int
+nfsd4_layout_lm_open_conflict(struct file *filp, int arg)
+{
+	return 0;
+}
+
 static const struct lease_manager_operations nfsd4_layouts_lm_ops = {
-	.lm_break	= nfsd4_layout_lm_break,
-	.lm_change	= nfsd4_layout_lm_change,
+	.lm_break		= nfsd4_layout_lm_break,
+	.lm_change		= nfsd4_layout_lm_change,
+	.lm_open_conflict	= nfsd4_layout_lm_open_conflict,
 };
 
 int
fs/nfsd/nfs4state.c | +19

···
 	return -EAGAIN;
 }
 
+/**
+ * nfsd4_deleg_lm_open_conflict - see if the given file points to an inode that has
+ *				  an existing open that would conflict with the
+ *				  desired lease.
+ * @filp: file to check
+ * @arg: type of lease that we're trying to acquire
+ *
+ * The kernel will call into this operation to determine whether there
+ * are conflicting opens that may prevent the deleg from being granted.
+ * For nfsd, that check is done at a higher level, so this trivially
+ * returns 0.
+ */
+static int
+nfsd4_deleg_lm_open_conflict(struct file *filp, int arg)
+{
+	return 0;
+}
+
 static const struct lease_manager_operations nfsd_lease_mng_ops = {
 	.lm_breaker_owns_lease = nfsd_breaker_owns_lease,
 	.lm_break = nfsd_break_deleg_cb,
 	.lm_change = nfsd_change_deleg_cb,
+	.lm_open_conflict = nfsd4_deleg_lm_open_conflict,
 };
 
 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
fs/pidfs.c | +18

···
 	switch (cmd) {
 	/* Namespaces that hang of nsproxy. */
 	case PIDFD_GET_CGROUP_NAMESPACE:
+#ifdef CONFIG_CGROUPS
 		if (!ns_ref_get(nsp->cgroup_ns))
 			break;
 		ns_common = to_ns_common(nsp->cgroup_ns);
+#endif
 		break;
 	case PIDFD_GET_IPC_NAMESPACE:
+#ifdef CONFIG_IPC_NS
 		if (!ns_ref_get(nsp->ipc_ns))
 			break;
 		ns_common = to_ns_common(nsp->ipc_ns);
+#endif
 		break;
 	case PIDFD_GET_MNT_NAMESPACE:
 		if (!ns_ref_get(nsp->mnt_ns))
···
 		ns_common = to_ns_common(nsp->mnt_ns);
 		break;
 	case PIDFD_GET_NET_NAMESPACE:
+#ifdef CONFIG_NET_NS
 		if (!ns_ref_get(nsp->net_ns))
 			break;
 		ns_common = to_ns_common(nsp->net_ns);
+#endif
 		break;
 	case PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE:
+#ifdef CONFIG_PID_NS
 		if (!ns_ref_get(nsp->pid_ns_for_children))
 			break;
 		ns_common = to_ns_common(nsp->pid_ns_for_children);
+#endif
 		break;
 	case PIDFD_GET_TIME_NAMESPACE:
+#ifdef CONFIG_TIME_NS
 		if (!ns_ref_get(nsp->time_ns))
 			break;
 		ns_common = to_ns_common(nsp->time_ns);
+#endif
 		break;
 	case PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE:
+#ifdef CONFIG_TIME_NS
 		if (!ns_ref_get(nsp->time_ns_for_children))
 			break;
 		ns_common = to_ns_common(nsp->time_ns_for_children);
+#endif
 		break;
 	case PIDFD_GET_UTS_NAMESPACE:
+#ifdef CONFIG_UTS_NS
 		if (!ns_ref_get(nsp->uts_ns))
 			break;
 		ns_common = to_ns_common(nsp->uts_ns);
+#endif
 		break;
 	/* Namespaces that don't hang of nsproxy. */
 	case PIDFD_GET_USER_NAMESPACE:
+#ifdef CONFIG_USER_NS
 		scoped_guard(rcu) {
 			struct user_namespace *user_ns;
 
···
 				break;
 			ns_common = to_ns_common(user_ns);
 		}
+#endif
 		break;
 	case PIDFD_GET_PID_NAMESPACE:
+#ifdef CONFIG_PID_NS
 		scoped_guard(rcu) {
 			struct pid_namespace *pid_ns;
 
···
 				break;
 			ns_common = to_ns_common(pid_ns);
 		}
+#endif
 		break;
 	default:
 		return -ENOIOCTLCMD;
fs/xfs/xfs_iomap.c | +6 -5

···
 	 */
 	if (flags & IOMAP_ZERO) {
 		xfs_fileoff_t eof_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
-		u64 end;
 
 		if (isnullstartblock(imap.br_startblock) &&
 		    offset_fsb >= eof_fsb)
···
 		 */
 		if (imap.br_state == XFS_EXT_UNWRITTEN &&
 		    offset_fsb < eof_fsb) {
-			loff_t len = min(count,
-					 XFS_FSB_TO_B(mp, imap.br_blockcount));
+			loff_t foffset = offset, fend;
 
-			end = iomap_fill_dirty_folios(iter, offset, len);
+			fend = offset +
+				min(count, XFS_FSB_TO_B(mp, imap.br_blockcount));
+			iomap_fill_dirty_folios(iter, &foffset, fend,
+						&iomap_flags);
 			end_fsb = min_t(xfs_fileoff_t, end_fsb,
-					XFS_B_TO_FSB(mp, end));
+					XFS_B_TO_FSB(mp, foffset));
 		}
 
 		xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);
include/linux/filelock.h | +1

···
 	int (*lm_change)(struct file_lease *, int, struct list_head *);
 	void (*lm_setup)(struct file_lease *, void **);
 	bool (*lm_breaker_owns_lease)(struct file_lease *);
+	int (*lm_open_conflict)(struct file *, int);
 };
 
 struct lock_manager {
include/linux/iomap.h | +6 -2

···
 /*
  * Flags set by the core iomap code during operations:
  *
+ * IOMAP_F_FOLIO_BATCH indicates that the folio batch mechanism is active
+ * for this operation, set by iomap_fill_dirty_folios().
+ *
  * IOMAP_F_SIZE_CHANGED indicates to the iomap_end method that the file size
  * has changed as the result of this write operation.
  *
···
  * range it covers needs to be remapped by the high level before the operation
  * can proceed.
  */
+#define IOMAP_F_FOLIO_BATCH	(1U << 13)
 #define IOMAP_F_SIZE_CHANGED	(1U << 14)
 #define IOMAP_F_STALE		(1U << 15)
 
···
 int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
 		const struct iomap_ops *ops,
 		const struct iomap_write_ops *write_ops);
-loff_t iomap_fill_dirty_folios(struct iomap_iter *iter, loff_t offset,
-		loff_t length);
+unsigned int iomap_fill_dirty_folios(struct iomap_iter *iter, loff_t *start,
+		loff_t end, unsigned int *iomap_flags);
 int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
 		bool *did_zero, const struct iomap_ops *ops,
 		const struct iomap_write_ops *write_ops, void *private);
include/uapi/linux/xattr.h | +1 -1

···
 #define XATTR_REPLACE	0x2	/* set value, fail if attr does not exist */
 
 struct xattr_args {
-	__aligned_u64 __user value;
+	__aligned_u64 value;
 	__u32 size;
 	__u32 flags;
 };
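Why dropping __user is correct: __aligned_u64 is a plain integer in the UAPI,
and only the pointer the kernel later derives from it is a user pointer. A
sketch of both sides, assuming the usual u64_to_user_ptr() conversion on the
kernel end (the setxattrat(2) call shape is illustrative):

    /* Userspace: the buffer pointer travels inside an integer field. */
    struct xattr_args args = {
            .value = (__u64)(uintptr_t)buf,	/* an integer, not a pointer type */
            .size  = buflen,
            .flags = 0,
    };
    /* ... passed to setxattrat(dfd, path, at_flags, name, &args, sizeof(args)) */

    /* Kernel (sketch): the __user annotation belongs on the converted pointer. */
    const void __user *uvalue = u64_to_user_ptr(args.value);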