Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ceph: clean up inode work queueing

Add a generic function for taking an inode reference, setting the I_WORK
bit and queueing i_work. Turn the ceph_queue_* functions into static
inline wrappers that pass in the right bit.

Signed-off-by: Jeff Layton <jlayton@kernel.org>
Reviewed-by: Ilya Dryomov <idryomov@gmail.com>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>

Authored by Jeff Layton and committed by Ilya Dryomov.
64f28c62 64f36da5

+24 -52
+6 -49
fs/ceph/inode.c
@@ -1816,60 +1816,17 @@
 	}
 }
 
-/*
- * Write back inode data in a worker thread. (This can't be done
- * in the message handler context.)
- */
-void ceph_queue_writeback(struct inode *inode)
+void ceph_queue_inode_work(struct inode *inode, int work_bit)
 {
+	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 	struct ceph_inode_info *ci = ceph_inode(inode);
-	set_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask);
+	set_bit(work_bit, &ci->i_work_mask);
 
 	ihold(inode);
-	if (queue_work(ceph_inode_to_client(inode)->inode_wq,
-		       &ci->i_work)) {
-		dout("ceph_queue_writeback %p\n", inode);
+	if (queue_work(fsc->inode_wq, &ci->i_work)) {
+		dout("queue_inode_work %p, mask=%lx\n", inode, ci->i_work_mask);
 	} else {
-		dout("ceph_queue_writeback %p already queued, mask=%lx\n",
-		     inode, ci->i_work_mask);
-		iput(inode);
-	}
-}
-
-/*
- * queue an async invalidation
- */
-void ceph_queue_invalidate(struct inode *inode)
-{
-	struct ceph_inode_info *ci = ceph_inode(inode);
-	set_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask);
-
-	ihold(inode);
-	if (queue_work(ceph_inode_to_client(inode)->inode_wq,
-		       &ceph_inode(inode)->i_work)) {
-		dout("ceph_queue_invalidate %p\n", inode);
-	} else {
-		dout("ceph_queue_invalidate %p already queued, mask=%lx\n",
-		     inode, ci->i_work_mask);
-		iput(inode);
-	}
-}
-
-/*
- * Queue an async vmtruncate. If we fail to queue work, we will handle
- * the truncation the next time we call __ceph_do_pending_vmtruncate.
- */
-void ceph_queue_vmtruncate(struct inode *inode)
-{
-	struct ceph_inode_info *ci = ceph_inode(inode);
-	set_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask);
-
-	ihold(inode);
-	if (queue_work(ceph_inode_to_client(inode)->inode_wq,
-		       &ci->i_work)) {
-		dout("ceph_queue_vmtruncate %p\n", inode);
-	} else {
-		dout("ceph_queue_vmtruncate %p already queued, mask=%lx\n",
+		dout("queue_inode_work %p already queued, mask=%lx\n",
 		     inode, ci->i_work_mask);
 		iput(inode);
 	}
+18 -3
fs/ceph/super.h
@@ -962,10 +962,25 @@
 
 extern bool ceph_inode_set_size(struct inode *inode, loff_t size);
 extern void __ceph_do_pending_vmtruncate(struct inode *inode);
-extern void ceph_queue_vmtruncate(struct inode *inode);
-extern void ceph_queue_invalidate(struct inode *inode);
-extern void ceph_queue_writeback(struct inode *inode);
+
 extern void ceph_async_iput(struct inode *inode);
+
+void ceph_queue_inode_work(struct inode *inode, int work_bit);
+
+static inline void ceph_queue_vmtruncate(struct inode *inode)
+{
+	ceph_queue_inode_work(inode, CEPH_I_WORK_VMTRUNCATE);
+}
+
+static inline void ceph_queue_invalidate(struct inode *inode)
+{
+	ceph_queue_inode_work(inode, CEPH_I_WORK_INVALIDATE_PAGES);
+}
+
+static inline void ceph_queue_writeback(struct inode *inode)
+{
+	ceph_queue_inode_work(inode, CEPH_I_WORK_WRITEBACK);
+}
 
 extern int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
 			     int mask, bool force);