Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull more vfs updates from Al Viro:
"Assorted stuff from this cycle. The big ones here are multilayer
overlayfs from Miklos and beginning of sorting ->d_inode accesses out
from David"

* 'for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (51 commits)
autofs4 copy_dev_ioctl(): keep the value of ->size we'd used for allocation
procfs: fix race between symlink removals and traversals
debugfs: leave freeing a symlink body until inode eviction
Documentation/filesystems/Locking: ->get_sb() is long gone
trylock_super(): replacement for grab_super_passive()
fanotify: Fix up scripted S_ISDIR/S_ISREG/S_ISLNK conversions
Cachefiles: Fix up scripted S_ISDIR/S_ISREG/S_ISLNK conversions
VFS: (Scripted) Convert S_ISLNK/DIR/REG(dentry->d_inode) to d_is_*(dentry)
SELinux: Use d_is_positive() rather than testing dentry->d_inode
Smack: Use d_is_positive() rather than testing dentry->d_inode
TOMOYO: Use d_is_dir() rather than d_inode and S_ISDIR()
Apparmor: Use d_is_positive/negative() rather than testing dentry->d_inode
Apparmor: mediated_filesystem() should use dentry->d_sb not inode->i_sb
VFS: Split DCACHE_FILE_TYPE into regular and special types
VFS: Add a fallthrough flag for marking virtual dentries
VFS: Add a whiteout dentry type
VFS: Introduce inode-getting helpers for layered/unioned fs environments
Infiniband: Fix potential NULL d_inode dereference
posix_acl: fix reference leaks in posix_acl_create
autofs4: Wrong format for printing dentry
...

+927 -778
-2
Documentation/filesystems/Locking
··· 164 164 165 165 --------------------------- file_system_type --------------------------- 166 166 prototypes: 167 - int (*get_sb) (struct file_system_type *, int, 168 - const char *, void *, struct vfsmount *); 169 167 struct dentry *(*mount) (struct file_system_type *, int, 170 168 const char *, void *); 171 169 void (*kill_sb) (struct super_block *);
+28
Documentation/filesystems/overlayfs.txt
··· 159 159 rename or unlink will of course be noticed and handled). 160 160 161 161 162 + Multiple lower layers 163 + --------------------- 164 + 165 + Multiple lower layers can now be given using the colon (":") as a 166 + separator character between the directory names. For example: 167 + 168 + mount -t overlay overlay -olowerdir=/lower1:/lower2:/lower3 /merged 169 + 170 + As the example shows, "upperdir=" and "workdir=" may be omitted. In 171 + that case the overlay will be read-only. 172 + 173 + The specified lower directories will be stacked beginning from the 174 + rightmost one and going left. In the above example lower1 will be the 175 + top, lower2 the middle and lower3 the bottom layer. 176 + 177 + 162 178 Non-standard behavior 163 179 --------------------- ··· 212 196 filesystem are not allowed. If the underlying filesystem is changed, 213 197 the behavior of the overlay is undefined, though it will not result in 214 198 a crash or deadlock. 199 + 200 + Testsuite 201 + --------- 202 + 203 + There's a testsuite developed by David Howells at: 204 + 205 + git://git.infradead.org/users/dhowells/unionmount-testsuite.git 206 + 207 + Run as root: 208 + 209 + # cd unionmount-testsuite 210 + # ./run --ov
+24 -27
arch/s390/hypfs/inode.c
··· 74 74 parent = dentry->d_parent; 75 75 mutex_lock(&parent->d_inode->i_mutex); 76 76 if (hypfs_positive(dentry)) { 77 - if (S_ISDIR(dentry->d_inode->i_mode)) 77 + if (d_is_dir(dentry)) 78 78 simple_rmdir(parent->d_inode, dentry); 79 79 else 80 80 simple_unlink(parent->d_inode, dentry); ··· 144 144 return nonseekable_open(inode, filp); 145 145 } 146 146 147 - static ssize_t hypfs_aio_read(struct kiocb *iocb, const struct iovec *iov, 148 - unsigned long nr_segs, loff_t offset) 147 + static ssize_t hypfs_read_iter(struct kiocb *iocb, struct iov_iter *to) 149 148 { 150 - char *data; 151 - ssize_t ret; 152 - struct file *filp = iocb->ki_filp; 153 - /* XXX: temporary */ 154 - char __user *buf = iov[0].iov_base; 155 - size_t count = iov[0].iov_len; 149 + struct file *file = iocb->ki_filp; 150 + char *data = file->private_data; 151 + size_t available = strlen(data); 152 + loff_t pos = iocb->ki_pos; 153 + size_t count; 156 154 157 - if (nr_segs != 1) 155 + if (pos < 0) 158 156 return -EINVAL; 159 - 160 - data = filp->private_data; 161 - ret = simple_read_from_buffer(buf, count, &offset, data, strlen(data)); 162 - if (ret <= 0) 163 - return ret; 164 - 165 - iocb->ki_pos += ret; 166 - file_accessed(filp); 167 - 168 - return ret; 157 + if (pos >= available || !iov_iter_count(to)) 158 + return 0; 159 + count = copy_to_iter(data + pos, available - pos, to); 160 + if (!count) 161 + return -EFAULT; 162 + iocb->ki_pos = pos + count; 163 + file_accessed(file); 164 + return count; 169 165 } 170 - static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov, 171 - unsigned long nr_segs, loff_t offset) 166 + 167 + static ssize_t hypfs_write_iter(struct kiocb *iocb, struct iov_iter *from) 172 168 { 173 169 int rc; 174 170 struct super_block *sb = file_inode(iocb->ki_filp)->i_sb; 175 171 struct hypfs_sb_info *fs_info = sb->s_fs_info; 176 - size_t count = iov_length(iov, nr_segs); 172 + size_t count = iov_iter_count(from); 177 173 178 174 /* 179 175 * Currently we only 
allow one update per second for two reasons: ··· 198 202 } 199 203 hypfs_update_update(sb); 200 204 rc = count; 205 + iov_iter_advance(from, count); 201 206 out: 202 207 mutex_unlock(&fs_info->lock); 203 208 return rc; ··· 437 440 static const struct file_operations hypfs_file_ops = { 438 441 .open = hypfs_open, 439 442 .release = hypfs_release, 440 - .read = do_sync_read, 441 - .write = do_sync_write, 442 - .aio_read = hypfs_aio_read, 443 - .aio_write = hypfs_aio_write, 443 + .read = new_sync_read, 444 + .write = new_sync_write, 445 + .read_iter = hypfs_read_iter, 446 + .write_iter = hypfs_write_iter, 444 447 .llseek = no_llseek, 445 448 }; 446 449
+1 -1
drivers/infiniband/hw/ipath/ipath_fs.c
··· 277 277 } 278 278 279 279 spin_lock(&tmp->d_lock); 280 - if (!(d_unhashed(tmp) && tmp->d_inode)) { 280 + if (!d_unhashed(tmp) && tmp->d_inode) { 281 281 dget_dlock(tmp); 282 282 __d_drop(tmp); 283 283 spin_unlock(&tmp->d_lock);
+1 -1
drivers/infiniband/hw/qib/qib_fs.c
··· 455 455 } 456 456 457 457 spin_lock(&tmp->d_lock); 458 - if (!(d_unhashed(tmp) && tmp->d_inode)) { 458 + if (!d_unhashed(tmp) && tmp->d_inode) { 459 459 __d_drop(tmp); 460 460 spin_unlock(&tmp->d_lock); 461 461 simple_unlink(parent->d_inode, tmp);
+5 -7
drivers/staging/lustre/lustre/llite/dcache.c
··· 270 270 271 271 int ll_revalidate_it_finish(struct ptlrpc_request *request, 272 272 struct lookup_intent *it, 273 - struct dentry *de) 273 + struct inode *inode) 274 274 { 275 275 int rc = 0; 276 276 ··· 280 280 if (it_disposition(it, DISP_LOOKUP_NEG)) 281 281 return -ENOENT; 282 282 283 - rc = ll_prep_inode(&de->d_inode, request, NULL, it); 283 + rc = ll_prep_inode(&inode, request, NULL, it); 284 284 285 285 return rc; 286 286 } 287 287 288 - void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry) 288 + void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode) 289 289 { 290 290 LASSERT(it != NULL); 291 - LASSERT(dentry != NULL); 292 291 293 - if (it->d.lustre.it_lock_mode && dentry->d_inode != NULL) { 294 - struct inode *inode = dentry->d_inode; 295 - struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode); 292 + if (it->d.lustre.it_lock_mode && inode != NULL) { 293 + struct ll_sb_info *sbi = ll_i2sbi(inode); 296 294 297 295 CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n", 298 296 inode, inode->i_ino, inode->i_generation);
+4 -4
drivers/staging/lustre/lustre/llite/file.c
··· 2912 2912 oit.it_op = IT_LOOKUP; 2913 2913 2914 2914 /* Call getattr by fid, so do not provide name at all. */ 2915 - op_data = ll_prep_md_op_data(NULL, dentry->d_inode, 2916 - dentry->d_inode, NULL, 0, 0, 2915 + op_data = ll_prep_md_op_data(NULL, inode, 2916 + inode, NULL, 0, 0, 2917 2917 LUSTRE_OPC_ANY, NULL); 2918 2918 if (IS_ERR(op_data)) 2919 2919 return PTR_ERR(op_data); ··· 2931 2931 goto out; 2932 2932 } 2933 2933 2934 - rc = ll_revalidate_it_finish(req, &oit, dentry); 2934 + rc = ll_revalidate_it_finish(req, &oit, inode); 2935 2935 if (rc != 0) { 2936 2936 ll_intent_release(&oit); 2937 2937 goto out; ··· 2944 2944 if (!dentry->d_inode->i_nlink) 2945 2945 d_lustre_invalidate(dentry, 0); 2946 2946 2947 - ll_lookup_finish_locks(&oit, dentry); 2947 + ll_lookup_finish_locks(&oit, inode); 2948 2948 } else if (!ll_have_md_lock(dentry->d_inode, &ibits, LCK_MINMODE)) { 2949 2949 struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode); 2950 2950 u64 valid = OBD_MD_FLGETATTR;
+2 -2
drivers/staging/lustre/lustre/llite/llite_internal.h
··· 786 786 void ll_intent_drop_lock(struct lookup_intent *); 787 787 void ll_intent_release(struct lookup_intent *); 788 788 void ll_invalidate_aliases(struct inode *); 789 - void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry); 789 + void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode); 790 790 int ll_revalidate_it_finish(struct ptlrpc_request *request, 791 - struct lookup_intent *it, struct dentry *de); 791 + struct lookup_intent *it, struct inode *inode); 792 792 793 793 /* llite/llite_lib.c */ 794 794 extern struct super_operations lustre_super_operations;
+7 -5
drivers/staging/lustre/lustre/llite/namei.c
··· 481 481 struct lookup_intent lookup_it = { .it_op = IT_LOOKUP }; 482 482 struct dentry *save = dentry, *retval; 483 483 struct ptlrpc_request *req = NULL; 484 + struct inode *inode; 484 485 struct md_op_data *op_data; 485 486 __u32 opc; 486 487 int rc; ··· 540 539 goto out; 541 540 } 542 541 543 - if ((it->it_op & IT_OPEN) && dentry->d_inode && 544 - !S_ISREG(dentry->d_inode->i_mode) && 545 - !S_ISDIR(dentry->d_inode->i_mode)) { 546 - ll_release_openhandle(dentry->d_inode, it); 542 + inode = dentry->d_inode; 543 + if ((it->it_op & IT_OPEN) && inode && 544 + !S_ISREG(inode->i_mode) && 545 + !S_ISDIR(inode->i_mode)) { 546 + ll_release_openhandle(inode, it); 547 547 } 548 - ll_lookup_finish_locks(it, dentry); 548 + ll_lookup_finish_locks(it, inode); 549 549 550 550 if (dentry == save) 551 551 retval = NULL;
+1 -1
fs/9p/vfs_inode.c
··· 1127 1127 } 1128 1128 1129 1129 /* Write all dirty data */ 1130 - if (S_ISREG(dentry->d_inode->i_mode)) 1130 + if (d_is_reg(dentry)) 1131 1131 filemap_write_and_wait(dentry->d_inode->i_mapping); 1132 1132 1133 1133 retval = p9_client_wstat(fid, &wstat);
+3 -3
fs/aio.c
··· 1285 1285 1286 1286 ret = -EINVAL; 1287 1287 if (unlikely(ctx || nr_events == 0)) { 1288 - pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n", 1288 + pr_debug("EINVAL: ctx %lu nr_events %u\n", 1289 1289 ctx, nr_events); 1290 1290 goto out; 1291 1291 } ··· 1333 1333 1334 1334 return ret; 1335 1335 } 1336 - pr_debug("EINVAL: io_destroy: invalid context id\n"); 1336 + pr_debug("EINVAL: invalid context id\n"); 1337 1337 return -EINVAL; 1338 1338 } 1339 1339 ··· 1515 1515 (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) || 1516 1516 ((ssize_t)iocb->aio_nbytes < 0) 1517 1517 )) { 1518 - pr_debug("EINVAL: io_submit: overflow check\n"); 1518 + pr_debug("EINVAL: overflow check\n"); 1519 1519 return -EINVAL; 1520 1520 } 1521 1521
+6 -2
fs/autofs4/dev-ioctl.c
··· 95 95 */ 96 96 static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in) 97 97 { 98 - struct autofs_dev_ioctl tmp; 98 + struct autofs_dev_ioctl tmp, *res; 99 99 100 100 if (copy_from_user(&tmp, in, sizeof(tmp))) 101 101 return ERR_PTR(-EFAULT); ··· 106 106 if (tmp.size > (PATH_MAX + sizeof(tmp))) 107 107 return ERR_PTR(-ENAMETOOLONG); 108 108 109 - return memdup_user(in, tmp.size); 109 + res = memdup_user(in, tmp.size); 110 + if (!IS_ERR(res)) 111 + res->size = tmp.size; 112 + 113 + return res; 110 114 } 111 115 112 116 static inline void free_dev_ioctl(struct autofs_dev_ioctl *param)
+1 -1
fs/autofs4/expire.c
··· 374 374 return NULL; 375 375 } 376 376 377 - if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)) { 377 + if (dentry->d_inode && d_is_symlink(dentry)) { 378 378 DPRINTK("checking symlink %p %pd", dentry, dentry); 379 379 /* 380 380 * A symlink can't be "busy" in the usual sense so
+3 -3
fs/autofs4/root.c
··· 108 108 struct dentry *dentry = file->f_path.dentry; 109 109 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); 110 110 111 - DPRINTK("file=%p dentry=%p %pD", file, dentry, dentry); 111 + DPRINTK("file=%p dentry=%p %pd", file, dentry, dentry); 112 112 113 113 if (autofs4_oz_mode(sbi)) 114 114 goto out; ··· 371 371 * having d_mountpoint() true, so there's no need to call back 372 372 * to the daemon. 373 373 */ 374 - if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)) { 374 + if (dentry->d_inode && d_is_symlink(dentry)) { 375 375 spin_unlock(&sbi->fs_lock); 376 376 goto done; 377 377 } ··· 485 485 * an incorrect ELOOP error return. 486 486 */ 487 487 if ((!d_mountpoint(dentry) && !simple_empty(dentry)) || 488 - (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode))) 488 + (dentry->d_inode && d_is_symlink(dentry))) 489 489 status = -EISDIR; 490 490 } 491 491 spin_unlock(&sbi->fs_lock);
-147
fs/bad_inode.c
··· 15 15 #include <linux/namei.h> 16 16 #include <linux/poll.h> 17 17 18 - 19 - static loff_t bad_file_llseek(struct file *file, loff_t offset, int whence) 20 - { 21 - return -EIO; 22 - } 23 - 24 - static ssize_t bad_file_read(struct file *filp, char __user *buf, 25 - size_t size, loff_t *ppos) 26 - { 27 - return -EIO; 28 - } 29 - 30 - static ssize_t bad_file_write(struct file *filp, const char __user *buf, 31 - size_t siz, loff_t *ppos) 32 - { 33 - return -EIO; 34 - } 35 - 36 - static ssize_t bad_file_aio_read(struct kiocb *iocb, const struct iovec *iov, 37 - unsigned long nr_segs, loff_t pos) 38 - { 39 - return -EIO; 40 - } 41 - 42 - static ssize_t bad_file_aio_write(struct kiocb *iocb, const struct iovec *iov, 43 - unsigned long nr_segs, loff_t pos) 44 - { 45 - return -EIO; 46 - } 47 - 48 - static int bad_file_readdir(struct file *file, struct dir_context *ctx) 49 - { 50 - return -EIO; 51 - } 52 - 53 - static unsigned int bad_file_poll(struct file *filp, poll_table *wait) 54 - { 55 - return POLLERR; 56 - } 57 - 58 - static long bad_file_unlocked_ioctl(struct file *file, unsigned cmd, 59 - unsigned long arg) 60 - { 61 - return -EIO; 62 - } 63 - 64 - static long bad_file_compat_ioctl(struct file *file, unsigned int cmd, 65 - unsigned long arg) 66 - { 67 - return -EIO; 68 - } 69 - 70 - static int bad_file_mmap(struct file *file, struct vm_area_struct *vma) 71 - { 72 - return -EIO; 73 - } 74 - 75 18 static int bad_file_open(struct inode *inode, struct file *filp) 76 - { 77 - return -EIO; 78 - } 79 - 80 - static int bad_file_flush(struct file *file, fl_owner_t id) 81 - { 82 - return -EIO; 83 - } 84 - 85 - static int bad_file_release(struct inode *inode, struct file *filp) 86 - { 87 - return -EIO; 88 - } 89 - 90 - static int bad_file_fsync(struct file *file, loff_t start, loff_t end, 91 - int datasync) 92 - { 93 - return -EIO; 94 - } 95 - 96 - static int bad_file_aio_fsync(struct kiocb *iocb, int datasync) 97 - { 98 - return -EIO; 99 - } 100 - 101 - static int 
bad_file_fasync(int fd, struct file *filp, int on) 102 - { 103 - return -EIO; 104 - } 105 - 106 - static int bad_file_lock(struct file *file, int cmd, struct file_lock *fl) 107 - { 108 - return -EIO; 109 - } 110 - 111 - static ssize_t bad_file_sendpage(struct file *file, struct page *page, 112 - int off, size_t len, loff_t *pos, int more) 113 - { 114 - return -EIO; 115 - } 116 - 117 - static unsigned long bad_file_get_unmapped_area(struct file *file, 118 - unsigned long addr, unsigned long len, 119 - unsigned long pgoff, unsigned long flags) 120 - { 121 - return -EIO; 122 - } 123 - 124 - static int bad_file_check_flags(int flags) 125 - { 126 - return -EIO; 127 - } 128 - 129 - static int bad_file_flock(struct file *filp, int cmd, struct file_lock *fl) 130 - { 131 - return -EIO; 132 - } 133 - 134 - static ssize_t bad_file_splice_write(struct pipe_inode_info *pipe, 135 - struct file *out, loff_t *ppos, size_t len, 136 - unsigned int flags) 137 - { 138 - return -EIO; 139 - } 140 - 141 - static ssize_t bad_file_splice_read(struct file *in, loff_t *ppos, 142 - struct pipe_inode_info *pipe, size_t len, 143 - unsigned int flags) 144 19 { 145 20 return -EIO; 146 21 } 147 22 148 23 static const struct file_operations bad_file_ops = 149 24 { 150 - .llseek = bad_file_llseek, 151 - .read = bad_file_read, 152 - .write = bad_file_write, 153 - .aio_read = bad_file_aio_read, 154 - .aio_write = bad_file_aio_write, 155 - .iterate = bad_file_readdir, 156 - .poll = bad_file_poll, 157 - .unlocked_ioctl = bad_file_unlocked_ioctl, 158 - .compat_ioctl = bad_file_compat_ioctl, 159 - .mmap = bad_file_mmap, 160 25 .open = bad_file_open, 161 - .flush = bad_file_flush, 162 - .release = bad_file_release, 163 - .fsync = bad_file_fsync, 164 - .aio_fsync = bad_file_aio_fsync, 165 - .fasync = bad_file_fasync, 166 - .lock = bad_file_lock, 167 - .sendpage = bad_file_sendpage, 168 - .get_unmapped_area = bad_file_get_unmapped_area, 169 - .check_flags = bad_file_check_flags, 170 - .flock = 
bad_file_flock, 171 - .splice_write = bad_file_splice_write, 172 - .splice_read = bad_file_splice_read, 173 26 }; 174 27 175 28 static int bad_inode_create (struct inode *dir, struct dentry *dentry,
+2 -2
fs/btrfs/ioctl.c
··· 776 776 IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode)) 777 777 return -EPERM; 778 778 if (isdir) { 779 - if (!S_ISDIR(victim->d_inode->i_mode)) 779 + if (!d_is_dir(victim)) 780 780 return -ENOTDIR; 781 781 if (IS_ROOT(victim)) 782 782 return -EBUSY; 783 - } else if (S_ISDIR(victim->d_inode->i_mode)) 783 + } else if (d_is_dir(victim)) 784 784 return -EISDIR; 785 785 if (IS_DEADDIR(dir)) 786 786 return -ENOENT;
+2 -2
fs/cachefiles/daemon.c
··· 574 574 /* extract the directory dentry from the cwd */ 575 575 get_fs_pwd(current->fs, &path); 576 576 577 - if (!S_ISDIR(path.dentry->d_inode->i_mode)) 577 + if (!d_can_lookup(path.dentry)) 578 578 goto notdir; 579 579 580 580 cachefiles_begin_secure(cache, &saved_cred); ··· 646 646 /* extract the directory dentry from the cwd */ 647 647 get_fs_pwd(current->fs, &path); 648 648 649 - if (!S_ISDIR(path.dentry->d_inode->i_mode)) 649 + if (!d_can_lookup(path.dentry)) 650 650 goto notdir; 651 651 652 652 cachefiles_begin_secure(cache, &saved_cred);
+2 -2
fs/cachefiles/interface.c
··· 437 437 if (!object->backer) 438 438 return -ENOBUFS; 439 439 440 - ASSERT(S_ISREG(object->backer->d_inode->i_mode)); 440 + ASSERT(d_is_reg(object->backer)); 441 441 442 442 fscache_set_store_limit(&object->fscache, ni_size); 443 443 ··· 501 501 op->object->debug_id, (unsigned long long)ni_size); 502 502 503 503 if (object->backer) { 504 - ASSERT(S_ISREG(object->backer->d_inode->i_mode)); 504 + ASSERT(d_is_reg(object->backer)); 505 505 506 506 fscache_set_store_limit(&object->fscache, ni_size); 507 507
+8 -8
fs/cachefiles/namei.c
··· 277 277 _debug("remove %p from %p", rep, dir); 278 278 279 279 /* non-directories can just be unlinked */ 280 - if (!S_ISDIR(rep->d_inode->i_mode)) { 280 + if (!d_is_dir(rep)) { 281 281 _debug("unlink stale object"); 282 282 283 283 path.mnt = cache->mnt; ··· 323 323 return 0; 324 324 } 325 325 326 - if (!S_ISDIR(cache->graveyard->d_inode->i_mode)) { 326 + if (!d_can_lookup(cache->graveyard)) { 327 327 unlock_rename(cache->graveyard, dir); 328 328 cachefiles_io_error(cache, "Graveyard no longer a directory"); 329 329 return -EIO; ··· 475 475 ASSERT(parent->dentry); 476 476 ASSERT(parent->dentry->d_inode); 477 477 478 - if (!(S_ISDIR(parent->dentry->d_inode->i_mode))) { 478 + if (!(d_is_dir(parent->dentry))) { 479 479 // TODO: convert file to dir 480 480 _leave("looking up in none directory"); 481 481 return -ENOBUFS; ··· 539 539 _debug("mkdir -> %p{%p{ino=%lu}}", 540 540 next, next->d_inode, next->d_inode->i_ino); 541 541 542 - } else if (!S_ISDIR(next->d_inode->i_mode)) { 542 + } else if (!d_can_lookup(next)) { 543 543 pr_err("inode %lu is not a directory\n", 544 544 next->d_inode->i_ino); 545 545 ret = -ENOBUFS; ··· 568 568 _debug("create -> %p{%p{ino=%lu}}", 569 569 next, next->d_inode, next->d_inode->i_ino); 570 570 571 - } else if (!S_ISDIR(next->d_inode->i_mode) && 572 - !S_ISREG(next->d_inode->i_mode) 571 + } else if (!d_can_lookup(next) && 572 + !d_is_reg(next) 573 573 ) { 574 574 pr_err("inode %lu is not a file or directory\n", 575 575 next->d_inode->i_ino); ··· 642 642 643 643 /* open a file interface onto a data file */ 644 644 if (object->type != FSCACHE_COOKIE_TYPE_INDEX) { 645 - if (S_ISREG(object->dentry->d_inode->i_mode)) { 645 + if (d_is_reg(object->dentry)) { 646 646 const struct address_space_operations *aops; 647 647 648 648 ret = -EPERM; ··· 763 763 /* we need to make sure the subdir is a directory */ 764 764 ASSERT(subdir->d_inode); 765 765 766 - if (!S_ISDIR(subdir->d_inode->i_mode)) { 766 + if (!d_can_lookup(subdir)) { 767 767 pr_err("%s 
is not a directory\n", dirname); 768 768 ret = -EIO; 769 769 goto check_error;
+1 -1
fs/cachefiles/rdwr.c
··· 900 900 return -ENOBUFS; 901 901 } 902 902 903 - ASSERT(S_ISREG(object->backer->d_inode->i_mode)); 903 + ASSERT(d_is_reg(object->backer)); 904 904 905 905 cache = container_of(object->fscache.cache, 906 906 struct cachefiles_cache, cache);
+1 -1
fs/ceph/dir.c
··· 904 904 } else if (ceph_snap(dir) == CEPH_NOSNAP) { 905 905 dout("unlink/rmdir dir %p dn %p inode %p\n", 906 906 dir, dentry, inode); 907 - op = S_ISDIR(dentry->d_inode->i_mode) ? 907 + op = d_is_dir(dentry) ? 908 908 CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK; 909 909 } else 910 910 goto out;
+1 -1
fs/ceph/file.c
··· 292 292 } 293 293 if (err) 294 294 goto out_req; 295 - if (dn || dentry->d_inode == NULL || S_ISLNK(dentry->d_inode->i_mode)) { 295 + if (dn || dentry->d_inode == NULL || d_is_symlink(dentry)) { 296 296 /* make vfs retry on splice, ENOENT, or symlink */ 297 297 dout("atomic_open finish_no_open on dn %p\n", dn); 298 298 err = finish_no_open(file, dn);
+1 -1
fs/coda/dir.c
··· 304 304 (const char *) old_name, (const char *)new_name); 305 305 if (!error) { 306 306 if (new_dentry->d_inode) { 307 - if (S_ISDIR(new_dentry->d_inode->i_mode)) { 307 + if (d_is_dir(new_dentry)) { 308 308 coda_dir_drop_nlink(old_dir); 309 309 coda_dir_inc_nlink(new_dir); 310 310 }
+1 -2
fs/configfs/configfs_internal.h
··· 69 69 extern int configfs_is_root(struct config_item *item); 70 70 71 71 extern struct inode * configfs_new_inode(umode_t mode, struct configfs_dirent *, struct super_block *); 72 - extern int configfs_create(struct dentry *, umode_t mode, int (*init)(struct inode *)); 72 + extern int configfs_create(struct dentry *, umode_t mode, void (*init)(struct inode *)); 73 73 74 74 extern int configfs_create_file(struct config_item *, const struct configfs_attribute *); 75 75 extern int configfs_make_dirent(struct configfs_dirent *, 76 76 struct dentry *, void *, umode_t, int); 77 77 extern int configfs_dirent_is_ready(struct configfs_dirent *); 78 78 79 - extern int configfs_add_file(struct dentry *, const struct configfs_attribute *, int); 80 79 extern void configfs_hash_and_remove(struct dentry * dir, const char * name); 81 80 82 81 extern const unsigned char * configfs_get_name(struct configfs_dirent *sd);
+32 -40
fs/configfs/dir.c
··· 240 240 return 0; 241 241 } 242 242 243 - static int init_dir(struct inode * inode) 243 + static void init_dir(struct inode * inode) 244 244 { 245 245 inode->i_op = &configfs_dir_inode_operations; 246 246 inode->i_fop = &configfs_dir_operations; 247 247 248 248 /* directory inodes start off with i_nlink == 2 (for "." entry) */ 249 249 inc_nlink(inode); 250 - return 0; 251 250 } 252 251 253 - static int configfs_init_file(struct inode * inode) 252 + static void configfs_init_file(struct inode * inode) 254 253 { 255 254 inode->i_size = PAGE_SIZE; 256 255 inode->i_fop = &configfs_file_operations; 257 - return 0; 258 256 } 259 257 260 - static int init_symlink(struct inode * inode) 258 + static void init_symlink(struct inode * inode) 261 259 { 262 260 inode->i_op = &configfs_symlink_inode_operations; 263 - return 0; 264 261 } 265 - 266 - static int create_dir(struct config_item *k, struct dentry *d) 267 - { 268 - int error; 269 - umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO; 270 - struct dentry *p = d->d_parent; 271 - 272 - BUG_ON(!k); 273 - 274 - error = configfs_dirent_exists(p->d_fsdata, d->d_name.name); 275 - if (!error) 276 - error = configfs_make_dirent(p->d_fsdata, d, k, mode, 277 - CONFIGFS_DIR | CONFIGFS_USET_CREATING); 278 - if (!error) { 279 - configfs_set_dir_dirent_depth(p->d_fsdata, d->d_fsdata); 280 - error = configfs_create(d, mode, init_dir); 281 - if (!error) { 282 - inc_nlink(p->d_inode); 283 - } else { 284 - struct configfs_dirent *sd = d->d_fsdata; 285 - if (sd) { 286 - spin_lock(&configfs_dirent_lock); 287 - list_del_init(&sd->s_sibling); 288 - spin_unlock(&configfs_dirent_lock); 289 - configfs_put(sd); 290 - } 291 - } 292 - } 293 - return error; 294 - } 295 - 296 262 297 263 /** 298 264 * configfs_create_dir - create a directory for an config_item. 
··· 269 303 * until it is validated by configfs_dir_set_ready() 270 304 */ 271 305 272 - static int configfs_create_dir(struct config_item * item, struct dentry *dentry) 306 + static int configfs_create_dir(struct config_item *item, struct dentry *dentry) 273 307 { 274 - int error = create_dir(item, dentry); 275 - if (!error) 308 + int error; 309 + umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO; 310 + struct dentry *p = dentry->d_parent; 311 + 312 + BUG_ON(!item); 313 + 314 + error = configfs_dirent_exists(p->d_fsdata, dentry->d_name.name); 315 + if (unlikely(error)) 316 + return error; 317 + 318 + error = configfs_make_dirent(p->d_fsdata, dentry, item, mode, 319 + CONFIGFS_DIR | CONFIGFS_USET_CREATING); 320 + if (unlikely(error)) 321 + return error; 322 + 323 + configfs_set_dir_dirent_depth(p->d_fsdata, dentry->d_fsdata); 324 + error = configfs_create(dentry, mode, init_dir); 325 + if (!error) { 326 + inc_nlink(p->d_inode); 276 327 item->ci_dentry = dentry; 328 + } else { 329 + struct configfs_dirent *sd = dentry->d_fsdata; 330 + if (sd) { 331 + spin_lock(&configfs_dirent_lock); 332 + list_del_init(&sd->s_sibling); 333 + spin_unlock(&configfs_dirent_lock); 334 + configfs_put(sd); 335 + } 336 + } 277 337 return error; 278 338 } 279 339
+10 -18
fs/configfs/file.c
··· 313 313 .release = configfs_release, 314 314 }; 315 315 316 - 317 - int configfs_add_file(struct dentry * dir, const struct configfs_attribute * attr, int type) 318 - { 319 - struct configfs_dirent * parent_sd = dir->d_fsdata; 320 - umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG; 321 - int error = 0; 322 - 323 - mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_NORMAL); 324 - error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode, type); 325 - mutex_unlock(&dir->d_inode->i_mutex); 326 - 327 - return error; 328 - } 329 - 330 - 331 316 /** 332 317 * configfs_create_file - create an attribute file for an item. 333 318 * @item: item we're creating for. ··· 321 336 322 337 int configfs_create_file(struct config_item * item, const struct configfs_attribute * attr) 323 338 { 324 - BUG_ON(!item || !item->ci_dentry || !attr); 339 + struct dentry *dir = item->ci_dentry; 340 + struct configfs_dirent *parent_sd = dir->d_fsdata; 341 + umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG; 342 + int error = 0; 325 343 326 - return configfs_add_file(item->ci_dentry, attr, 327 - CONFIGFS_ITEM_ATTR); 344 + mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_NORMAL); 345 + error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode, 346 + CONFIGFS_ITEM_ATTR); 347 + mutex_unlock(&dir->d_inode->i_mutex); 348 + 349 + return error; 328 350 } 329 351
+3 -9
fs/configfs/inode.c
··· 176 176 177 177 #endif /* CONFIG_LOCKDEP */ 178 178 179 - int configfs_create(struct dentry * dentry, umode_t mode, int (*init)(struct inode *)) 179 + int configfs_create(struct dentry * dentry, umode_t mode, void (*init)(struct inode *)) 180 180 { 181 181 int error = 0; 182 182 struct inode *inode = NULL; ··· 198 198 p_inode->i_mtime = p_inode->i_ctime = CURRENT_TIME; 199 199 configfs_set_inode_lock_class(sd, inode); 200 200 201 - if (init) { 202 - error = init(inode); 203 - if (error) { 204 - iput(inode); 205 - return error; 206 - } 207 - } 201 + init(inode); 208 202 d_instantiate(dentry, inode); 209 203 if (S_ISDIR(mode) || S_ISLNK(mode)) 210 204 dget(dentry); /* pin link and directory dentries in core */ ··· 236 242 237 243 if (dentry) { 238 244 spin_lock(&dentry->d_lock); 239 - if (!(d_unhashed(dentry) && dentry->d_inode)) { 245 + if (!d_unhashed(dentry) && dentry->d_inode) { 240 246 dget_dlock(dentry); 241 247 __d_drop(dentry); 242 248 spin_unlock(&dentry->d_lock);
+1 -1
fs/coredump.c
··· 572 572 * 573 573 * Normally core limits are irrelevant to pipes, since 574 574 * we're not writing to the file system, but we use 575 - * cprm.limit of 1 here as a speacial value, this is a 575 + * cprm.limit of 1 here as a special value, this is a 576 576 * consistent way to catch recursive crashes. 577 577 * We can still crash if the core_pattern binary sets 578 578 * RLIM_CORE = !1, but it runs as root, and can do
+32 -7
fs/dcache.c
··· 1659 1659 } 1660 1660 EXPORT_SYMBOL(d_set_d_op); 1661 1661 1662 + 1663 + /* 1664 + * d_set_fallthru - Mark a dentry as falling through to a lower layer 1665 + * @dentry - The dentry to mark 1666 + * 1667 + * Mark a dentry as falling through to the lower layer (as set with 1668 + * d_pin_lower()). This flag may be recorded on the medium. 1669 + */ 1670 + void d_set_fallthru(struct dentry *dentry) 1671 + { 1672 + spin_lock(&dentry->d_lock); 1673 + dentry->d_flags |= DCACHE_FALLTHRU; 1674 + spin_unlock(&dentry->d_lock); 1675 + } 1676 + EXPORT_SYMBOL(d_set_fallthru); 1677 + 1662 1678 static unsigned d_flags_for_inode(struct inode *inode) 1663 1679 { 1664 - unsigned add_flags = DCACHE_FILE_TYPE; 1680 + unsigned add_flags = DCACHE_REGULAR_TYPE; 1665 1681 1666 1682 if (!inode) 1667 1683 return DCACHE_MISS_TYPE; ··· 1690 1674 else 1691 1675 inode->i_opflags |= IOP_LOOKUP; 1692 1676 } 1693 - } else if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) { 1694 - if (unlikely(inode->i_op->follow_link)) 1695 - add_flags = DCACHE_SYMLINK_TYPE; 1696 - else 1697 - inode->i_opflags |= IOP_NOFOLLOW; 1677 + goto type_determined; 1698 1678 } 1699 1679 1680 + if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) { 1681 + if (unlikely(inode->i_op->follow_link)) { 1682 + add_flags = DCACHE_SYMLINK_TYPE; 1683 + goto type_determined; 1684 + } 1685 + inode->i_opflags |= IOP_NOFOLLOW; 1686 + } 1687 + 1688 + if (unlikely(!S_ISREG(inode->i_mode))) 1689 + add_flags = DCACHE_SPECIAL_TYPE; 1690 + 1691 + type_determined: 1700 1692 if (unlikely(IS_AUTOMOUNT(inode))) 1701 1693 add_flags |= DCACHE_NEED_AUTOMOUNT; 1702 1694 return add_flags; ··· 1715 1691 unsigned add_flags = d_flags_for_inode(inode); 1716 1692 1717 1693 spin_lock(&dentry->d_lock); 1718 - __d_set_type(dentry, add_flags); 1694 + dentry->d_flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU); 1695 + dentry->d_flags |= add_flags; 1719 1696 if (inode) 1720 1697 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); 1721 1698 dentry->d_inode = 
inode;
+18 -18
fs/debugfs/inode.c
··· 169 169 return 0; 170 170 } 171 171 172 + static void debugfs_evict_inode(struct inode *inode) 173 + { 174 + truncate_inode_pages_final(&inode->i_data); 175 + clear_inode(inode); 176 + if (S_ISLNK(inode->i_mode)) 177 + kfree(inode->i_private); 178 + } 179 + 172 180 static const struct super_operations debugfs_super_operations = { 173 181 .statfs = simple_statfs, 174 182 .remount_fs = debugfs_remount, 175 183 .show_options = debugfs_show_options, 184 + .evict_inode = debugfs_evict_inode, 176 185 }; 177 186 178 187 static struct vfsmount *debugfs_automount(struct path *path) ··· 520 511 int ret = 0; 521 512 522 513 if (debugfs_positive(dentry)) { 523 - if (dentry->d_inode) { 524 - dget(dentry); 525 - switch (dentry->d_inode->i_mode & S_IFMT) { 526 - case S_IFDIR: 527 - ret = simple_rmdir(parent->d_inode, dentry); 528 - break; 529 - case S_IFLNK: 530 - kfree(dentry->d_inode->i_private); 531 - /* fall through */ 532 - default: 533 - simple_unlink(parent->d_inode, dentry); 534 - break; 535 - } 536 - if (!ret) 537 - d_delete(dentry); 538 - dput(dentry); 539 - } 514 + dget(dentry); 515 + if (S_ISDIR(dentry->d_inode->i_mode)) 516 + ret = simple_rmdir(parent->d_inode, dentry); 517 + else 518 + simple_unlink(parent->d_inode, dentry); 519 + if (!ret) 520 + d_delete(dentry); 521 + dput(dentry); 540 522 } 541 523 return ret; 542 524 } ··· 690 690 } 691 691 d_move(old_dentry, dentry); 692 692 fsnotify_move(old_dir->d_inode, new_dir->d_inode, old_name, 693 - S_ISDIR(old_dentry->d_inode->i_mode), 693 + d_is_dir(old_dentry), 694 694 NULL, old_dentry); 695 695 fsnotify_oldname_free(old_name); 696 696 unlock_rename(new_dir, old_dir);
+1 -1
fs/ecryptfs/file.c
··· 230 230 } 231 231 ecryptfs_set_file_lower( 232 232 file, ecryptfs_inode_to_private(inode)->lower_file); 233 - if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) { 233 + if (d_is_dir(ecryptfs_dentry)) { 234 234 ecryptfs_printk(KERN_DEBUG, "This is a directory\n"); 235 235 mutex_lock(&crypt_stat->cs_mutex); 236 236 crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
+2 -2
fs/ecryptfs/inode.c
··· 907 907 lower_inode = ecryptfs_inode_to_lower(inode); 908 908 lower_dentry = ecryptfs_dentry_to_lower(dentry); 909 909 mutex_lock(&crypt_stat->cs_mutex); 910 - if (S_ISDIR(dentry->d_inode->i_mode)) 910 + if (d_is_dir(dentry)) 911 911 crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); 912 - else if (S_ISREG(dentry->d_inode->i_mode) 912 + else if (d_is_reg(dentry) 913 913 && (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED) 914 914 || !(crypt_stat->flags & ECRYPTFS_KEY_VALID))) { 915 915 struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
+1 -1
fs/exportfs/expfs.c
··· 429 429 if (IS_ERR(result)) 430 430 return result; 431 431 432 - if (S_ISDIR(result->d_inode->i_mode)) { 432 + if (d_is_dir(result)) { 433 433 /* 434 434 * This request is for a directory. 435 435 *
+3 -3
fs/fs-writeback.c
··· 769 769 struct inode *inode = wb_inode(wb->b_io.prev); 770 770 struct super_block *sb = inode->i_sb; 771 771 772 - if (!grab_super_passive(sb)) { 772 + if (!trylock_super(sb)) { 773 773 /* 774 - * grab_super_passive() may fail consistently due to 774 + * trylock_super() may fail consistently due to 775 775 * s_umount being grabbed by someone else. Don't use 776 776 * requeue_io() to avoid busy retrying the inode/sb. 777 777 */ ··· 779 779 continue; 780 780 } 781 781 wrote += writeback_sb_inodes(sb, wb, work); 782 - drop_super(sb); 782 + up_read(&sb->s_umount); 783 783 784 784 /* refer to the same tests at the end of writeback_sb_inodes */ 785 785 if (wrote) {
+1 -1
fs/fuse/dir.c
··· 971 971 err = -EBUSY; 972 972 goto badentry; 973 973 } 974 - if (S_ISDIR(entry->d_inode->i_mode)) { 974 + if (d_is_dir(entry)) { 975 975 shrink_dcache_parent(entry); 976 976 if (!simple_empty(entry)) { 977 977 err = -ENOTEMPTY;
+1 -1
fs/gfs2/dir.c
··· 1809 1809 gfs2_consist_inode(dip); 1810 1810 dip->i_entries--; 1811 1811 dip->i_inode.i_mtime = dip->i_inode.i_ctime = tv; 1812 - if (S_ISDIR(dentry->d_inode->i_mode)) 1812 + if (d_is_dir(dentry)) 1813 1813 drop_nlink(&dip->i_inode); 1814 1814 mark_inode_dirty(&dip->i_inode); 1815 1815
+1 -1
fs/hfsplus/dir.c
··· 530 530 531 531 /* Unlink destination if it already exists */ 532 532 if (new_dentry->d_inode) { 533 - if (S_ISDIR(new_dentry->d_inode->i_mode)) 533 + if (d_is_dir(new_dentry)) 534 534 res = hfsplus_rmdir(new_dir, new_dentry); 535 535 else 536 536 res = hfsplus_unlink(new_dir, new_dentry);
+2 -2
fs/hppfs/hppfs.c
··· 678 678 return NULL; 679 679 } 680 680 681 - if (S_ISDIR(dentry->d_inode->i_mode)) { 681 + if (d_is_dir(dentry)) { 682 682 inode->i_op = &hppfs_dir_iops; 683 683 inode->i_fop = &hppfs_dir_fops; 684 - } else if (S_ISLNK(dentry->d_inode->i_mode)) { 684 + } else if (d_is_symlink(dentry)) { 685 685 inode->i_op = &hppfs_link_iops; 686 686 inode->i_fop = &hppfs_file_fops; 687 687 } else {
+1 -1
fs/internal.h
··· 84 84 * super.c 85 85 */ 86 86 extern int do_remount_sb(struct super_block *, int, void *, int); 87 - extern bool grab_super_passive(struct super_block *sb); 87 + extern bool trylock_super(struct super_block *sb); 88 88 extern struct dentry *mount_fs(struct file_system_type *, 89 89 int, const char *, void *); 90 90 extern struct super_block *user_get_super(dev_t);
+7 -7
fs/jffs2/dir.c
··· 252 252 if (!f->inocache) 253 253 return -EIO; 254 254 255 - if (S_ISDIR(old_dentry->d_inode->i_mode)) 255 + if (d_is_dir(old_dentry)) 256 256 return -EPERM; 257 257 258 258 /* XXX: This is ugly */ ··· 772 772 */ 773 773 if (new_dentry->d_inode) { 774 774 victim_f = JFFS2_INODE_INFO(new_dentry->d_inode); 775 - if (S_ISDIR(new_dentry->d_inode->i_mode)) { 775 + if (d_is_dir(new_dentry)) { 776 776 struct jffs2_full_dirent *fd; 777 777 778 778 mutex_lock(&victim_f->sem); ··· 807 807 808 808 if (victim_f) { 809 809 /* There was a victim. Kill it off nicely */ 810 - if (S_ISDIR(new_dentry->d_inode->i_mode)) 810 + if (d_is_dir(new_dentry)) 811 811 clear_nlink(new_dentry->d_inode); 812 812 else 813 813 drop_nlink(new_dentry->d_inode); ··· 815 815 inode which didn't exist. */ 816 816 if (victim_f->inocache) { 817 817 mutex_lock(&victim_f->sem); 818 - if (S_ISDIR(new_dentry->d_inode->i_mode)) 818 + if (d_is_dir(new_dentry)) 819 819 victim_f->inocache->pino_nlink = 0; 820 820 else 821 821 victim_f->inocache->pino_nlink--; ··· 825 825 826 826 /* If it was a directory we moved, and there was no victim, 827 827 increase i_nlink on its new parent */ 828 - if (S_ISDIR(old_dentry->d_inode->i_mode) && !victim_f) 828 + if (d_is_dir(old_dentry) && !victim_f) 829 829 inc_nlink(new_dir_i); 830 830 831 831 /* Unlink the original */ ··· 839 839 struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode); 840 840 mutex_lock(&f->sem); 841 841 inc_nlink(old_dentry->d_inode); 842 - if (f->inocache && !S_ISDIR(old_dentry->d_inode->i_mode)) 842 + if (f->inocache && !d_is_dir(old_dentry)) 843 843 f->inocache->pino_nlink++; 844 844 mutex_unlock(&f->sem); 845 845 ··· 852 852 return ret; 853 853 } 854 854 855 - if (S_ISDIR(old_dentry->d_inode->i_mode)) 855 + if (d_is_dir(old_dentry)) 856 856 drop_nlink(old_dir_i); 857 857 858 858 new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now);
+1 -1
fs/jffs2/super.c
··· 138 138 struct jffs2_inode_info *f; 139 139 uint32_t pino; 140 140 141 - BUG_ON(!S_ISDIR(child->d_inode->i_mode)); 141 + BUG_ON(!d_is_dir(child)); 142 142 143 143 f = JFFS2_INODE_INFO(child->d_inode); 144 144
+1 -1
fs/libfs.c
··· 329 329 struct inode *new_dir, struct dentry *new_dentry) 330 330 { 331 331 struct inode *inode = old_dentry->d_inode; 332 - int they_are_dirs = S_ISDIR(old_dentry->d_inode->i_mode); 332 + int they_are_dirs = d_is_dir(old_dentry); 333 333 334 334 if (!simple_empty(new_dentry)) 335 335 return -ENOTEMPTY;
+1 -1
fs/namei.c
··· 2814 2814 } else if (!dentry->d_inode) { 2815 2815 goto out; 2816 2816 } else if ((open_flag & O_TRUNC) && 2817 - S_ISREG(dentry->d_inode->i_mode)) { 2817 + d_is_reg(dentry)) { 2818 2818 goto out; 2819 2819 } 2820 2820 /* will fail later, go on to get the right error */
+5 -5
fs/namespace.c
··· 1907 1907 if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER) 1908 1908 return -EINVAL; 1909 1909 1910 - if (S_ISDIR(mp->m_dentry->d_inode->i_mode) != 1911 - S_ISDIR(mnt->mnt.mnt_root->d_inode->i_mode)) 1910 + if (d_is_dir(mp->m_dentry) != 1911 + d_is_dir(mnt->mnt.mnt_root)) 1912 1912 return -ENOTDIR; 1913 1913 1914 1914 return attach_recursive_mnt(mnt, p, mp, NULL); ··· 2180 2180 if (!mnt_has_parent(old)) 2181 2181 goto out1; 2182 2182 2183 - if (S_ISDIR(path->dentry->d_inode->i_mode) != 2184 - S_ISDIR(old_path.dentry->d_inode->i_mode)) 2183 + if (d_is_dir(path->dentry) != 2184 + d_is_dir(old_path.dentry)) 2185 2185 goto out1; 2186 2186 /* 2187 2187 * Don't move a mount residing in a shared parent. ··· 2271 2271 goto unlock; 2272 2272 2273 2273 err = -EINVAL; 2274 - if (S_ISLNK(newmnt->mnt.mnt_root->d_inode->i_mode)) 2274 + if (d_is_symlink(newmnt->mnt.mnt_root)) 2275 2275 goto unlock; 2276 2276 2277 2277 newmnt->mnt.mnt_flags = mnt_flags;
+2 -2
fs/nfsd/nfs4recover.c
··· 583 583 if (status) 584 584 return status; 585 585 status = -ENOTDIR; 586 - if (S_ISDIR(path.dentry->d_inode->i_mode)) { 586 + if (d_is_dir(path.dentry)) { 587 587 strcpy(user_recovery_dirname, recdir); 588 588 status = 0; 589 589 } ··· 1426 1426 nn->client_tracking_ops = &nfsd4_legacy_tracking_ops; 1427 1427 status = kern_path(nfs4_recoverydir(), LOOKUP_FOLLOW, &path); 1428 1428 if (!status) { 1429 - status = S_ISDIR(path.dentry->d_inode->i_mode); 1429 + status = d_is_dir(path.dentry); 1430 1430 path_put(&path); 1431 1431 if (status) 1432 1432 goto do_init;
+4 -4
fs/nfsd/nfsfh.c
··· 114 114 * We're exposing only the directories and symlinks that have to be 115 115 * traversed on the way to real exports: 116 116 */ 117 - if (unlikely(!S_ISDIR(dentry->d_inode->i_mode) && 118 - !S_ISLNK(dentry->d_inode->i_mode))) 117 + if (unlikely(!d_is_dir(dentry) && 118 + !d_is_symlink(dentry))) 119 119 return nfserr_stale; 120 120 /* 121 121 * A pseudoroot export gives permission to access only one ··· 259 259 goto out; 260 260 } 261 261 262 - if (S_ISDIR(dentry->d_inode->i_mode) && 262 + if (d_is_dir(dentry) && 263 263 (dentry->d_flags & DCACHE_DISCONNECTED)) { 264 264 printk("nfsd: find_fh_dentry returned a DISCONNECTED directory: %pd2\n", 265 265 dentry); ··· 414 414 { 415 415 fh->ofh_ino = ino_t_to_u32(dentry->d_inode->i_ino); 416 416 fh->ofh_generation = dentry->d_inode->i_generation; 417 - if (S_ISDIR(dentry->d_inode->i_mode) || 417 + if (d_is_dir(dentry) || 418 418 (exp->ex_flags & NFSEXP_NOSUBTREECHECK)) 419 419 fh->ofh_dirino = 0; 420 420 }
+4 -4
fs/nfsd/vfs.c
··· 615 615 export = fhp->fh_export; 616 616 dentry = fhp->fh_dentry; 617 617 618 - if (S_ISREG(dentry->d_inode->i_mode)) 618 + if (d_is_reg(dentry)) 619 619 map = nfs3_regaccess; 620 - else if (S_ISDIR(dentry->d_inode->i_mode)) 620 + else if (d_is_dir(dentry)) 621 621 map = nfs3_diraccess; 622 622 else 623 623 map = nfs3_anyaccess; ··· 1402 1402 1403 1403 switch (createmode) { 1404 1404 case NFS3_CREATE_UNCHECKED: 1405 - if (! S_ISREG(dchild->d_inode->i_mode)) 1405 + if (! d_is_reg(dchild)) 1406 1406 goto out; 1407 1407 else if (truncp) { 1408 1408 /* in nfsv4, we need to treat this case a little ··· 1615 1615 if (err) 1616 1616 goto out; 1617 1617 err = nfserr_isdir; 1618 - if (S_ISDIR(tfhp->fh_dentry->d_inode->i_mode)) 1618 + if (d_is_dir(tfhp->fh_dentry)) 1619 1619 goto out; 1620 1620 err = nfserr_perm; 1621 1621 if (!len)
+3 -3
fs/notify/fanotify/fanotify.c
··· 115 115 return false; 116 116 117 117 /* sorry, fanotify only gives a damn about files and dirs */ 118 - if (!S_ISREG(path->dentry->d_inode->i_mode) && 119 - !S_ISDIR(path->dentry->d_inode->i_mode)) 118 + if (!d_is_reg(path->dentry) && 119 + !d_can_lookup(path->dentry)) 120 120 return false; 121 121 122 122 if (inode_mark && vfsmnt_mark) { ··· 139 139 BUG(); 140 140 } 141 141 142 - if (S_ISDIR(path->dentry->d_inode->i_mode) && 142 + if (d_is_dir(path->dentry) && 143 143 !(marks_mask & FS_ISDIR & ~marks_ignored_mask)) 144 144 return false; 145 145
+2 -3
fs/overlayfs/copy_up.c
··· 191 191 ovl_set_timestamps(upperdentry, stat); 192 192 193 193 return err; 194 - 195 194 } 196 195 197 196 static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir, ··· 384 385 struct kstat stat; 385 386 enum ovl_path_type type = ovl_path_type(dentry); 386 387 387 - if (type != OVL_PATH_LOWER) 388 + if (OVL_TYPE_UPPER(type)) 388 389 break; 389 390 390 391 next = dget(dentry); ··· 393 394 parent = dget_parent(next); 394 395 395 396 type = ovl_path_type(parent); 396 - if (type != OVL_PATH_LOWER) 397 + if (OVL_TYPE_UPPER(type)) 397 398 break; 398 399 399 400 dput(next);
+17 -17
fs/overlayfs/dir.c
··· 19 19 int err; 20 20 21 21 dget(wdentry); 22 - if (S_ISDIR(wdentry->d_inode->i_mode)) 22 + if (d_is_dir(wdentry)) 23 23 err = ovl_do_rmdir(wdir, wdentry); 24 24 else 25 25 err = ovl_do_unlink(wdir, wdentry); ··· 118 118 119 119 static int ovl_set_opaque(struct dentry *upperdentry) 120 120 { 121 - return ovl_do_setxattr(upperdentry, ovl_opaque_xattr, "y", 1, 0); 121 + return ovl_do_setxattr(upperdentry, OVL_XATTR_OPAQUE, "y", 1, 0); 122 122 } 123 123 124 124 static void ovl_remove_opaque(struct dentry *upperdentry) 125 125 { 126 126 int err; 127 127 128 - err = ovl_do_removexattr(upperdentry, ovl_opaque_xattr); 128 + err = ovl_do_removexattr(upperdentry, OVL_XATTR_OPAQUE); 129 129 if (err) { 130 130 pr_warn("overlayfs: failed to remove opaque from '%s' (%i)\n", 131 131 upperdentry->d_name.name, err); ··· 152 152 * correct link count. nlink=1 seems to pacify 'find' and 153 153 * other utilities. 154 154 */ 155 - if (type == OVL_PATH_MERGE) 155 + if (OVL_TYPE_MERGE(type)) 156 156 stat->nlink = 1; 157 157 158 158 return 0; ··· 506 506 struct dentry *opaquedir = NULL; 507 507 int err; 508 508 509 - if (is_dir) { 509 + if (is_dir && OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) { 510 510 opaquedir = ovl_check_empty_and_clear(dentry); 511 511 err = PTR_ERR(opaquedir); 512 512 if (IS_ERR(opaquedir)) ··· 630 630 goto out_drop_write; 631 631 632 632 type = ovl_path_type(dentry); 633 - if (type == OVL_PATH_PURE_UPPER) { 633 + if (OVL_TYPE_PURE_UPPER(type)) { 634 634 err = ovl_remove_upper(dentry, is_dir); 635 635 } else { 636 636 const struct cred *old_cred; ··· 693 693 bool new_create = false; 694 694 bool cleanup_whiteout = false; 695 695 bool overwrite = !(flags & RENAME_EXCHANGE); 696 - bool is_dir = S_ISDIR(old->d_inode->i_mode); 696 + bool is_dir = d_is_dir(old); 697 697 bool new_is_dir = false; 698 698 struct dentry *opaquedir = NULL; 699 699 const struct cred *old_cred = NULL; ··· 712 712 /* Don't copy up directory trees */ 713 713 old_type = ovl_path_type(old); 
714 714 err = -EXDEV; 715 - if ((old_type == OVL_PATH_LOWER || old_type == OVL_PATH_MERGE) && is_dir) 715 + if (OVL_TYPE_MERGE_OR_LOWER(old_type) && is_dir) 716 716 goto out; 717 717 718 718 if (new->d_inode) { ··· 720 720 if (err) 721 721 goto out; 722 722 723 - if (S_ISDIR(new->d_inode->i_mode)) 723 + if (d_is_dir(new)) 724 724 new_is_dir = true; 725 725 726 726 new_type = ovl_path_type(new); 727 727 err = -EXDEV; 728 - if (!overwrite && (new_type == OVL_PATH_LOWER || new_type == OVL_PATH_MERGE) && new_is_dir) 728 + if (!overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir) 729 729 goto out; 730 730 731 731 err = 0; 732 - if (new_type == OVL_PATH_LOWER && old_type == OVL_PATH_LOWER) { 732 + if (!OVL_TYPE_UPPER(new_type) && !OVL_TYPE_UPPER(old_type)) { 733 733 if (ovl_dentry_lower(old)->d_inode == 734 734 ovl_dentry_lower(new)->d_inode) 735 735 goto out; 736 736 } 737 - if (new_type != OVL_PATH_LOWER && old_type != OVL_PATH_LOWER) { 737 + if (OVL_TYPE_UPPER(new_type) && OVL_TYPE_UPPER(old_type)) { 738 738 if (ovl_dentry_upper(old)->d_inode == 739 739 ovl_dentry_upper(new)->d_inode) 740 740 goto out; 741 741 } 742 742 } else { 743 743 if (ovl_dentry_is_opaque(new)) 744 - new_type = OVL_PATH_UPPER; 744 + new_type = __OVL_PATH_UPPER; 745 745 else 746 - new_type = OVL_PATH_PURE_UPPER; 746 + new_type = __OVL_PATH_UPPER | __OVL_PATH_PURE; 747 747 } 748 748 749 749 err = ovl_want_write(old); ··· 763 763 goto out_drop_write; 764 764 } 765 765 766 - old_opaque = old_type != OVL_PATH_PURE_UPPER; 767 - new_opaque = new_type != OVL_PATH_PURE_UPPER; 766 + old_opaque = !OVL_TYPE_PURE_UPPER(old_type); 767 + new_opaque = !OVL_TYPE_PURE_UPPER(new_type); 768 768 769 769 if (old_opaque || new_opaque) { 770 770 err = -ENOMEM; ··· 787 787 old_cred = override_creds(override_cred); 788 788 } 789 789 790 - if (overwrite && (new_type == OVL_PATH_LOWER || new_type == OVL_PATH_MERGE) && new_is_dir) { 790 + if (overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir) { 791 791 
opaquedir = ovl_check_empty_and_clear(new); 792 792 err = PTR_ERR(opaquedir); 793 793 if (IS_ERR(opaquedir)) {
+7 -5
fs/overlayfs/inode.c
··· 205 205 206 206 static bool ovl_is_private_xattr(const char *name) 207 207 { 208 - return strncmp(name, "trusted.overlay.", 14) == 0; 208 + return strncmp(name, OVL_XATTR_PRE_NAME, OVL_XATTR_PRE_LEN) == 0; 209 209 } 210 210 211 211 int ovl_setxattr(struct dentry *dentry, const char *name, ··· 238 238 static bool ovl_need_xattr_filter(struct dentry *dentry, 239 239 enum ovl_path_type type) 240 240 { 241 - return type == OVL_PATH_UPPER && S_ISDIR(dentry->d_inode->i_mode); 241 + if ((type & (__OVL_PATH_PURE | __OVL_PATH_UPPER)) == __OVL_PATH_UPPER) 242 + return S_ISDIR(dentry->d_inode->i_mode); 243 + else 244 + return false; 242 245 } 243 246 244 247 ssize_t ovl_getxattr(struct dentry *dentry, const char *name, ··· 302 299 if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name)) 303 300 goto out_drop_write; 304 301 305 - if (type == OVL_PATH_LOWER) { 302 + if (!OVL_TYPE_UPPER(type)) { 306 303 err = vfs_getxattr(realpath.dentry, name, NULL, 0); 307 304 if (err < 0) 308 305 goto out_drop_write; ··· 324 321 static bool ovl_open_need_copy_up(int flags, enum ovl_path_type type, 325 322 struct dentry *realdentry) 326 323 { 327 - if (type != OVL_PATH_LOWER) 324 + if (OVL_TYPE_UPPER(type)) 328 325 return false; 329 326 330 327 if (special_file(realdentry->d_inode->i_mode)) ··· 433 430 } 434 431 435 432 return inode; 436 - 437 433 }
+13 -5
fs/overlayfs/overlayfs.h
··· 12 12 struct ovl_entry; 13 13 14 14 enum ovl_path_type { 15 - OVL_PATH_PURE_UPPER, 16 - OVL_PATH_UPPER, 17 - OVL_PATH_MERGE, 18 - OVL_PATH_LOWER, 15 + __OVL_PATH_PURE = (1 << 0), 16 + __OVL_PATH_UPPER = (1 << 1), 17 + __OVL_PATH_MERGE = (1 << 2), 19 18 }; 20 19 21 - extern const char *ovl_opaque_xattr; 20 + #define OVL_TYPE_UPPER(type) ((type) & __OVL_PATH_UPPER) 21 + #define OVL_TYPE_MERGE(type) ((type) & __OVL_PATH_MERGE) 22 + #define OVL_TYPE_PURE_UPPER(type) ((type) & __OVL_PATH_PURE) 23 + #define OVL_TYPE_MERGE_OR_LOWER(type) \ 24 + (OVL_TYPE_MERGE(type) || !OVL_TYPE_UPPER(type)) 25 + 26 + #define OVL_XATTR_PRE_NAME "trusted.overlay." 27 + #define OVL_XATTR_PRE_LEN 16 28 + #define OVL_XATTR_OPAQUE OVL_XATTR_PRE_NAME"opaque" 22 29 23 30 static inline int ovl_do_rmdir(struct inode *dir, struct dentry *dentry) 24 31 { ··· 137 130 void ovl_path_upper(struct dentry *dentry, struct path *path); 138 131 void ovl_path_lower(struct dentry *dentry, struct path *path); 139 132 enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path); 133 + int ovl_path_next(int idx, struct dentry *dentry, struct path *path); 140 134 struct dentry *ovl_dentry_upper(struct dentry *dentry); 141 135 struct dentry *ovl_dentry_lower(struct dentry *dentry); 142 136 struct dentry *ovl_dentry_real(struct dentry *dentry);
+75 -106
fs/overlayfs/readdir.c
··· 24 24 struct list_head l_node; 25 25 struct rb_node node; 26 26 bool is_whiteout; 27 - bool is_cursor; 28 27 char name[]; 29 28 }; 30 29 ··· 39 40 struct rb_root root; 40 41 struct list_head *list; 41 42 struct list_head middle; 43 + struct dentry *dir; 42 44 int count; 43 45 int err; 44 46 }; ··· 48 48 bool is_real; 49 49 bool is_upper; 50 50 struct ovl_dir_cache *cache; 51 - struct ovl_cache_entry cursor; 51 + struct list_head *cursor; 52 52 struct file *realfile; 53 53 struct file *upperfile; 54 54 }; ··· 79 79 return NULL; 80 80 } 81 81 82 - static struct ovl_cache_entry *ovl_cache_entry_new(const char *name, int len, 82 + static struct ovl_cache_entry *ovl_cache_entry_new(struct dentry *dir, 83 + const char *name, int len, 83 84 u64 ino, unsigned int d_type) 84 85 { 85 86 struct ovl_cache_entry *p; 86 87 size_t size = offsetof(struct ovl_cache_entry, name[len + 1]); 87 88 88 89 p = kmalloc(size, GFP_KERNEL); 89 - if (p) { 90 - memcpy(p->name, name, len); 91 - p->name[len] = '\0'; 92 - p->len = len; 93 - p->type = d_type; 94 - p->ino = ino; 95 - p->is_whiteout = false; 96 - p->is_cursor = false; 97 - } 90 + if (!p) 91 + return NULL; 98 92 93 + memcpy(p->name, name, len); 94 + p->name[len] = '\0'; 95 + p->len = len; 96 + p->type = d_type; 97 + p->ino = ino; 98 + p->is_whiteout = false; 99 + 100 + if (d_type == DT_CHR) { 101 + struct dentry *dentry; 102 + const struct cred *old_cred; 103 + struct cred *override_cred; 104 + 105 + override_cred = prepare_creds(); 106 + if (!override_cred) { 107 + kfree(p); 108 + return NULL; 109 + } 110 + 111 + /* 112 + * CAP_DAC_OVERRIDE for lookup 113 + */ 114 + cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); 115 + old_cred = override_creds(override_cred); 116 + 117 + dentry = lookup_one_len(name, dir, len); 118 + if (!IS_ERR(dentry)) { 119 + p->is_whiteout = ovl_is_whiteout(dentry); 120 + dput(dentry); 121 + } 122 + revert_creds(old_cred); 123 + put_cred(override_cred); 124 + } 99 125 return p; 100 126 } 101 127 
··· 148 122 return 0; 149 123 } 150 124 151 - p = ovl_cache_entry_new(name, len, ino, d_type); 125 + p = ovl_cache_entry_new(rdd->dir, name, len, ino, d_type); 152 126 if (p == NULL) 153 127 return -ENOMEM; 154 128 ··· 169 143 if (p) { 170 144 list_move_tail(&p->l_node, &rdd->middle); 171 145 } else { 172 - p = ovl_cache_entry_new(name, namelen, ino, d_type); 146 + p = ovl_cache_entry_new(rdd->dir, name, namelen, ino, d_type); 173 147 if (p == NULL) 174 148 rdd->err = -ENOMEM; 175 149 else ··· 194 168 { 195 169 struct ovl_dir_cache *cache = od->cache; 196 170 197 - list_del_init(&od->cursor.l_node); 198 171 WARN_ON(cache->refcount <= 0); 199 172 cache->refcount--; 200 173 if (!cache->refcount) { ··· 229 204 if (IS_ERR(realfile)) 230 205 return PTR_ERR(realfile); 231 206 207 + rdd->dir = realpath->dentry; 232 208 rdd->ctx.pos = 0; 233 209 do { 234 210 rdd->count = 0; ··· 253 227 if (cache && ovl_dentry_version_get(dentry) != cache->version) { 254 228 ovl_cache_put(od, dentry); 255 229 od->cache = NULL; 230 + od->cursor = NULL; 256 231 } 257 - WARN_ON(!od->is_real && type != OVL_PATH_MERGE); 258 - if (od->is_real && type == OVL_PATH_MERGE) 232 + WARN_ON(!od->is_real && !OVL_TYPE_MERGE(type)); 233 + if (od->is_real && OVL_TYPE_MERGE(type)) 259 234 od->is_real = false; 260 - } 261 - 262 - static int ovl_dir_mark_whiteouts(struct dentry *dir, 263 - struct ovl_readdir_data *rdd) 264 - { 265 - struct ovl_cache_entry *p; 266 - struct dentry *dentry; 267 - const struct cred *old_cred; 268 - struct cred *override_cred; 269 - 270 - override_cred = prepare_creds(); 271 - if (!override_cred) { 272 - ovl_cache_free(rdd->list); 273 - return -ENOMEM; 274 - } 275 - 276 - /* 277 - * CAP_DAC_OVERRIDE for lookup 278 - */ 279 - cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); 280 - old_cred = override_creds(override_cred); 281 - 282 - mutex_lock(&dir->d_inode->i_mutex); 283 - list_for_each_entry(p, rdd->list, l_node) { 284 - if (p->is_cursor) 285 - continue; 286 - 287 - if 
(p->type != DT_CHR) 288 - continue; 289 - 290 - dentry = lookup_one_len(p->name, dir, p->len); 291 - if (IS_ERR(dentry)) 292 - continue; 293 - 294 - p->is_whiteout = ovl_is_whiteout(dentry); 295 - dput(dentry); 296 - } 297 - mutex_unlock(&dir->d_inode->i_mutex); 298 - 299 - revert_creds(old_cred); 300 - put_cred(override_cred); 301 - 302 - return 0; 303 235 } 304 236 305 237 static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list) 306 238 { 307 239 int err; 308 - struct path lowerpath; 309 - struct path upperpath; 240 + struct path realpath; 310 241 struct ovl_readdir_data rdd = { 311 242 .ctx.actor = ovl_fill_merge, 312 243 .list = list, 313 244 .root = RB_ROOT, 314 245 .is_merge = false, 315 246 }; 247 + int idx, next; 316 248 317 - ovl_path_lower(dentry, &lowerpath); 318 - ovl_path_upper(dentry, &upperpath); 249 + for (idx = 0; idx != -1; idx = next) { 250 + next = ovl_path_next(idx, dentry, &realpath); 319 251 320 - if (upperpath.dentry) { 321 - err = ovl_dir_read(&upperpath, &rdd); 322 - if (err) 323 - goto out; 324 - 325 - if (lowerpath.dentry) { 326 - err = ovl_dir_mark_whiteouts(upperpath.dentry, &rdd); 252 + if (next != -1) { 253 + err = ovl_dir_read(&realpath, &rdd); 327 254 if (err) 328 - goto out; 255 + break; 256 + } else { 257 + /* 258 + * Insert lowest layer entries before upper ones, this 259 + * allows offsets to be reasonably constant 260 + */ 261 + list_add(&rdd.middle, rdd.list); 262 + rdd.is_merge = true; 263 + err = ovl_dir_read(&realpath, &rdd); 264 + list_del(&rdd.middle); 329 265 } 330 266 } 331 - if (lowerpath.dentry) { 332 - /* 333 - * Insert lowerpath entries before upperpath ones, this allows 334 - * offsets to be reasonably constant 335 - */ 336 - list_add(&rdd.middle, rdd.list); 337 - rdd.is_merge = true; 338 - err = ovl_dir_read(&lowerpath, &rdd); 339 - list_del(&rdd.middle); 340 - } 341 - out: 342 267 return err; 343 268 } 344 269 345 270 static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos) 346 271 { 
347 - struct ovl_cache_entry *p; 272 + struct list_head *p; 348 273 loff_t off = 0; 349 274 350 - list_for_each_entry(p, &od->cache->entries, l_node) { 351 - if (p->is_cursor) 352 - continue; 275 + list_for_each(p, &od->cache->entries) { 353 276 if (off >= pos) 354 277 break; 355 278 off++; 356 279 } 357 - list_move_tail(&od->cursor.l_node, &p->l_node); 280 + /* Cursor is safe since the cache is stable */ 281 + od->cursor = p; 358 282 } 359 283 360 284 static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry) ··· 343 367 { 344 368 struct ovl_dir_file *od = file->private_data; 345 369 struct dentry *dentry = file->f_path.dentry; 370 + struct ovl_cache_entry *p; 346 371 347 372 if (!ctx->pos) 348 373 ovl_dir_reset(file); ··· 362 385 ovl_seek_cursor(od, ctx->pos); 363 386 } 364 387 365 - while (od->cursor.l_node.next != &od->cache->entries) { 366 - struct ovl_cache_entry *p; 367 - 368 - p = list_entry(od->cursor.l_node.next, struct ovl_cache_entry, l_node); 369 - /* Skip cursors */ 370 - if (!p->is_cursor) { 371 - if (!p->is_whiteout) { 372 - if (!dir_emit(ctx, p->name, p->len, p->ino, p->type)) 373 - break; 374 - } 375 - ctx->pos++; 376 - } 377 - list_move(&od->cursor.l_node, &p->l_node); 388 + while (od->cursor != &od->cache->entries) { 389 + p = list_entry(od->cursor, struct ovl_cache_entry, l_node); 390 + if (!p->is_whiteout) 391 + if (!dir_emit(ctx, p->name, p->len, p->ino, p->type)) 392 + break; 393 + od->cursor = p->l_node.next; 394 + ctx->pos++; 378 395 } 379 396 return 0; 380 397 } ··· 423 452 /* 424 453 * Need to check if we started out being a lower dir, but got copied up 425 454 */ 426 - if (!od->is_upper && ovl_path_type(dentry) != OVL_PATH_LOWER) { 455 + if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) { 427 456 struct inode *inode = file_inode(file); 428 457 429 458 realfile = lockless_dereference(od->upperfile); ··· 487 516 kfree(od); 488 517 return PTR_ERR(realfile); 489 518 } 490 - INIT_LIST_HEAD(&od->cursor.l_node); 491 519 
od->realfile = realfile; 492 - od->is_real = (type != OVL_PATH_MERGE); 493 - od->is_upper = (type != OVL_PATH_LOWER); 494 - od->cursor.is_cursor = true; 520 + od->is_real = !OVL_TYPE_MERGE(type); 521 + od->is_upper = OVL_TYPE_UPPER(type); 495 522 file->private_data = od; 496 523 497 524 return 0;
+398 -206
fs/overlayfs/super.c
··· 35 35 /* private information held for overlayfs's superblock */ 36 36 struct ovl_fs { 37 37 struct vfsmount *upper_mnt; 38 - struct vfsmount *lower_mnt; 38 + unsigned numlower; 39 + struct vfsmount **lower_mnt; 39 40 struct dentry *workdir; 40 41 long lower_namelen; 41 42 /* pathnames of lower and upper dirs, for show_options */ ··· 48 47 /* private information held for every overlayfs dentry */ 49 48 struct ovl_entry { 50 49 struct dentry *__upperdentry; 51 - struct dentry *lowerdentry; 52 50 struct ovl_dir_cache *cache; 53 51 union { 54 52 struct { ··· 56 56 }; 57 57 struct rcu_head rcu; 58 58 }; 59 + unsigned numlower; 60 + struct path lowerstack[]; 59 61 }; 60 62 61 - const char *ovl_opaque_xattr = "trusted.overlay.opaque"; 63 + #define OVL_MAX_STACK 500 62 64 65 + static struct dentry *__ovl_dentry_lower(struct ovl_entry *oe) 66 + { 67 + return oe->numlower ? oe->lowerstack[0].dentry : NULL; 68 + } 63 69 64 70 enum ovl_path_type ovl_path_type(struct dentry *dentry) 65 71 { 66 72 struct ovl_entry *oe = dentry->d_fsdata; 73 + enum ovl_path_type type = 0; 67 74 68 75 if (oe->__upperdentry) { 69 - if (oe->lowerdentry) { 76 + type = __OVL_PATH_UPPER; 77 + 78 + if (oe->numlower) { 70 79 if (S_ISDIR(dentry->d_inode->i_mode)) 71 - return OVL_PATH_MERGE; 72 - else 73 - return OVL_PATH_UPPER; 74 - } else { 75 - if (oe->opaque) 76 - return OVL_PATH_UPPER; 77 - else 78 - return OVL_PATH_PURE_UPPER; 80 + type |= __OVL_PATH_MERGE; 81 + } else if (!oe->opaque) { 82 + type |= __OVL_PATH_PURE; 79 83 } 80 84 } else { 81 - return OVL_PATH_LOWER; 85 + if (oe->numlower > 1) 86 + type |= __OVL_PATH_MERGE; 82 87 } 88 + return type; 83 89 } 84 90 85 91 static struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe) ··· 104 98 105 99 enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path) 106 100 { 107 - 108 101 enum ovl_path_type type = ovl_path_type(dentry); 109 102 110 - if (type == OVL_PATH_LOWER) 103 + if (!OVL_TYPE_UPPER(type)) 111 104 
ovl_path_lower(dentry, path); 112 105 else 113 106 ovl_path_upper(dentry, path); ··· 125 120 { 126 121 struct ovl_entry *oe = dentry->d_fsdata; 127 122 128 - return oe->lowerdentry; 123 + return __ovl_dentry_lower(oe); 129 124 } 130 125 131 126 struct dentry *ovl_dentry_real(struct dentry *dentry) ··· 135 130 136 131 realdentry = ovl_upperdentry_dereference(oe); 137 132 if (!realdentry) 138 - realdentry = oe->lowerdentry; 133 + realdentry = __ovl_dentry_lower(oe); 139 134 140 135 return realdentry; 141 136 } ··· 148 143 if (realdentry) { 149 144 *is_upper = true; 150 145 } else { 151 - realdentry = oe->lowerdentry; 146 + realdentry = __ovl_dentry_lower(oe); 152 147 *is_upper = false; 153 148 } 154 149 return realdentry; ··· 170 165 171 166 void ovl_path_lower(struct dentry *dentry, struct path *path) 172 167 { 173 - struct ovl_fs *ofs = dentry->d_sb->s_fs_info; 174 168 struct ovl_entry *oe = dentry->d_fsdata; 175 169 176 - path->mnt = ofs->lower_mnt; 177 - path->dentry = oe->lowerdentry; 170 + *path = oe->numlower ? 
oe->lowerstack[0] : (struct path) { NULL, NULL }; 178 171 } 179 172 180 173 int ovl_want_write(struct dentry *dentry) ··· 252 249 if (!S_ISDIR(inode->i_mode) || !inode->i_op->getxattr) 253 250 return false; 254 251 255 - res = inode->i_op->getxattr(dentry, ovl_opaque_xattr, &val, 1); 252 + res = inode->i_op->getxattr(dentry, OVL_XATTR_OPAQUE, &val, 1); 256 253 if (res == 1 && val == 'y') 257 254 return true; 258 255 ··· 264 261 struct ovl_entry *oe = dentry->d_fsdata; 265 262 266 263 if (oe) { 264 + unsigned int i; 265 + 267 266 dput(oe->__upperdentry); 268 - dput(oe->lowerdentry); 267 + for (i = 0; i < oe->numlower; i++) 268 + dput(oe->lowerstack[i].dentry); 269 269 kfree_rcu(oe, rcu); 270 270 } 271 271 } ··· 277 271 .d_release = ovl_dentry_release, 278 272 }; 279 273 280 - static struct ovl_entry *ovl_alloc_entry(void) 274 + static struct ovl_entry *ovl_alloc_entry(unsigned int numlower) 281 275 { 282 - return kzalloc(sizeof(struct ovl_entry), GFP_KERNEL); 276 + size_t size = offsetof(struct ovl_entry, lowerstack[numlower]); 277 + struct ovl_entry *oe = kzalloc(size, GFP_KERNEL); 278 + 279 + if (oe) 280 + oe->numlower = numlower; 281 + 282 + return oe; 283 283 } 284 284 285 285 static inline struct dentry *ovl_lookup_real(struct dentry *dir, ··· 307 295 return dentry; 308 296 } 309 297 298 + /* 299 + * Returns next layer in stack starting from top. 300 + * Returns -1 if this is the last layer. 301 + */ 302 + int ovl_path_next(int idx, struct dentry *dentry, struct path *path) 303 + { 304 + struct ovl_entry *oe = dentry->d_fsdata; 305 + 306 + BUG_ON(idx < 0); 307 + if (idx == 0) { 308 + ovl_path_upper(dentry, path); 309 + if (path->dentry) 310 + return oe->numlower ? 1 : -1; 311 + idx++; 312 + } 313 + BUG_ON(idx > oe->numlower); 314 + *path = oe->lowerstack[idx - 1]; 315 + 316 + return (idx < oe->numlower) ? 
idx + 1 : -1; 317 + } 318 + 310 319 struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, 311 320 unsigned int flags) 312 321 { 313 322 struct ovl_entry *oe; 314 - struct dentry *upperdir; 315 - struct dentry *lowerdir; 316 - struct dentry *upperdentry = NULL; 317 - struct dentry *lowerdentry = NULL; 323 + struct ovl_entry *poe = dentry->d_parent->d_fsdata; 324 + struct path *stack = NULL; 325 + struct dentry *upperdir, *upperdentry = NULL; 326 + unsigned int ctr = 0; 318 327 struct inode *inode = NULL; 328 + bool upperopaque = false; 329 + struct dentry *this, *prev = NULL; 330 + unsigned int i; 319 331 int err; 320 332 321 - err = -ENOMEM; 322 - oe = ovl_alloc_entry(); 323 - if (!oe) 324 - goto out; 325 - 326 - upperdir = ovl_dentry_upper(dentry->d_parent); 327 - lowerdir = ovl_dentry_lower(dentry->d_parent); 328 - 333 + upperdir = ovl_upperdentry_dereference(poe); 329 334 if (upperdir) { 330 - upperdentry = ovl_lookup_real(upperdir, &dentry->d_name); 331 - err = PTR_ERR(upperdentry); 332 - if (IS_ERR(upperdentry)) 333 - goto out_put_dir; 335 + this = ovl_lookup_real(upperdir, &dentry->d_name); 336 + err = PTR_ERR(this); 337 + if (IS_ERR(this)) 338 + goto out; 334 339 335 - if (lowerdir && upperdentry) { 336 - if (ovl_is_whiteout(upperdentry)) { 337 - dput(upperdentry); 338 - upperdentry = NULL; 339 - oe->opaque = true; 340 - } else if (ovl_is_opaquedir(upperdentry)) { 341 - oe->opaque = true; 340 + if (this) { 341 + if (ovl_is_whiteout(this)) { 342 + dput(this); 343 + this = NULL; 344 + upperopaque = true; 345 + } else if (poe->numlower && ovl_is_opaquedir(this)) { 346 + upperopaque = true; 342 347 } 343 348 } 344 - } 345 - if (lowerdir && !oe->opaque) { 346 - lowerdentry = ovl_lookup_real(lowerdir, &dentry->d_name); 347 - err = PTR_ERR(lowerdentry); 348 - if (IS_ERR(lowerdentry)) 349 - goto out_dput_upper; 349 + upperdentry = prev = this; 350 350 } 351 351 352 - if (lowerdentry && upperdentry && 353 - (!S_ISDIR(upperdentry->d_inode->i_mode) || 354 
- !S_ISDIR(lowerdentry->d_inode->i_mode))) { 355 - dput(lowerdentry); 356 - lowerdentry = NULL; 357 - oe->opaque = true; 352 + if (!upperopaque && poe->numlower) { 353 + err = -ENOMEM; 354 + stack = kcalloc(poe->numlower, sizeof(struct path), GFP_KERNEL); 355 + if (!stack) 356 + goto out_put_upper; 358 357 } 359 358 360 - if (lowerdentry || upperdentry) { 359 + for (i = 0; !upperopaque && i < poe->numlower; i++) { 360 + bool opaque = false; 361 + struct path lowerpath = poe->lowerstack[i]; 362 + 363 + this = ovl_lookup_real(lowerpath.dentry, &dentry->d_name); 364 + err = PTR_ERR(this); 365 + if (IS_ERR(this)) { 366 + /* 367 + * If it's positive, then treat ENAMETOOLONG as ENOENT. 368 + */ 369 + if (err == -ENAMETOOLONG && (upperdentry || ctr)) 370 + continue; 371 + goto out_put; 372 + } 373 + if (!this) 374 + continue; 375 + if (ovl_is_whiteout(this)) { 376 + dput(this); 377 + break; 378 + } 379 + /* 380 + * Only makes sense to check opaque dir if this is not the 381 + * lowermost layer. 382 + */ 383 + if (i < poe->numlower - 1 && ovl_is_opaquedir(this)) 384 + opaque = true; 385 + 386 + if (prev && (!S_ISDIR(prev->d_inode->i_mode) || 387 + !S_ISDIR(this->d_inode->i_mode))) { 388 + /* 389 + * FIXME: check for upper-opaqueness maybe better done 390 + * in remove code. 391 + */ 392 + if (prev == upperdentry) 393 + upperopaque = true; 394 + dput(this); 395 + break; 396 + } 397 + /* 398 + * If this is a non-directory then stop here. 399 + */ 400 + if (!S_ISDIR(this->d_inode->i_mode)) 401 + opaque = true; 402 + 403 + stack[ctr].dentry = this; 404 + stack[ctr].mnt = lowerpath.mnt; 405 + ctr++; 406 + prev = this; 407 + if (opaque) 408 + break; 409 + } 410 + 411 + oe = ovl_alloc_entry(ctr); 412 + err = -ENOMEM; 413 + if (!oe) 414 + goto out_put; 415 + 416 + if (upperdentry || ctr) { 361 417 struct dentry *realdentry; 362 418 363 - realdentry = upperdentry ? upperdentry : lowerdentry; 419 + realdentry = upperdentry ? 
upperdentry : stack[0].dentry; 420 + 364 421 err = -ENOMEM; 365 422 inode = ovl_new_inode(dentry->d_sb, realdentry->d_inode->i_mode, 366 423 oe); 367 424 if (!inode) 368 - goto out_dput; 425 + goto out_free_oe; 369 426 ovl_copyattr(realdentry->d_inode, inode); 370 427 } 371 428 429 + oe->opaque = upperopaque; 372 430 oe->__upperdentry = upperdentry; 373 - oe->lowerdentry = lowerdentry; 374 - 431 + memcpy(oe->lowerstack, stack, sizeof(struct path) * ctr); 432 + kfree(stack); 375 433 dentry->d_fsdata = oe; 376 434 d_add(dentry, inode); 377 435 378 436 return NULL; 379 437 380 - out_dput: 381 - dput(lowerdentry); 382 - out_dput_upper: 383 - dput(upperdentry); 384 - out_put_dir: 438 + out_free_oe: 385 439 kfree(oe); 440 + out_put: 441 + for (i = 0; i < ctr; i++) 442 + dput(stack[i].dentry); 443 + kfree(stack); 444 + out_put_upper: 445 + dput(upperdentry); 386 446 out: 387 447 return ERR_PTR(err); 388 448 } ··· 467 383 static void ovl_put_super(struct super_block *sb) 468 384 { 469 385 struct ovl_fs *ufs = sb->s_fs_info; 386 + unsigned i; 470 387 471 388 dput(ufs->workdir); 472 389 mntput(ufs->upper_mnt); 473 - mntput(ufs->lower_mnt); 390 + for (i = 0; i < ufs->numlower; i++) 391 + mntput(ufs->lower_mnt[i]); 474 392 475 393 kfree(ufs->config.lowerdir); 476 394 kfree(ufs->config.upperdir); ··· 486 400 * @buf: The struct kstatfs to fill in with stats 487 401 * 488 402 * Get the filesystem statistics. As writes always target the upper layer 489 - * filesystem pass the statfs to the same filesystem. 
403 + * filesystem pass the statfs to the upper filesystem (if it exists) 490 404 */ 491 405 static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf) 492 406 { ··· 495 409 struct path path; 496 410 int err; 497 411 498 - ovl_path_upper(root_dentry, &path); 412 + ovl_path_real(root_dentry, &path); 499 413 500 414 err = vfs_statfs(&path, buf); 501 415 if (!err) { ··· 518 432 struct ovl_fs *ufs = sb->s_fs_info; 519 433 520 434 seq_printf(m, ",lowerdir=%s", ufs->config.lowerdir); 521 - seq_printf(m, ",upperdir=%s", ufs->config.upperdir); 522 - seq_printf(m, ",workdir=%s", ufs->config.workdir); 435 + if (ufs->config.upperdir) { 436 + seq_printf(m, ",upperdir=%s", ufs->config.upperdir); 437 + seq_printf(m, ",workdir=%s", ufs->config.workdir); 438 + } 439 + return 0; 440 + } 441 + 442 + static int ovl_remount(struct super_block *sb, int *flags, char *data) 443 + { 444 + struct ovl_fs *ufs = sb->s_fs_info; 445 + 446 + if (!(*flags & MS_RDONLY) && 447 + (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY))) 448 + return -EROFS; 449 + 523 450 return 0; 524 451 } 525 452 ··· 540 441 .put_super = ovl_put_super, 541 442 .statfs = ovl_statfs, 542 443 .show_options = ovl_show_options, 444 + .remount_fs = ovl_remount, 543 445 }; 544 446 545 447 enum { ··· 685 585 } 686 586 } 687 587 688 - static int ovl_mount_dir(const char *name, struct path *path) 689 - { 690 - int err; 691 - char *tmp = kstrdup(name, GFP_KERNEL); 692 - 693 - if (!tmp) 694 - return -ENOMEM; 695 - 696 - ovl_unescape(tmp); 697 - err = kern_path(tmp, LOOKUP_FOLLOW, path); 698 - if (err) { 699 - pr_err("overlayfs: failed to resolve '%s': %i\n", tmp, err); 700 - err = -EINVAL; 701 - } 702 - kfree(tmp); 703 - return err; 704 - } 705 - 706 588 static bool ovl_is_allowed_fs_type(struct dentry *root) 707 589 { 708 590 const struct dentry_operations *dop = root->d_op; ··· 704 622 return true; 705 623 } 706 624 625 + static int ovl_mount_dir_noesc(const char *name, struct path *path) 626 + { 627 + int 
err = -EINVAL; 628 + 629 + if (!*name) { 630 + pr_err("overlayfs: empty lowerdir\n"); 631 + goto out; 632 + } 633 + err = kern_path(name, LOOKUP_FOLLOW, path); 634 + if (err) { 635 + pr_err("overlayfs: failed to resolve '%s': %i\n", name, err); 636 + goto out; 637 + } 638 + err = -EINVAL; 639 + if (!ovl_is_allowed_fs_type(path->dentry)) { 640 + pr_err("overlayfs: filesystem on '%s' not supported\n", name); 641 + goto out_put; 642 + } 643 + if (!S_ISDIR(path->dentry->d_inode->i_mode)) { 644 + pr_err("overlayfs: '%s' not a directory\n", name); 645 + goto out_put; 646 + } 647 + return 0; 648 + 649 + out_put: 650 + path_put(path); 651 + out: 652 + return err; 653 + } 654 + 655 + static int ovl_mount_dir(const char *name, struct path *path) 656 + { 657 + int err = -ENOMEM; 658 + char *tmp = kstrdup(name, GFP_KERNEL); 659 + 660 + if (tmp) { 661 + ovl_unescape(tmp); 662 + err = ovl_mount_dir_noesc(tmp, path); 663 + kfree(tmp); 664 + } 665 + return err; 666 + } 667 + 668 + static int ovl_lower_dir(const char *name, struct path *path, long *namelen, 669 + int *stack_depth) 670 + { 671 + int err; 672 + struct kstatfs statfs; 673 + 674 + err = ovl_mount_dir_noesc(name, path); 675 + if (err) 676 + goto out; 677 + 678 + err = vfs_statfs(path, &statfs); 679 + if (err) { 680 + pr_err("overlayfs: statfs failed on '%s'\n", name); 681 + goto out_put; 682 + } 683 + *namelen = max(*namelen, statfs.f_namelen); 684 + *stack_depth = max(*stack_depth, path->mnt->mnt_sb->s_stack_depth); 685 + 686 + return 0; 687 + 688 + out_put: 689 + path_put(path); 690 + out: 691 + return err; 692 + } 693 + 707 694 /* Workdir should not be subdir of upperdir and vice versa */ 708 695 static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir) 709 696 { ··· 785 634 return ok; 786 635 } 787 636 637 + static unsigned int ovl_split_lowerdirs(char *str) 638 + { 639 + unsigned int ctr = 1; 640 + char *s, *d; 641 + 642 + for (s = d = str;; s++, d++) { 643 + if (*s == '\\') { 644 + s++; 645 + } 
else if (*s == ':') { 646 + *d = '\0'; 647 + ctr++; 648 + continue; 649 + } 650 + *d = *s; 651 + if (!*s) 652 + break; 653 + } 654 + return ctr; 655 + } 656 + 788 657 static int ovl_fill_super(struct super_block *sb, void *data, int silent) 789 658 { 790 - struct path lowerpath; 791 - struct path upperpath; 792 - struct path workpath; 793 - struct inode *root_inode; 659 + struct path upperpath = { NULL, NULL }; 660 + struct path workpath = { NULL, NULL }; 794 661 struct dentry *root_dentry; 795 662 struct ovl_entry *oe; 796 663 struct ovl_fs *ufs; 797 - struct kstatfs statfs; 664 + struct path *stack = NULL; 665 + char *lowertmp; 666 + char *lower; 667 + unsigned int numlower; 668 + unsigned int stacklen = 0; 669 + unsigned int i; 798 670 int err; 799 671 800 672 err = -ENOMEM; ··· 829 655 if (err) 830 656 goto out_free_config; 831 657 832 - /* FIXME: workdir is not needed for a R/O mount */ 833 658 err = -EINVAL; 834 - if (!ufs->config.upperdir || !ufs->config.lowerdir || 835 - !ufs->config.workdir) { 836 - pr_err("overlayfs: missing upperdir or lowerdir or workdir\n"); 659 + if (!ufs->config.lowerdir) { 660 + pr_err("overlayfs: missing 'lowerdir'\n"); 837 661 goto out_free_config; 662 + } 663 + 664 + sb->s_stack_depth = 0; 665 + if (ufs->config.upperdir) { 666 + /* FIXME: workdir is not needed for a R/O mount */ 667 + if (!ufs->config.workdir) { 668 + pr_err("overlayfs: missing 'workdir'\n"); 669 + goto out_free_config; 670 + } 671 + 672 + err = ovl_mount_dir(ufs->config.upperdir, &upperpath); 673 + if (err) 674 + goto out_free_config; 675 + 676 + err = ovl_mount_dir(ufs->config.workdir, &workpath); 677 + if (err) 678 + goto out_put_upperpath; 679 + 680 + err = -EINVAL; 681 + if (upperpath.mnt != workpath.mnt) { 682 + pr_err("overlayfs: workdir and upperdir must reside under the same mount\n"); 683 + goto out_put_workpath; 684 + } 685 + if (!ovl_workdir_ok(workpath.dentry, upperpath.dentry)) { 686 + pr_err("overlayfs: workdir and upperdir must be separate 
subtrees\n"); 687 + goto out_put_workpath; 688 + } 689 + sb->s_stack_depth = upperpath.mnt->mnt_sb->s_stack_depth; 690 + } 691 + err = -ENOMEM; 692 + lowertmp = kstrdup(ufs->config.lowerdir, GFP_KERNEL); 693 + if (!lowertmp) 694 + goto out_put_workpath; 695 + 696 + err = -EINVAL; 697 + stacklen = ovl_split_lowerdirs(lowertmp); 698 + if (stacklen > OVL_MAX_STACK) 699 + goto out_free_lowertmp; 700 + 701 + stack = kcalloc(stacklen, sizeof(struct path), GFP_KERNEL); 702 + if (!stack) 703 + goto out_free_lowertmp; 704 + 705 + lower = lowertmp; 706 + for (numlower = 0; numlower < stacklen; numlower++) { 707 + err = ovl_lower_dir(lower, &stack[numlower], 708 + &ufs->lower_namelen, &sb->s_stack_depth); 709 + if (err) 710 + goto out_put_lowerpath; 711 + 712 + lower = strchr(lower, '\0') + 1; 713 + } 714 + 715 + err = -EINVAL; 716 + sb->s_stack_depth++; 717 + if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) { 718 + pr_err("overlayfs: maximum fs stacking depth exceeded\n"); 719 + goto out_put_lowerpath; 720 + } 721 + 722 + if (ufs->config.upperdir) { 723 + ufs->upper_mnt = clone_private_mount(&upperpath); 724 + err = PTR_ERR(ufs->upper_mnt); 725 + if (IS_ERR(ufs->upper_mnt)) { 726 + pr_err("overlayfs: failed to clone upperpath\n"); 727 + goto out_put_lowerpath; 728 + } 729 + 730 + ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry); 731 + err = PTR_ERR(ufs->workdir); 732 + if (IS_ERR(ufs->workdir)) { 733 + pr_err("overlayfs: failed to create directory %s/%s\n", 734 + ufs->config.workdir, OVL_WORKDIR_NAME); 735 + goto out_put_upper_mnt; 736 + } 838 737 } 839 738 840 739 err = -ENOMEM; 841 - oe = ovl_alloc_entry(); 842 - if (oe == NULL) 843 - goto out_free_config; 740 + ufs->lower_mnt = kcalloc(numlower, sizeof(struct vfsmount *), GFP_KERNEL); 741 + if (ufs->lower_mnt == NULL) 742 + goto out_put_workdir; 743 + for (i = 0; i < numlower; i++) { 744 + struct vfsmount *mnt = clone_private_mount(&stack[i]); 844 745 845 - err = ovl_mount_dir(ufs->config.upperdir, 
&upperpath); 846 - if (err) 847 - goto out_free_oe; 746 + err = PTR_ERR(mnt); 747 + if (IS_ERR(mnt)) { 748 + pr_err("overlayfs: failed to clone lowerpath\n"); 749 + goto out_put_lower_mnt; 750 + } 751 + /* 752 + * Make lower_mnt R/O. That way fchmod/fchown on lower file 753 + * will fail instead of modifying lower fs. 754 + */ 755 + mnt->mnt_flags |= MNT_READONLY; 848 756 849 - err = ovl_mount_dir(ufs->config.lowerdir, &lowerpath); 850 - if (err) 851 - goto out_put_upperpath; 852 - 853 - err = ovl_mount_dir(ufs->config.workdir, &workpath); 854 - if (err) 855 - goto out_put_lowerpath; 856 - 857 - err = -EINVAL; 858 - if (!S_ISDIR(upperpath.dentry->d_inode->i_mode) || 859 - !S_ISDIR(lowerpath.dentry->d_inode->i_mode) || 860 - !S_ISDIR(workpath.dentry->d_inode->i_mode)) { 861 - pr_err("overlayfs: upperdir or lowerdir or workdir not a directory\n"); 862 - goto out_put_workpath; 757 + ufs->lower_mnt[ufs->numlower] = mnt; 758 + ufs->numlower++; 863 759 } 864 760 865 - if (upperpath.mnt != workpath.mnt) { 866 - pr_err("overlayfs: workdir and upperdir must reside under the same mount\n"); 867 - goto out_put_workpath; 868 - } 869 - if (!ovl_workdir_ok(workpath.dentry, upperpath.dentry)) { 870 - pr_err("overlayfs: workdir and upperdir must be separate subtrees\n"); 871 - goto out_put_workpath; 872 - } 873 - 874 - if (!ovl_is_allowed_fs_type(upperpath.dentry)) { 875 - pr_err("overlayfs: filesystem of upperdir is not supported\n"); 876 - goto out_put_workpath; 877 - } 878 - 879 - if (!ovl_is_allowed_fs_type(lowerpath.dentry)) { 880 - pr_err("overlayfs: filesystem of lowerdir is not supported\n"); 881 - goto out_put_workpath; 882 - } 883 - 884 - err = vfs_statfs(&lowerpath, &statfs); 885 - if (err) { 886 - pr_err("overlayfs: statfs failed on lowerpath\n"); 887 - goto out_put_workpath; 888 - } 889 - ufs->lower_namelen = statfs.f_namelen; 890 - 891 - sb->s_stack_depth = max(upperpath.mnt->mnt_sb->s_stack_depth, 892 - lowerpath.mnt->mnt_sb->s_stack_depth) + 1; 893 - 894 - err = 
-EINVAL; 895 - if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) { 896 - pr_err("overlayfs: maximum fs stacking depth exceeded\n"); 897 - goto out_put_workpath; 898 - } 899 - 900 - ufs->upper_mnt = clone_private_mount(&upperpath); 901 - err = PTR_ERR(ufs->upper_mnt); 902 - if (IS_ERR(ufs->upper_mnt)) { 903 - pr_err("overlayfs: failed to clone upperpath\n"); 904 - goto out_put_workpath; 905 - } 906 - 907 - ufs->lower_mnt = clone_private_mount(&lowerpath); 908 - err = PTR_ERR(ufs->lower_mnt); 909 - if (IS_ERR(ufs->lower_mnt)) { 910 - pr_err("overlayfs: failed to clone lowerpath\n"); 911 - goto out_put_upper_mnt; 912 - } 913 - 914 - ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry); 915 - err = PTR_ERR(ufs->workdir); 916 - if (IS_ERR(ufs->workdir)) { 917 - pr_err("overlayfs: failed to create directory %s/%s\n", 918 - ufs->config.workdir, OVL_WORKDIR_NAME); 919 - goto out_put_lower_mnt; 920 - } 921 - 922 - /* 923 - * Make lower_mnt R/O. That way fchmod/fchown on lower file 924 - * will fail instead of modifying lower fs. 
925 - */ 926 - ufs->lower_mnt->mnt_flags |= MNT_READONLY; 927 - 928 - /* If the upper fs is r/o, we mark overlayfs r/o too */ 929 - if (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY) 761 + /* If the upper fs is r/o or nonexistent, we mark overlayfs r/o too */ 762 + if (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)) 930 763 sb->s_flags |= MS_RDONLY; 931 764 932 765 sb->s_d_op = &ovl_dentry_operations; 933 766 934 767 err = -ENOMEM; 935 - root_inode = ovl_new_inode(sb, S_IFDIR, oe); 936 - if (!root_inode) 937 - goto out_put_workdir; 768 + oe = ovl_alloc_entry(numlower); 769 + if (!oe) 770 + goto out_put_lower_mnt; 938 771 939 - root_dentry = d_make_root(root_inode); 772 + root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR, oe)); 940 773 if (!root_dentry) 941 - goto out_put_workdir; 774 + goto out_free_oe; 942 775 943 776 mntput(upperpath.mnt); 944 - mntput(lowerpath.mnt); 777 + for (i = 0; i < numlower; i++) 778 + mntput(stack[i].mnt); 945 779 path_put(&workpath); 780 + kfree(lowertmp); 946 781 947 782 oe->__upperdentry = upperpath.dentry; 948 - oe->lowerdentry = lowerpath.dentry; 783 + for (i = 0; i < numlower; i++) { 784 + oe->lowerstack[i].dentry = stack[i].dentry; 785 + oe->lowerstack[i].mnt = ufs->lower_mnt[i]; 786 + } 949 787 950 788 root_dentry->d_fsdata = oe; 951 789 ··· 968 782 969 783 return 0; 970 784 971 - out_put_workdir: 972 - dput(ufs->workdir); 973 - out_put_lower_mnt: 974 - mntput(ufs->lower_mnt); 975 - out_put_upper_mnt: 976 - mntput(ufs->upper_mnt); 977 - out_put_workpath: 978 - path_put(&workpath); 979 - out_put_lowerpath: 980 - path_put(&lowerpath); 981 - out_put_upperpath: 982 - path_put(&upperpath); 983 785 out_free_oe: 984 786 kfree(oe); 787 + out_put_lower_mnt: 788 + for (i = 0; i < ufs->numlower; i++) 789 + mntput(ufs->lower_mnt[i]); 790 + kfree(ufs->lower_mnt); 791 + out_put_workdir: 792 + dput(ufs->workdir); 793 + out_put_upper_mnt: 794 + mntput(ufs->upper_mnt); 795 + out_put_lowerpath: 796 + for (i = 0; i < numlower; 
i++) 797 + path_put(&stack[i]); 798 + kfree(stack); 799 + out_free_lowertmp: 800 + kfree(lowertmp); 801 + out_put_workpath: 802 + path_put(&workpath); 803 + out_put_upperpath: 804 + path_put(&upperpath); 985 805 out_free_config: 986 806 kfree(ufs->config.lowerdir); 987 807 kfree(ufs->config.upperdir);
+11 -7
fs/posix_acl.c
··· 564 564 565 565 *acl = posix_acl_clone(p, GFP_NOFS); 566 566 if (!*acl) 567 - return -ENOMEM; 567 + goto no_mem; 568 568 569 569 ret = posix_acl_create_masq(*acl, mode); 570 - if (ret < 0) { 571 - posix_acl_release(*acl); 572 - return -ENOMEM; 573 - } 570 + if (ret < 0) 571 + goto no_mem_clone; 574 572 575 573 if (ret == 0) { 576 574 posix_acl_release(*acl); ··· 589 591 *default_acl = NULL; 590 592 *acl = NULL; 591 593 return 0; 594 + 595 + no_mem_clone: 596 + posix_acl_release(*acl); 597 + no_mem: 598 + posix_acl_release(p); 599 + return -ENOMEM; 592 600 } 593 601 EXPORT_SYMBOL_GPL(posix_acl_create); 594 602 ··· 776 772 777 773 if (!IS_POSIXACL(dentry->d_inode)) 778 774 return -EOPNOTSUPP; 779 - if (S_ISLNK(dentry->d_inode->i_mode)) 775 + if (d_is_symlink(dentry)) 780 776 return -EOPNOTSUPP; 781 777 782 778 acl = get_acl(dentry->d_inode, type); ··· 836 832 837 833 if (!IS_POSIXACL(dentry->d_inode)) 838 834 return -EOPNOTSUPP; 839 - if (S_ISLNK(dentry->d_inode->i_mode)) 835 + if (d_is_symlink(dentry)) 840 836 return -EOPNOTSUPP; 841 837 842 838 if (type == ACL_TYPE_ACCESS)
-12
fs/proc/generic.c
··· 19 19 #include <linux/mount.h> 20 20 #include <linux/init.h> 21 21 #include <linux/idr.h> 22 - #include <linux/namei.h> 23 22 #include <linux/bitops.h> 24 23 #include <linux/spinlock.h> 25 24 #include <linux/completion.h> ··· 221 222 ida_remove(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST); 222 223 spin_unlock_irqrestore(&proc_inum_lock, flags); 223 224 } 224 - 225 - static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd) 226 - { 227 - nd_set_link(nd, __PDE_DATA(dentry->d_inode)); 228 - return NULL; 229 - } 230 - 231 - static const struct inode_operations proc_link_inode_operations = { 232 - .readlink = generic_readlink, 233 - .follow_link = proc_follow_link, 234 - }; 235 225 236 226 /* 237 227 * Don't create negative dentries here, return -ENOENT by hand
+21
fs/proc/inode.c
··· 23 23 #include <linux/slab.h> 24 24 #include <linux/mount.h> 25 25 #include <linux/magic.h> 26 + #include <linux/namei.h> 26 27 27 28 #include <asm/uaccess.h> 28 29 ··· 393 392 .release = proc_reg_release, 394 393 }; 395 394 #endif 395 + 396 + static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd) 397 + { 398 + struct proc_dir_entry *pde = PDE(dentry->d_inode); 399 + if (unlikely(!use_pde(pde))) 400 + return ERR_PTR(-EINVAL); 401 + nd_set_link(nd, pde->data); 402 + return pde; 403 + } 404 + 405 + static void proc_put_link(struct dentry *dentry, struct nameidata *nd, void *p) 406 + { 407 + unuse_pde(p); 408 + } 409 + 410 + const struct inode_operations proc_link_inode_operations = { 411 + .readlink = generic_readlink, 412 + .follow_link = proc_follow_link, 413 + .put_link = proc_put_link, 414 + }; 396 415 397 416 struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) 398 417 {
+1
fs/proc/internal.h
··· 200 200 int closing; 201 201 struct completion *c; 202 202 }; 203 + extern const struct inode_operations proc_link_inode_operations; 203 204 204 205 extern const struct inode_operations proc_pid_link_inode_operations; 205 206
+2 -2
fs/reiserfs/xattr.c
··· 266 266 for (i = 0; !err && i < buf.count && buf.dentries[i]; i++) { 267 267 struct dentry *dentry = buf.dentries[i]; 268 268 269 - if (!S_ISDIR(dentry->d_inode->i_mode)) 269 + if (!d_is_dir(dentry)) 270 270 err = action(dentry, data); 271 271 272 272 dput(dentry); ··· 322 322 struct inode *dir = dentry->d_parent->d_inode; 323 323 324 324 /* This is the xattr dir, handle specially. */ 325 - if (S_ISDIR(dentry->d_inode->i_mode)) 325 + if (d_is_dir(dentry)) 326 326 return xattr_rmdir(dir, dentry); 327 327 328 328 return xattr_unlink(dir, dentry);
+18 -22
fs/super.c
··· 71 71 if (!(sc->gfp_mask & __GFP_FS)) 72 72 return SHRINK_STOP; 73 73 74 - if (!grab_super_passive(sb)) 74 + if (!trylock_super(sb)) 75 75 return SHRINK_STOP; 76 76 77 77 if (sb->s_op->nr_cached_objects) ··· 105 105 freed += sb->s_op->free_cached_objects(sb, sc); 106 106 } 107 107 108 - drop_super(sb); 108 + up_read(&sb->s_umount); 109 109 return freed; 110 110 } 111 111 ··· 118 118 sb = container_of(shrink, struct super_block, s_shrink); 119 119 120 120 /* 121 - * Don't call grab_super_passive as it is a potential 121 + * Don't call trylock_super as it is a potential 122 122 * scalability bottleneck. The counts could get updated 123 123 * between super_cache_count and super_cache_scan anyway. 124 124 * Call to super_cache_count with shrinker_rwsem held ··· 348 348 } 349 349 350 350 /* 351 - * grab_super_passive - acquire a passive reference 351 + * trylock_super - try to grab ->s_umount shared 352 352 * @sb: reference we are trying to grab 353 353 * 354 - * Tries to acquire a passive reference. This is used in places where we 354 + * Try to prevent fs shutdown. This is used in places where we 355 355 * cannot take an active reference but we need to ensure that the 356 - * superblock does not go away while we are working on it. It returns 357 - * false if a reference was not gained, and returns true with the s_umount 358 - * lock held in read mode if a reference is gained. On successful return, 359 - * the caller must drop the s_umount lock and the passive reference when 360 - * done. 356 + * filesystem is not shut down while we are working on it. It returns 357 + * false if we cannot acquire s_umount or if we lose the race and 358 + * filesystem already got into shutdown, and returns true with the s_umount 359 + * lock held in read mode in case of success. On successful return, 360 + * the caller must drop the s_umount lock when done. 361 + * 362 + * Note that unlike get_super() et.al. this one does *not* bump ->s_count. 
363 + * The reason why it's safe is that we are OK with doing trylock instead 364 + * of down_read(). There's a couple of places that are OK with that, but 365 + * it's very much not a general-purpose interface. 361 366 */ 362 - bool grab_super_passive(struct super_block *sb) 367 + bool trylock_super(struct super_block *sb) 363 368 { 364 - spin_lock(&sb_lock); 365 - if (hlist_unhashed(&sb->s_instances)) { 366 - spin_unlock(&sb_lock); 367 - return false; 368 - } 369 - 370 - sb->s_count++; 371 - spin_unlock(&sb_lock); 372 - 373 369 if (down_read_trylock(&sb->s_umount)) { 374 - if (sb->s_root && (sb->s_flags & MS_BORN)) 370 + if (!hlist_unhashed(&sb->s_instances) && 371 + sb->s_root && (sb->s_flags & MS_BORN)) 375 372 return true; 376 373 up_read(&sb->s_umount); 377 374 } 378 375 379 - put_super(sb); 380 376 return false; 381 377 } 382 378
+1 -1
fs/xfs/xfs_ioctl.c
··· 287 287 return PTR_ERR(dentry); 288 288 289 289 /* Restrict this handle operation to symlinks only. */ 290 - if (!S_ISLNK(dentry->d_inode->i_mode)) { 290 + if (!d_is_symlink(dentry)) { 291 291 error = -EINVAL; 292 292 goto out_dput; 293 293 }
+96 -7
include/linux/dcache.h
··· 215 215 #define DCACHE_LRU_LIST 0x00080000 216 216 217 217 #define DCACHE_ENTRY_TYPE 0x00700000 218 - #define DCACHE_MISS_TYPE 0x00000000 /* Negative dentry */ 219 - #define DCACHE_DIRECTORY_TYPE 0x00100000 /* Normal directory */ 220 - #define DCACHE_AUTODIR_TYPE 0x00200000 /* Lookupless directory (presumed automount) */ 221 - #define DCACHE_SYMLINK_TYPE 0x00300000 /* Symlink */ 222 - #define DCACHE_FILE_TYPE 0x00400000 /* Other file type */ 218 + #define DCACHE_MISS_TYPE 0x00000000 /* Negative dentry (maybe fallthru to nowhere) */ 219 + #define DCACHE_WHITEOUT_TYPE 0x00100000 /* Whiteout dentry (stop pathwalk) */ 220 + #define DCACHE_DIRECTORY_TYPE 0x00200000 /* Normal directory */ 221 + #define DCACHE_AUTODIR_TYPE 0x00300000 /* Lookupless directory (presumed automount) */ 222 + #define DCACHE_REGULAR_TYPE 0x00400000 /* Regular file type (or fallthru to such) */ 223 + #define DCACHE_SPECIAL_TYPE 0x00500000 /* Other file type (or fallthru to such) */ 224 + #define DCACHE_SYMLINK_TYPE 0x00600000 /* Symlink (or fallthru to such) */ 223 225 224 226 #define DCACHE_MAY_FREE 0x00800000 227 + #define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */ 225 228 226 229 extern seqlock_t rename_lock; 227 230 ··· 426 423 return dentry->d_flags & DCACHE_ENTRY_TYPE; 427 424 } 428 425 426 + static inline bool d_is_miss(const struct dentry *dentry) 427 + { 428 + return __d_entry_type(dentry) == DCACHE_MISS_TYPE; 429 + } 430 + 431 + static inline bool d_is_whiteout(const struct dentry *dentry) 432 + { 433 + return __d_entry_type(dentry) == DCACHE_WHITEOUT_TYPE; 434 + } 435 + 429 436 static inline bool d_can_lookup(const struct dentry *dentry) 430 437 { 431 438 return __d_entry_type(dentry) == DCACHE_DIRECTORY_TYPE; ··· 456 443 return __d_entry_type(dentry) == DCACHE_SYMLINK_TYPE; 457 444 } 458 445 446 + static inline bool d_is_reg(const struct dentry *dentry) 447 + { 448 + return __d_entry_type(dentry) == DCACHE_REGULAR_TYPE; 449 + } 450 + 451 + static inline bool 
d_is_special(const struct dentry *dentry) 452 + { 453 + return __d_entry_type(dentry) == DCACHE_SPECIAL_TYPE; 454 + } 455 + 459 456 static inline bool d_is_file(const struct dentry *dentry) 460 457 { 461 - return __d_entry_type(dentry) == DCACHE_FILE_TYPE; 458 + return d_is_reg(dentry) || d_is_special(dentry); 462 459 } 463 460 464 461 static inline bool d_is_negative(const struct dentry *dentry) 465 462 { 466 - return __d_entry_type(dentry) == DCACHE_MISS_TYPE; 463 + // TODO: check d_is_whiteout(dentry) also. 464 + return d_is_miss(dentry); 467 465 } 468 466 469 467 static inline bool d_is_positive(const struct dentry *dentry) ··· 482 458 return !d_is_negative(dentry); 483 459 } 484 460 461 + extern void d_set_fallthru(struct dentry *dentry); 462 + 463 + static inline bool d_is_fallthru(const struct dentry *dentry) 464 + { 465 + return dentry->d_flags & DCACHE_FALLTHRU; 466 + } 467 + 468 + 485 469 extern int sysctl_vfs_cache_pressure; 486 470 487 471 static inline unsigned long vfs_pressure_ratio(unsigned long val) 488 472 { 489 473 return mult_frac(val, sysctl_vfs_cache_pressure, 100); 490 474 } 475 + 476 + /** 477 + * d_inode - Get the actual inode of this dentry 478 + * @dentry: The dentry to query 479 + * 480 + * This is the helper normal filesystems should use to get at their own inodes 481 + * in their own dentries and ignore the layering superimposed upon them. 482 + */ 483 + static inline struct inode *d_inode(const struct dentry *dentry) 484 + { 485 + return dentry->d_inode; 486 + } 487 + 488 + /** 489 + * d_inode_rcu - Get the actual inode of this dentry with ACCESS_ONCE() 490 + * @dentry: The dentry to query 491 + * 492 + * This is the helper normal filesystems should use to get at their own inodes 493 + * in their own dentries and ignore the layering superimposed upon them. 
494 + */ 495 + static inline struct inode *d_inode_rcu(const struct dentry *dentry) 496 + { 497 + return ACCESS_ONCE(dentry->d_inode); 498 + } 499 + 500 + /** 501 + * d_backing_inode - Get upper or lower inode we should be using 502 + * @upper: The upper layer 503 + * 504 + * This is the helper that should be used to get at the inode that will be used 505 + * if this dentry were to be opened as a file. The inode may be on the upper 506 + * dentry or it may be on a lower dentry pinned by the upper. 507 + * 508 + * Normal filesystems should not use this to access their own inodes. 509 + */ 510 + static inline struct inode *d_backing_inode(const struct dentry *upper) 511 + { 512 + struct inode *inode = upper->d_inode; 513 + 514 + return inode; 515 + } 516 + 517 + /** 518 + * d_backing_dentry - Get upper or lower dentry we should be using 519 + * @upper: The upper layer 520 + * 521 + * This is the helper that should be used to get the dentry of the inode that 522 + * will be used if this dentry were opened as a file. It may be the upper 523 + * dentry or it may be a lower dentry pinned by the upper. 524 + * 525 + * Normal filesystems should not use this to access their own dentries. 526 + */ 527 + static inline struct dentry *d_backing_dentry(struct dentry *upper) 528 + { 529 + return upper; 530 + } 531 + 491 532 #endif /* __LINUX_DCACHE_H */
+2 -2
mm/shmem.c
··· 2319 2319 2320 2320 static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) 2321 2321 { 2322 - bool old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode); 2323 - bool new_is_dir = S_ISDIR(new_dentry->d_inode->i_mode); 2322 + bool old_is_dir = d_is_dir(old_dentry); 2323 + bool new_is_dir = d_is_dir(new_dentry); 2324 2324 2325 2325 if (old_dir != new_dir && old_is_dir != new_is_dir) { 2326 2326 if (old_is_dir) {
+2 -2
security/apparmor/include/apparmor.h
··· 112 112 return aa_dfa_next(dfa, start, 0); 113 113 } 114 114 115 - static inline bool mediated_filesystem(struct inode *inode) 115 + static inline bool mediated_filesystem(struct dentry *dentry) 116 116 { 117 - return !(inode->i_sb->s_flags & MS_NOUSER); 117 + return !(dentry->d_sb->s_flags & MS_NOUSER); 118 118 } 119 119 120 120 #endif /* __APPARMOR_H */
+10 -10
security/apparmor/lsm.c
··· 226 226 struct inode *inode = dentry->d_inode; 227 227 struct path_cond cond = { }; 228 228 229 - if (!inode || !dir->mnt || !mediated_filesystem(inode)) 229 + if (!inode || !dir->mnt || !mediated_filesystem(dentry)) 230 230 return 0; 231 231 232 232 cond.uid = inode->i_uid; ··· 250 250 { 251 251 struct path_cond cond = { current_fsuid(), mode }; 252 252 253 - if (!dir->mnt || !mediated_filesystem(dir->dentry->d_inode)) 253 + if (!dir->mnt || !mediated_filesystem(dir->dentry)) 254 254 return 0; 255 255 256 256 return common_perm_dir_dentry(op, dir, dentry, mask, &cond); ··· 285 285 path->dentry->d_inode->i_mode 286 286 }; 287 287 288 - if (!path->mnt || !mediated_filesystem(path->dentry->d_inode)) 288 + if (!path->mnt || !mediated_filesystem(path->dentry)) 289 289 return 0; 290 290 291 291 return common_perm(OP_TRUNC, path, MAY_WRITE | AA_MAY_META_WRITE, ··· 305 305 struct aa_profile *profile; 306 306 int error = 0; 307 307 308 - if (!mediated_filesystem(old_dentry->d_inode)) 308 + if (!mediated_filesystem(old_dentry)) 309 309 return 0; 310 310 311 311 profile = aa_current_profile(); ··· 320 320 struct aa_profile *profile; 321 321 int error = 0; 322 322 323 - if (!mediated_filesystem(old_dentry->d_inode)) 323 + if (!mediated_filesystem(old_dentry)) 324 324 return 0; 325 325 326 326 profile = aa_current_profile(); ··· 346 346 347 347 static int apparmor_path_chmod(struct path *path, umode_t mode) 348 348 { 349 - if (!mediated_filesystem(path->dentry->d_inode)) 349 + if (!mediated_filesystem(path->dentry)) 350 350 return 0; 351 351 352 352 return common_perm_mnt_dentry(OP_CHMOD, path->mnt, path->dentry, AA_MAY_CHMOD); ··· 358 358 path->dentry->d_inode->i_mode 359 359 }; 360 360 361 - if (!mediated_filesystem(path->dentry->d_inode)) 361 + if (!mediated_filesystem(path->dentry)) 362 362 return 0; 363 363 364 364 return common_perm(OP_CHOWN, path, AA_MAY_CHOWN, &cond); ··· 366 366 367 367 static int apparmor_inode_getattr(struct vfsmount *mnt, struct dentry *dentry) 
368 368 { 369 - if (!mediated_filesystem(dentry->d_inode)) 369 + if (!mediated_filesystem(dentry)) 370 370 return 0; 371 371 372 372 return common_perm_mnt_dentry(OP_GETATTR, mnt, dentry, ··· 379 379 struct aa_profile *profile; 380 380 int error = 0; 381 381 382 - if (!mediated_filesystem(file_inode(file))) 382 + if (!mediated_filesystem(file->f_path.dentry)) 383 383 return 0; 384 384 385 385 /* If in exec, permission is handled by bprm hooks. ··· 432 432 BUG_ON(!fprofile); 433 433 434 434 if (!file->f_path.mnt || 435 - !mediated_filesystem(file_inode(file))) 435 + !mediated_filesystem(file->f_path.dentry)) 436 436 return 0; 437 437 438 438 profile = __aa_current_profile();
+1 -1
security/apparmor/path.c
··· 114 114 * security_path hooks as a deleted dentry except without an inode 115 115 * allocated. 116 116 */ 117 - if (d_unlinked(path->dentry) && path->dentry->d_inode && 117 + if (d_unlinked(path->dentry) && d_is_positive(path->dentry) && 118 118 !(flags & PATH_MEDIATE_DELETED)) { 119 119 error = -ENOENT; 120 120 goto out;
+1 -1
security/inode.c
··· 203 203 mutex_lock(&parent->d_inode->i_mutex); 204 204 if (positive(dentry)) { 205 205 if (dentry->d_inode) { 206 - if (S_ISDIR(dentry->d_inode->i_mode)) 206 + if (d_is_dir(dentry)) 207 207 simple_rmdir(parent->d_inode, dentry); 208 208 else 209 209 simple_unlink(parent->d_inode, dentry);
+4 -4
security/selinux/hooks.c
··· 1799 1799 1800 1800 old_dsec = old_dir->i_security; 1801 1801 old_isec = old_dentry->d_inode->i_security; 1802 - old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode); 1802 + old_is_dir = d_is_dir(old_dentry); 1803 1803 new_dsec = new_dir->i_security; 1804 1804 1805 1805 ad.type = LSM_AUDIT_DATA_DENTRY; ··· 1822 1822 1823 1823 ad.u.dentry = new_dentry; 1824 1824 av = DIR__ADD_NAME | DIR__SEARCH; 1825 - if (new_dentry->d_inode) 1825 + if (d_is_positive(new_dentry)) 1826 1826 av |= DIR__REMOVE_NAME; 1827 1827 rc = avc_has_perm(sid, new_dsec->sid, SECCLASS_DIR, av, &ad); 1828 1828 if (rc) 1829 1829 return rc; 1830 - if (new_dentry->d_inode) { 1830 + if (d_is_positive(new_dentry)) { 1831 1831 new_isec = new_dentry->d_inode->i_security; 1832 - new_is_dir = S_ISDIR(new_dentry->d_inode->i_mode); 1832 + new_is_dir = d_is_dir(new_dentry); 1833 1833 rc = avc_has_perm(sid, new_isec->sid, 1834 1834 new_isec->sclass, 1835 1835 (new_is_dir ? DIR__RMDIR : FILE__UNLINK), &ad);
+2 -2
security/smack/smack_lsm.c
··· 855 855 rc = smk_curacc(isp, MAY_WRITE, &ad); 856 856 rc = smk_bu_inode(old_dentry->d_inode, MAY_WRITE, rc); 857 857 858 - if (rc == 0 && new_dentry->d_inode != NULL) { 858 + if (rc == 0 && d_is_positive(new_dentry)) { 859 859 isp = smk_of_inode(new_dentry->d_inode); 860 860 smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry); 861 861 rc = smk_curacc(isp, MAY_WRITE, &ad); ··· 961 961 rc = smk_curacc(isp, MAY_READWRITE, &ad); 962 962 rc = smk_bu_inode(old_dentry->d_inode, MAY_READWRITE, rc); 963 963 964 - if (rc == 0 && new_dentry->d_inode != NULL) { 964 + if (rc == 0 && d_is_positive(new_dentry)) { 965 965 isp = smk_of_inode(new_dentry->d_inode); 966 966 smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry); 967 967 rc = smk_curacc(isp, MAY_READWRITE, &ad);
+1 -3
security/tomoyo/file.c
··· 905 905 !tomoyo_get_realpath(&buf2, path2)) 906 906 goto out; 907 907 switch (operation) { 908 - struct dentry *dentry; 909 908 case TOMOYO_TYPE_RENAME: 910 909 case TOMOYO_TYPE_LINK: 911 - dentry = path1->dentry; 912 - if (!dentry->d_inode || !S_ISDIR(dentry->d_inode->i_mode)) 910 + if (!d_is_dir(path1->dentry)) 913 911 break; 914 912 /* fall through */ 915 913 case TOMOYO_TYPE_PIVOT_ROOT: