Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs-2.6: (107 commits)
vfs: use ERR_CAST for err-ptr tossing in lookup_instantiate_filp
isofs: Remove global fs lock
jffs2: fix IN_DELETE_SELF on overwriting rename() killing a directory
fix IN_DELETE_SELF on overwriting rename() on ramfs et.al.
mm/truncate.c: fix build for CONFIG_BLOCK not enabled
fs:update the NOTE of the file_operations structure
Remove dead code in dget_parent()
AFS: Fix silly characters in a comment
switch d_add_ci() to d_splice_alias() in "found negative" case as well
simplify gfs2_lookup()
jfs_lookup(): don't bother with . or ..
get rid of useless dget_parent() in btrfs rename() and link()
get rid of useless dget_parent() in fs/btrfs/ioctl.c
fs: push i_mutex and filemap_write_and_wait down into ->fsync() handlers
drivers: fix up various ->llseek() implementations
fs: handle SEEK_HOLE/SEEK_DATA properly in all fs's that define their own llseek
Ext4: handle SEEK_HOLE/SEEK_DATA generically
Btrfs: implement our own ->llseek
fs: add SEEK_HOLE and SEEK_DATA flags
reiserfs: make reiserfs default to barrier=flush
...

Fix up trivial conflicts in fs/xfs/linux-2.6/xfs_super.c due to the new
shrinker callout for the inode cache, that clashed with the xfs code to
start the periodic workers later.

+2512 -1943
+3 -5
Documentation/filesystems/Locking
··· 52 52 void (*put_link) (struct dentry *, struct nameidata *, void *); 53 53 void (*truncate) (struct inode *); 54 54 int (*permission) (struct inode *, int, unsigned int); 55 - int (*check_acl)(struct inode *, int, unsigned int); 55 + int (*check_acl)(struct inode *, int); 56 56 int (*setattr) (struct dentry *, struct iattr *); 57 57 int (*getattr) (struct vfsmount *, struct dentry *, struct kstat *); 58 58 int (*setxattr) (struct dentry *, const char *,const void *,size_t,int); ··· 412 412 int (*open) (struct inode *, struct file *); 413 413 int (*flush) (struct file *); 414 414 int (*release) (struct inode *, struct file *); 415 - int (*fsync) (struct file *, int datasync); 415 + int (*fsync) (struct file *, loff_t start, loff_t end, int datasync); 416 416 int (*aio_fsync) (struct kiocb *, int datasync); 417 417 int (*fasync) (int, struct file *, int); 418 418 int (*lock) (struct file *, int, struct file_lock *); ··· 438 438 439 439 locking rules: 440 440 All may block except for ->setlease. 441 - No VFS locks held on entry except for ->fsync and ->setlease. 442 - 443 - ->fsync() has i_mutex on inode. 441 + No VFS locks held on entry except for ->setlease. 444 442 445 443 ->setlease has the file_list_lock held and must not sleep. 446 444
+24 -3
Documentation/filesystems/porting
··· 400 400 401 401 -- 402 402 [mandatory] 403 - 404 - -- 405 - [mandatory] 406 403 ->get_sb() is gone. Switch to use of ->mount(). Typically it's just 407 404 a matter of switching from calling get_sb_... to mount_... and changing the 408 405 function type. If you were doing it manually, just switch from setting ->mnt_root 409 406 to some pointer to returning that pointer. On errors return ERR_PTR(...). 407 + 408 + -- 409 + [mandatory] 410 + ->permission(), generic_permission() and ->check_acl() have lost flags 411 + argument; instead of passing IPERM_FLAG_RCU we add MAY_NOT_BLOCK into mask. 412 + generic_permission() has also lost the check_acl argument; if you want 413 + non-NULL to be used for that inode, put it into ->i_op->check_acl. 414 + 415 + -- 416 + [mandatory] 417 + If you implement your own ->llseek() you must handle SEEK_HOLE and 418 + SEEK_DATA. You can handle this by returning -EINVAL, but it would be nicer to 419 + support it in some way. The generic handler assumes that the entire file is 420 + data and there is a virtual hole at the end of the file. So if the provided 421 + offset is less than i_size and SEEK_DATA is specified, return the same offset. 422 + If the above is true for the offset and you are given SEEK_HOLE, return the end 423 + of the file. If the offset is i_size or greater return -ENXIO in either case. 424 + 425 + [mandatory] 426 + If you have your own ->fsync() you must make sure to call 427 + filemap_write_and_wait_range() so that all dirty pages are synced out properly. 428 + You must also keep in mind that ->fsync() is not called with i_mutex held 429 + anymore, so if you require i_mutex locking you must make sure to take it and 430 + release it yourself.
+26 -4
Documentation/filesystems/vfs.txt
··· 229 229 230 230 ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); 231 231 ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); 232 + int (*nr_cached_objects)(struct super_block *); 233 + void (*free_cached_objects)(struct super_block *, int); 232 234 }; 233 235 234 236 All methods are called without any locks being held, unless otherwise ··· 303 301 304 302 quota_write: called by the VFS to write to filesystem quota file. 305 303 304 + nr_cached_objects: called by the sb cache shrinking function for the 305 + filesystem to return the number of freeable cached objects it contains. 306 + Optional. 307 + 308 + free_cached_objects: called by the sb cache shrinking function for the 309 + filesystem to scan the number of objects indicated to try to free them. 310 + Optional, but any filesystem implementing this method needs to also 311 + implement ->nr_cached_objects for it to be called correctly. 312 + 313 + We can't do anything with any errors that the filesystem might 314 + encounter, hence the void return type. This will never be called if 315 + the VM is trying to reclaim under GFP_NOFS conditions, hence this 316 + method does not need to handle that situation itself. 317 + 318 + Implementations must include conditional reschedule calls inside any 319 + scanning loop that is done. This allows the VFS to determine 320 + appropriate scan batch sizes without having to worry about whether 321 + implementations will cause holdoff problems due to large scan batch 322 + sizes. 323 + 306 324 Whoever sets up the inode is responsible for filling in the "i_op" field. This 307 325 is a pointer to a "struct inode_operations" which describes the methods that 308 326 can be performed on individual inodes. 
··· 355 333 void * (*follow_link) (struct dentry *, struct nameidata *); 356 334 void (*put_link) (struct dentry *, struct nameidata *, void *); 357 335 void (*truncate) (struct inode *); 358 - int (*permission) (struct inode *, int, unsigned int); 359 - int (*check_acl)(struct inode *, int, unsigned int); 336 + int (*permission) (struct inode *, int); 337 + int (*check_acl)(struct inode *, int); 360 338 int (*setattr) (struct dentry *, struct iattr *); 361 339 int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); 362 340 int (*setxattr) (struct dentry *, const char *,const void *,size_t,int); ··· 445 423 permission: called by the VFS to check for access rights on a POSIX-like 446 424 filesystem. 447 425 448 - May be called in rcu-walk mode (flags & IPERM_FLAG_RCU). If in rcu-walk 426 + May be called in rcu-walk mode (mask & MAY_NOT_BLOCK). If in rcu-walk 449 427 mode, the filesystem must check the permission without blocking or 450 428 storing to the inode. 451 429 ··· 777 755 int (*open) (struct inode *, struct file *); 778 756 int (*flush) (struct file *); 779 757 int (*release) (struct inode *, struct file *); 780 - int (*fsync) (struct file *, int datasync); 758 + int (*fsync) (struct file *, loff_t, loff_t, int datasync); 781 759 int (*aio_fsync) (struct kiocb *, int datasync); 782 760 int (*fasync) (int, struct file *, int); 783 761 int (*lock) (struct file *, int, struct file_lock *);
+2 -5
arch/arm/mach-tegra/clock.c
··· 585 585 586 586 static int clk_debugfs_register_one(struct clk *c) 587 587 { 588 - struct dentry *d, *child, *child_tmp; 588 + struct dentry *d; 589 589 590 590 d = debugfs_create_dir(c->name, clk_debugfs_root); 591 591 if (!d) ··· 614 614 return 0; 615 615 616 616 err_out: 617 - d = c->dent; 618 - list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child) 619 - debugfs_remove(child); 620 - debugfs_remove(c->dent); 617 + debugfs_remove_recursive(c->dent); 621 618 return -ENOMEM; 622 619 } 623 620
+7 -24
arch/arm/mach-ux500/clock.c
··· 635 635 static struct dentry *clk_debugfs_register_dir(struct clk *c, 636 636 struct dentry *p_dentry) 637 637 { 638 - struct dentry *d, *clk_d, *child, *child_tmp; 639 - char s[255]; 640 - char *p = s; 638 + struct dentry *d, *clk_d; 639 + const char *p = c->name; 641 640 642 - if (c->name == NULL) 643 - p += sprintf(p, "BUG"); 644 - else 645 - p += sprintf(p, "%s", c->name); 641 + if (!p) 642 + p = "BUG"; 646 643 647 - clk_d = debugfs_create_dir(s, p_dentry); 644 + clk_d = debugfs_create_dir(p, p_dentry); 648 645 if (!clk_d) 649 646 return NULL; 650 647 ··· 663 666 return clk_d; 664 667 665 668 err_out: 666 - d = clk_d; 667 - list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child) 668 - debugfs_remove(child); 669 - debugfs_remove(clk_d); 669 + debugfs_remove_recursive(clk_d); 670 670 return NULL; 671 - } 672 - 673 - static void clk_debugfs_remove_dir(struct dentry *cdentry) 674 - { 675 - struct dentry *d, *child, *child_tmp; 676 - 677 - d = cdentry; 678 - list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child) 679 - debugfs_remove(child); 680 - debugfs_remove(cdentry); 681 - return ; 682 671 } 683 672 684 673 static int clk_debugfs_register_one(struct clk *c) ··· 683 700 c->dent_bus = clk_debugfs_register_dir(c, 684 701 bpa->dent_bus ? bpa->dent_bus : bpa->dent); 685 702 if ((!c->dent_bus) && (c->dent)) { 686 - clk_debugfs_remove_dir(c->dent); 703 + debugfs_remove_recursive(c->dent); 687 704 c->dent = NULL; 688 705 return -ENOMEM; 689 706 }
+3 -9
arch/arm/plat-omap/clock.c
··· 480 480 static int clk_debugfs_register_one(struct clk *c) 481 481 { 482 482 int err; 483 - struct dentry *d, *child, *child_tmp; 483 + struct dentry *d; 484 484 struct clk *pa = c->parent; 485 - char s[255]; 486 - char *p = s; 487 485 488 - p += sprintf(p, "%s", c->name); 489 - d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root); 486 + d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root); 490 487 if (!d) 491 488 return -ENOMEM; 492 489 c->dent = d; ··· 506 509 return 0; 507 510 508 511 err_out: 509 - d = c->dent; 510 - list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child) 511 - debugfs_remove(child); 512 - debugfs_remove(c->dent); 512 + debugfs_remove_recursive(c->dent); 513 513 return err; 514 514 } 515 515
+2 -5
arch/arm/plat-samsung/clock.c
··· 458 458 static int clk_debugfs_register_one(struct clk *c) 459 459 { 460 460 int err; 461 - struct dentry *d, *child, *child_tmp; 461 + struct dentry *d; 462 462 struct clk *pa = c->parent; 463 463 char s[255]; 464 464 char *p = s; ··· 488 488 return 0; 489 489 490 490 err_out: 491 - d = c->dent; 492 - list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child) 493 - debugfs_remove(child); 494 - debugfs_remove(c->dent); 491 + debugfs_remove_recursive(c->dent); 495 492 return err; 496 493 } 497 494
+2 -5
arch/arm/plat-spear/clock.c
··· 916 916 static int clk_debugfs_register_one(struct clk *c) 917 917 { 918 918 int err; 919 - struct dentry *d, *child; 919 + struct dentry *d; 920 920 struct clk *pa = c->pclk; 921 921 char s[255]; 922 922 char *p = s; ··· 951 951 return 0; 952 952 953 953 err_out: 954 - d = c->dent; 955 - list_for_each_entry(child, &d->d_subdirs, d_u.d_child) 956 - debugfs_remove(child); 957 - debugfs_remove(c->dent); 954 + debugfs_remove_recursive(c->dent); 958 955 return err; 959 956 } 960 957
+9 -2
arch/powerpc/platforms/cell/spufs/file.c
··· 1850 1850 return ret; 1851 1851 } 1852 1852 1853 - static int spufs_mfc_fsync(struct file *file, int datasync) 1853 + static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync) 1854 1854 { 1855 - return spufs_mfc_flush(file, NULL); 1855 + struct inode *inode = file->f_path.dentry->d_inode; 1856 + int err = filemap_write_and_wait_range(inode->i_mapping, start, end); 1857 + if (!err) { 1858 + mutex_lock(&inode->i_mutex); 1859 + err = spufs_mfc_flush(file, NULL); 1860 + mutex_unlock(&inode->i_mutex); 1861 + } 1862 + return err; 1856 1863 } 1857 1864 1858 1865 static int spufs_mfc_fasync(int fd, struct file *file, int on)
+11 -18
arch/powerpc/platforms/cell/spufs/inode.c
··· 611 611 612 612 static struct file_system_type spufs_type; 613 613 614 - long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode, 615 - struct file *filp) 614 + long spufs_create(struct path *path, struct dentry *dentry, 615 + unsigned int flags, mode_t mode, struct file *filp) 616 616 { 617 - struct dentry *dentry; 618 617 int ret; 619 618 620 619 ret = -EINVAL; 621 620 /* check if we are on spufs */ 622 - if (nd->path.dentry->d_sb->s_type != &spufs_type) 621 + if (path->dentry->d_sb->s_type != &spufs_type) 623 622 goto out; 624 623 625 624 /* don't accept undefined flags */ ··· 626 627 goto out; 627 628 628 629 /* only threads can be underneath a gang */ 629 - if (nd->path.dentry != nd->path.dentry->d_sb->s_root) { 630 + if (path->dentry != path->dentry->d_sb->s_root) { 630 631 if ((flags & SPU_CREATE_GANG) || 631 - !SPUFS_I(nd->path.dentry->d_inode)->i_gang) 632 + !SPUFS_I(path->dentry->d_inode)->i_gang) 632 633 goto out; 633 634 } 634 - 635 - dentry = lookup_create(nd, 1); 636 - ret = PTR_ERR(dentry); 637 - if (IS_ERR(dentry)) 638 - goto out_dir; 639 635 640 636 mode &= ~current_umask(); 641 637 642 638 if (flags & SPU_CREATE_GANG) 643 - ret = spufs_create_gang(nd->path.dentry->d_inode, 644 - dentry, nd->path.mnt, mode); 639 + ret = spufs_create_gang(path->dentry->d_inode, 640 + dentry, path->mnt, mode); 645 641 else 646 - ret = spufs_create_context(nd->path.dentry->d_inode, 647 - dentry, nd->path.mnt, flags, mode, 642 + ret = spufs_create_context(path->dentry->d_inode, 643 + dentry, path->mnt, flags, mode, 648 644 filp); 649 645 if (ret >= 0) 650 - fsnotify_mkdir(nd->path.dentry->d_inode, dentry); 646 + fsnotify_mkdir(path->dentry->d_inode, dentry); 651 647 return ret; 652 648 653 - out_dir: 654 - mutex_unlock(&nd->path.dentry->d_inode->i_mutex); 655 649 out: 650 + mutex_unlock(&path->dentry->d_inode->i_mutex); 656 651 return ret; 657 652 } 658 653
+1 -1
arch/powerpc/platforms/cell/spufs/spufs.h
··· 248 248 /* system call implementation */ 249 249 extern struct spufs_calls spufs_calls; 250 250 long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *status); 251 - long spufs_create(struct nameidata *nd, unsigned int flags, 251 + long spufs_create(struct path *nd, struct dentry *dentry, unsigned int flags, 252 252 mode_t mode, struct file *filp); 253 253 /* ELF coredump callbacks for writing SPU ELF notes */ 254 254 extern int spufs_coredump_extra_notes_size(void);
+9 -13
arch/powerpc/platforms/cell/spufs/syscalls.c
··· 62 62 static long do_spu_create(const char __user *pathname, unsigned int flags, 63 63 mode_t mode, struct file *neighbor) 64 64 { 65 - char *tmp; 65 + struct path path; 66 + struct dentry *dentry; 66 67 int ret; 67 68 68 - tmp = getname(pathname); 69 - ret = PTR_ERR(tmp); 70 - if (!IS_ERR(tmp)) { 71 - struct nameidata nd; 72 - 73 - ret = kern_path_parent(tmp, &nd); 74 - if (!ret) { 75 - nd.flags |= LOOKUP_OPEN | LOOKUP_CREATE; 76 - ret = spufs_create(&nd, flags, mode, neighbor); 77 - path_put(&nd.path); 78 - } 79 - putname(tmp); 69 + dentry = user_path_create(AT_FDCWD, pathname, &path, 1); 70 + ret = PTR_ERR(dentry); 71 + if (!IS_ERR(dentry)) { 72 + ret = spufs_create(&path, dentry, flags, mode, neighbor); 73 + mutex_unlock(&path.dentry->d_inode->i_mutex); 74 + dput(dentry); 75 + path_put(&path); 80 76 } 81 77 82 78 return ret;
+193 -134
drivers/base/devtmpfs.c
··· 21 21 #include <linux/fs.h> 22 22 #include <linux/shmem_fs.h> 23 23 #include <linux/ramfs.h> 24 - #include <linux/cred.h> 25 24 #include <linux/sched.h> 26 - #include <linux/init_task.h> 27 25 #include <linux/slab.h> 26 + #include <linux/kthread.h> 28 27 29 - static struct vfsmount *dev_mnt; 28 + static struct task_struct *thread; 30 29 31 30 #if defined CONFIG_DEVTMPFS_MOUNT 32 31 static int mount_dev = 1; ··· 33 34 static int mount_dev; 34 35 #endif 35 36 36 - static DEFINE_MUTEX(dirlock); 37 + static DEFINE_SPINLOCK(req_lock); 38 + 39 + static struct req { 40 + struct req *next; 41 + struct completion done; 42 + int err; 43 + const char *name; 44 + mode_t mode; /* 0 => delete */ 45 + struct device *dev; 46 + } *requests; 37 47 38 48 static int __init mount_param(char *str) 39 49 { ··· 76 68 static inline int is_blockdev(struct device *dev) { return 0; } 77 69 #endif 78 70 71 + int devtmpfs_create_node(struct device *dev) 72 + { 73 + const char *tmp = NULL; 74 + struct req req; 75 + 76 + if (!thread) 77 + return 0; 78 + 79 + req.mode = 0; 80 + req.name = device_get_devnode(dev, &req.mode, &tmp); 81 + if (!req.name) 82 + return -ENOMEM; 83 + 84 + if (req.mode == 0) 85 + req.mode = 0600; 86 + if (is_blockdev(dev)) 87 + req.mode |= S_IFBLK; 88 + else 89 + req.mode |= S_IFCHR; 90 + 91 + req.dev = dev; 92 + 93 + init_completion(&req.done); 94 + 95 + spin_lock(&req_lock); 96 + req.next = requests; 97 + requests = &req; 98 + spin_unlock(&req_lock); 99 + 100 + wake_up_process(thread); 101 + wait_for_completion(&req.done); 102 + 103 + kfree(tmp); 104 + 105 + return req.err; 106 + } 107 + 108 + int devtmpfs_delete_node(struct device *dev) 109 + { 110 + const char *tmp = NULL; 111 + struct req req; 112 + 113 + if (!thread) 114 + return 0; 115 + 116 + req.name = device_get_devnode(dev, NULL, &tmp); 117 + if (!req.name) 118 + return -ENOMEM; 119 + 120 + req.mode = 0; 121 + req.dev = dev; 122 + 123 + init_completion(&req.done); 124 + 125 + spin_lock(&req_lock); 126 + 
req.next = requests; 127 + requests = &req; 128 + spin_unlock(&req_lock); 129 + 130 + wake_up_process(thread); 131 + wait_for_completion(&req.done); 132 + 133 + kfree(tmp); 134 + return req.err; 135 + } 136 + 79 137 static int dev_mkdir(const char *name, mode_t mode) 80 138 { 81 - struct nameidata nd; 82 139 struct dentry *dentry; 140 + struct path path; 83 141 int err; 84 142 85 - err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt, 86 - name, LOOKUP_PARENT, &nd); 87 - if (err) 88 - return err; 143 + dentry = kern_path_create(AT_FDCWD, name, &path, 1); 144 + if (IS_ERR(dentry)) 145 + return PTR_ERR(dentry); 89 146 90 - dentry = lookup_create(&nd, 1); 91 - if (!IS_ERR(dentry)) { 92 - err = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode); 93 - if (!err) 94 - /* mark as kernel-created inode */ 95 - dentry->d_inode->i_private = &dev_mnt; 96 - dput(dentry); 97 - } else { 98 - err = PTR_ERR(dentry); 99 - } 100 - 101 - mutex_unlock(&nd.path.dentry->d_inode->i_mutex); 102 - path_put(&nd.path); 147 + err = vfs_mkdir(path.dentry->d_inode, dentry, mode); 148 + if (!err) 149 + /* mark as kernel-created inode */ 150 + dentry->d_inode->i_private = &thread; 151 + dput(dentry); 152 + mutex_unlock(&path.dentry->d_inode->i_mutex); 153 + path_put(&path); 103 154 return err; 104 155 } 105 156 106 157 static int create_path(const char *nodepath) 107 158 { 159 + char *path; 160 + char *s; 108 161 int err; 109 162 110 - mutex_lock(&dirlock); 111 - err = dev_mkdir(nodepath, 0755); 112 - if (err == -ENOENT) { 113 - char *path; 114 - char *s; 163 + /* parent directories do not exist, create them */ 164 + path = kstrdup(nodepath, GFP_KERNEL); 165 + if (!path) 166 + return -ENOMEM; 115 167 116 - /* parent directories do not exist, create them */ 117 - path = kstrdup(nodepath, GFP_KERNEL); 118 - if (!path) { 119 - err = -ENOMEM; 120 - goto out; 121 - } 122 - s = path; 123 - for (;;) { 124 - s = strchr(s, '/'); 125 - if (!s) 126 - break; 127 - s[0] = '\0'; 128 - err = dev_mkdir(path, 0755); 129 
- if (err && err != -EEXIST) 130 - break; 131 - s[0] = '/'; 132 - s++; 133 - } 134 - kfree(path); 168 + s = path; 169 + for (;;) { 170 + s = strchr(s, '/'); 171 + if (!s) 172 + break; 173 + s[0] = '\0'; 174 + err = dev_mkdir(path, 0755); 175 + if (err && err != -EEXIST) 176 + break; 177 + s[0] = '/'; 178 + s++; 135 179 } 136 - out: 137 - mutex_unlock(&dirlock); 180 + kfree(path); 138 181 return err; 139 182 } 140 183 141 - int devtmpfs_create_node(struct device *dev) 184 + static int handle_create(const char *nodename, mode_t mode, struct device *dev) 142 185 { 143 - const char *tmp = NULL; 144 - const char *nodename; 145 - const struct cred *curr_cred; 146 - mode_t mode = 0; 147 - struct nameidata nd; 148 186 struct dentry *dentry; 187 + struct path path; 149 188 int err; 150 189 151 - if (!dev_mnt) 152 - return 0; 153 - 154 - nodename = device_get_devnode(dev, &mode, &tmp); 155 - if (!nodename) 156 - return -ENOMEM; 157 - 158 - if (mode == 0) 159 - mode = 0600; 160 - if (is_blockdev(dev)) 161 - mode |= S_IFBLK; 162 - else 163 - mode |= S_IFCHR; 164 - 165 - curr_cred = override_creds(&init_cred); 166 - 167 - err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt, 168 - nodename, LOOKUP_PARENT, &nd); 169 - if (err == -ENOENT) { 190 + dentry = kern_path_create(AT_FDCWD, nodename, &path, 0); 191 + if (dentry == ERR_PTR(-ENOENT)) { 170 192 create_path(nodename); 171 - err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt, 172 - nodename, LOOKUP_PARENT, &nd); 193 + dentry = kern_path_create(AT_FDCWD, nodename, &path, 0); 173 194 } 174 - if (err) 175 - goto out; 195 + if (IS_ERR(dentry)) 196 + return PTR_ERR(dentry); 176 197 177 - dentry = lookup_create(&nd, 0); 178 - if (!IS_ERR(dentry)) { 179 - err = vfs_mknod(nd.path.dentry->d_inode, 180 - dentry, mode, dev->devt); 181 - if (!err) { 182 - struct iattr newattrs; 198 + err = vfs_mknod(path.dentry->d_inode, 199 + dentry, mode, dev->devt); 200 + if (!err) { 201 + struct iattr newattrs; 183 202 184 - /* fixup possibly umasked mode 
*/ 185 - newattrs.ia_mode = mode; 186 - newattrs.ia_valid = ATTR_MODE; 187 - mutex_lock(&dentry->d_inode->i_mutex); 188 - notify_change(dentry, &newattrs); 189 - mutex_unlock(&dentry->d_inode->i_mutex); 203 + /* fixup possibly umasked mode */ 204 + newattrs.ia_mode = mode; 205 + newattrs.ia_valid = ATTR_MODE; 206 + mutex_lock(&dentry->d_inode->i_mutex); 207 + notify_change(dentry, &newattrs); 208 + mutex_unlock(&dentry->d_inode->i_mutex); 190 209 191 - /* mark as kernel-created inode */ 192 - dentry->d_inode->i_private = &dev_mnt; 193 - } 194 - dput(dentry); 195 - } else { 196 - err = PTR_ERR(dentry); 210 + /* mark as kernel-created inode */ 211 + dentry->d_inode->i_private = &thread; 197 212 } 213 + dput(dentry); 198 214 199 - mutex_unlock(&nd.path.dentry->d_inode->i_mutex); 200 - path_put(&nd.path); 201 - out: 202 - kfree(tmp); 203 - revert_creds(curr_cred); 215 + mutex_unlock(&path.dentry->d_inode->i_mutex); 216 + path_put(&path); 204 217 return err; 205 218 } 206 219 ··· 231 202 struct dentry *dentry; 232 203 int err; 233 204 234 - err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt, 235 - name, LOOKUP_PARENT, &nd); 205 + err = kern_path_parent(name, &nd); 236 206 if (err) 237 207 return err; 238 208 ··· 239 211 dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len); 240 212 if (!IS_ERR(dentry)) { 241 213 if (dentry->d_inode) { 242 - if (dentry->d_inode->i_private == &dev_mnt) 214 + if (dentry->d_inode->i_private == &thread) 243 215 err = vfs_rmdir(nd.path.dentry->d_inode, 244 216 dentry); 245 217 else ··· 266 238 if (!path) 267 239 return -ENOMEM; 268 240 269 - mutex_lock(&dirlock); 270 241 for (;;) { 271 242 char *base; 272 243 ··· 277 250 if (err) 278 251 break; 279 252 } 280 - mutex_unlock(&dirlock); 281 253 282 254 kfree(path); 283 255 return err; ··· 285 259 static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *stat) 286 260 { 287 261 /* did we create it */ 288 - if (inode->i_private != &dev_mnt) 262 + if (inode->i_private 
!= &thread) 289 263 return 0; 290 264 291 265 /* does the dev_t match */ ··· 303 277 return 1; 304 278 } 305 279 306 - int devtmpfs_delete_node(struct device *dev) 280 + static int handle_remove(const char *nodename, struct device *dev) 307 281 { 308 - const char *tmp = NULL; 309 - const char *nodename; 310 - const struct cred *curr_cred; 311 282 struct nameidata nd; 312 283 struct dentry *dentry; 313 284 struct kstat stat; 314 285 int deleted = 1; 315 286 int err; 316 287 317 - if (!dev_mnt) 318 - return 0; 319 - 320 - nodename = device_get_devnode(dev, NULL, &tmp); 321 - if (!nodename) 322 - return -ENOMEM; 323 - 324 - curr_cred = override_creds(&init_cred); 325 - err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt, 326 - nodename, LOOKUP_PARENT, &nd); 288 + err = kern_path_parent(nodename, &nd); 327 289 if (err) 328 - goto out; 290 + return err; 329 291 330 292 mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); 331 293 dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len); ··· 351 337 path_put(&nd.path); 352 338 if (deleted && strchr(nodename, '/')) 353 339 delete_path(nodename); 354 - out: 355 - kfree(tmp); 356 - revert_creds(curr_cred); 357 340 return err; 358 341 } 359 342 ··· 365 354 if (!mount_dev) 366 355 return 0; 367 356 368 - if (!dev_mnt) 357 + if (!thread) 369 358 return 0; 370 359 371 360 err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL); ··· 376 365 return err; 377 366 } 378 367 368 + static __initdata DECLARE_COMPLETION(setup_done); 369 + 370 + static int handle(const char *name, mode_t mode, struct device *dev) 371 + { 372 + if (mode) 373 + return handle_create(name, mode, dev); 374 + else 375 + return handle_remove(name, dev); 376 + } 377 + 378 + static int devtmpfsd(void *p) 379 + { 380 + char options[] = "mode=0755"; 381 + int *err = p; 382 + *err = sys_unshare(CLONE_NEWNS); 383 + if (*err) 384 + goto out; 385 + *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options); 386 + if (*err) 
387 + goto out; 388 + sys_chdir("/.."); /* will traverse into overmounted root */ 389 + sys_chroot("."); 390 + complete(&setup_done); 391 + while (1) { 392 + spin_lock(&req_lock); 393 + while (requests) { 394 + struct req *req = requests; 395 + requests = NULL; 396 + spin_unlock(&req_lock); 397 + while (req) { 398 + req->err = handle(req->name, req->mode, req->dev); 399 + complete(&req->done); 400 + req = req->next; 401 + } 402 + spin_lock(&req_lock); 403 + } 404 + set_current_state(TASK_INTERRUPTIBLE); 405 + spin_unlock(&req_lock); 406 + schedule(); 407 + __set_current_state(TASK_RUNNING); 408 + } 409 + return 0; 410 + out: 411 + complete(&setup_done); 412 + return *err; 413 + } 414 + 379 415 /* 380 416 * Create devtmpfs instance, driver-core devices will add their device 381 417 * nodes here. 382 418 */ 383 419 int __init devtmpfs_init(void) 384 420 { 385 - int err; 386 - struct vfsmount *mnt; 387 - char options[] = "mode=0755"; 388 - 389 - err = register_filesystem(&dev_fs_type); 421 + int err = register_filesystem(&dev_fs_type); 390 422 if (err) { 391 423 printk(KERN_ERR "devtmpfs: unable to register devtmpfs " 392 424 "type %i\n", err); 393 425 return err; 394 426 } 395 427 396 - mnt = kern_mount_data(&dev_fs_type, options); 397 - if (IS_ERR(mnt)) { 398 - err = PTR_ERR(mnt); 428 + thread = kthread_run(devtmpfsd, &err, "kdevtmpfs"); 429 + if (!IS_ERR(thread)) { 430 + wait_for_completion(&setup_done); 431 + } else { 432 + err = PTR_ERR(thread); 433 + thread = NULL; 434 + } 435 + 436 + if (err) { 399 437 printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err); 400 438 unregister_filesystem(&dev_fs_type); 401 439 return err; 402 440 } 403 - dev_mnt = mnt; 404 441 405 442 printk(KERN_INFO "devtmpfs: initialized\n"); 406 443 return 0;
+1 -1
drivers/block/pktcdvd.c
··· 1206 1206 if (!sb) 1207 1207 return 0; 1208 1208 1209 - if (!sb->s_op || !sb->s_op->relocate_blocks) 1209 + if (!sb->s_op->relocate_blocks) 1210 1210 goto out; 1211 1211 1212 1212 old_block = pkt->sector / (CD_FRAMESIZE >> 9);
+4
drivers/char/generic_nvram.c
··· 34 34 static loff_t nvram_llseek(struct file *file, loff_t offset, int origin) 35 35 { 36 36 switch (origin) { 37 + case 0: 38 + break; 37 39 case 1: 38 40 offset += file->f_pos; 39 41 break; 40 42 case 2: 41 43 offset += nvram_len; 42 44 break; 45 + default: 46 + offset = -1; 43 47 } 44 48 if (offset < 0) 45 49 return -EINVAL;
+2
drivers/char/nvram.c
··· 224 224 case 2: 225 225 offset += NVRAM_BYTES; 226 226 break; 227 + default: 228 + return -EINVAL; 227 229 } 228 230 229 231 return (offset >= 0) ? (file->f_pos = offset) : -EINVAL;
+11 -2
drivers/char/ps3flash.c
··· 101 101 102 102 mutex_lock(&file->f_mapping->host->i_mutex); 103 103 switch (origin) { 104 + case 0: 105 + break; 104 106 case 1: 105 107 offset += file->f_pos; 106 108 break; 107 109 case 2: 108 110 offset += dev->regions[dev->region_idx].size*dev->blk_size; 109 111 break; 112 + default: 113 + offset = -1; 110 114 } 111 115 if (offset < 0) { 112 116 res = -EINVAL; ··· 309 305 return ps3flash_writeback(ps3flash_dev); 310 306 } 311 307 312 - static int ps3flash_fsync(struct file *file, int datasync) 308 + static int ps3flash_fsync(struct file *file, loff_t start, loff_t end, int datasync) 313 309 { 314 - return ps3flash_writeback(ps3flash_dev); 310 + struct inode *inode = file->f_path.dentry->d_inode; 311 + int err; 312 + mutex_lock(&inode->i_mutex); 313 + err = ps3flash_writeback(ps3flash_dev); 314 + mutex_unlock(&inode->i_mutex); 315 + return err; 315 316 } 316 317 317 318 static irqreturn_t ps3flash_interrupt(int irq, void *data)
+4
drivers/macintosh/nvram.c
··· 21 21 static loff_t nvram_llseek(struct file *file, loff_t offset, int origin) 22 22 { 23 23 switch (origin) { 24 + case 0: 25 + break; 24 26 case 1: 25 27 offset += file->f_pos; 26 28 break; 27 29 case 2: 28 30 offset += NVRAM_SIZE; 29 31 break; 32 + default: 33 + offset = -1; 30 34 } 31 35 if (offset < 0) 32 36 return -EINVAL;
+8 -18
drivers/md/md.c
··· 6394 6394 mddev_put(mddev); 6395 6395 } 6396 6396 6397 - struct mdstat_info { 6398 - int event; 6399 - }; 6400 - 6401 6397 static int md_seq_show(struct seq_file *seq, void *v) 6402 6398 { 6403 6399 mddev_t *mddev = v; 6404 6400 sector_t sectors; 6405 6401 mdk_rdev_t *rdev; 6406 - struct mdstat_info *mi = seq->private; 6407 6402 struct bitmap *bitmap; 6408 6403 6409 6404 if (v == (void*)1) { ··· 6410 6415 6411 6416 spin_unlock(&pers_lock); 6412 6417 seq_printf(seq, "\n"); 6413 - mi->event = atomic_read(&md_event_count); 6418 + seq->poll_event = atomic_read(&md_event_count); 6414 6419 return 0; 6415 6420 } 6416 6421 if (v == (void*)2) { ··· 6522 6527 6523 6528 static int md_seq_open(struct inode *inode, struct file *file) 6524 6529 { 6530 + struct seq_file *seq; 6525 6531 int error; 6526 - struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL); 6527 - if (mi == NULL) 6528 - return -ENOMEM; 6529 6532 6530 6533 error = seq_open(file, &md_seq_ops); 6531 6534 if (error) 6532 - kfree(mi); 6533 - else { 6534 - struct seq_file *p = file->private_data; 6535 - p->private = mi; 6536 - mi->event = atomic_read(&md_event_count); 6537 - } 6535 + return error; 6536 + 6537 + seq = file->private_data; 6538 + seq->poll_event = atomic_read(&md_event_count); 6538 6539 return error; 6539 6540 } 6540 6541 6541 6542 static unsigned int mdstat_poll(struct file *filp, poll_table *wait) 6542 6543 { 6543 - struct seq_file *m = filp->private_data; 6544 - struct mdstat_info *mi = m->private; 6544 + struct seq_file *seq = filp->private_data; 6545 6545 int mask; 6546 6546 6547 6547 poll_wait(filp, &md_event_waiters, wait); ··· 6544 6554 /* always allow read */ 6545 6555 mask = POLLIN | POLLRDNORM; 6546 6556 6547 - if (mi->event != atomic_read(&md_event_count)) 6557 + if (seq->poll_event != atomic_read(&md_event_count)) 6548 6558 mask |= POLLERR | POLLPRI; 6549 6559 return mask; 6550 6560 }
+7 -3
drivers/mtd/ubi/cdev.c
··· 189 189 return new_offset; 190 190 } 191 191 192 - static int vol_cdev_fsync(struct file *file, int datasync) 192 + static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end, int datasync) 193 193 { 194 194 struct ubi_volume_desc *desc = file->private_data; 195 195 struct ubi_device *ubi = desc->vol->ubi; 196 - 197 - return ubi_sync(ubi->ubi_num); 196 + struct inode *inode = file->f_path.dentry->d_inode; 197 + int err; 198 + mutex_lock(&inode->i_mutex); 199 + err = ubi_sync(ubi->ubi_num); 200 + mutex_unlock(&inode->i_mutex); 201 + return err; 198 202 } 199 203 200 204
+2 -5
drivers/sh/clk/core.c
··· 670 670 static int clk_debugfs_register_one(struct clk *c) 671 671 { 672 672 int err; 673 - struct dentry *d, *child, *child_tmp; 673 + struct dentry *d; 674 674 struct clk *pa = c->parent; 675 675 char s[255]; 676 676 char *p = s; ··· 699 699 return 0; 700 700 701 701 err_out: 702 - d = c->dentry; 703 - list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child) 704 - debugfs_remove(child); 705 - debugfs_remove(c->dentry); 702 + debugfs_remove_recursive(c->dentry); 706 703 return err; 707 704 } 708 705
+1 -1
drivers/staging/pohmelfs/dir.c
··· 512 512 int err, lock_type = POHMELFS_READ_LOCK, need_lock = 1; 513 513 struct qstr str = dentry->d_name; 514 514 515 - if ((nd->intent.open.flags & O_ACCMODE) > 1) 515 + if ((nd->intent.open.flags & O_ACCMODE) != O_RDONLY) 516 516 lock_type = POHMELFS_WRITE_LOCK; 517 517 518 518 if (test_bit(NETFS_INODE_OWNED, &parent->state)) {
+8 -3
drivers/staging/pohmelfs/inode.c
··· 887 887 /* 888 888 * We want fsync() to work on POHMELFS. 889 889 */ 890 - static int pohmelfs_fsync(struct file *file, int datasync) 890 + static int pohmelfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) 891 891 { 892 892 struct inode *inode = file->f_mapping->host; 893 - 894 - return sync_inode_metadata(inode, 1); 893 + int err = filemap_write_and_wait_range(inode->i_mapping, start, end); 894 + if (!err) { 895 + mutex_lock(&inode->i_mutex); 896 + err = sync_inode_metadata(inode, 1); 897 + mutex_unlock(&inode->i_mutex); 898 + } 899 + return err; 895 900 } 896 901 897 902 ssize_t pohmelfs_write(struct file *file, const char __user *buf,
+4 -1
drivers/usb/gadget/printer.c
··· 795 795 } 796 796 797 797 static int 798 - printer_fsync(struct file *fd, int datasync) 798 + printer_fsync(struct file *fd, loff_t start, loff_t end, int datasync) 799 799 { 800 800 struct printer_dev *dev = fd->private_data; 801 + struct inode *inode = fd->f_path.dentry->d_inode; 801 802 unsigned long flags; 802 803 int tx_list_empty; 803 804 805 + mutex_lock(&inode->i_mutex); 804 806 spin_lock_irqsave(&dev->lock, flags); 805 807 tx_list_empty = (likely(list_empty(&dev->tx_reqs))); 806 808 spin_unlock_irqrestore(&dev->lock, flags); ··· 812 810 wait_event_interruptible(dev->tx_flush_wait, 813 811 (likely(list_empty(&dev->tx_reqs_active)))); 814 812 } 813 + mutex_unlock(&inode->i_mutex); 815 814 816 815 return 0; 817 816 }
+9 -2
drivers/video/fb_defio.c
··· 66 66 return 0; 67 67 } 68 68 69 - int fb_deferred_io_fsync(struct file *file, int datasync) 69 + int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync) 70 70 { 71 71 struct fb_info *info = file->private_data; 72 + struct inode *inode = file->f_path.dentry->d_inode; 73 + int err = filemap_write_and_wait_range(inode->i_mapping, start, end); 74 + if (err) 75 + return err; 72 76 73 77 /* Skip if deferred io is compiled-in but disabled on this fbdev */ 74 78 if (!info->fbdefio) 75 79 return 0; 76 80 81 + mutex_lock(&inode->i_mutex); 77 82 /* Kill off the delayed work */ 78 83 cancel_delayed_work_sync(&info->deferred_work); 79 84 80 85 /* Run it immediately */ 81 - return schedule_delayed_work(&info->deferred_work, 0); 86 + err = schedule_delayed_work(&info->deferred_work, 0); 87 + mutex_unlock(&inode->i_mutex); 88 + return err; 82 89 } 83 90 EXPORT_SYMBOL_GPL(fb_deferred_io_fsync); 84 91
+2 -2
fs/9p/acl.c
··· 96 96 return acl; 97 97 } 98 98 99 - int v9fs_check_acl(struct inode *inode, int mask, unsigned int flags) 99 + int v9fs_check_acl(struct inode *inode, int mask) 100 100 { 101 101 struct posix_acl *acl; 102 102 struct v9fs_session_info *v9ses; 103 103 104 - if (flags & IPERM_FLAG_RCU) 104 + if (mask & MAY_NOT_BLOCK) 105 105 return -ECHILD; 106 106 107 107 v9ses = v9fs_inode2v9ses(inode);
+1 -1
fs/9p/acl.h
··· 16 16 17 17 #ifdef CONFIG_9P_FS_POSIX_ACL 18 18 extern int v9fs_get_acl(struct inode *, struct p9_fid *); 19 - extern int v9fs_check_acl(struct inode *inode, int mask, unsigned int flags); 19 + extern int v9fs_check_acl(struct inode *inode, int mask); 20 20 extern int v9fs_acl_chmod(struct dentry *); 21 21 extern int v9fs_set_create_acl(struct dentry *, 22 22 struct posix_acl *, struct posix_acl *);
+2 -1
fs/9p/v9fs_vfs.h
··· 70 70 ssize_t v9fs_fid_readn(struct p9_fid *, char *, char __user *, u32, u64); 71 71 void v9fs_blank_wstat(struct p9_wstat *wstat); 72 72 int v9fs_vfs_setattr_dotl(struct dentry *, struct iattr *); 73 - int v9fs_file_fsync_dotl(struct file *filp, int datasync); 73 + int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end, 74 + int datasync); 74 75 ssize_t v9fs_file_write_internal(struct inode *, struct p9_fid *, 75 76 const char __user *, size_t, loff_t *, int); 76 77 int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode);
+20 -2
fs/9p/vfs_file.c
··· 519 519 } 520 520 521 521 522 - static int v9fs_file_fsync(struct file *filp, int datasync) 522 + static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end, 523 + int datasync) 523 524 { 524 525 struct p9_fid *fid; 526 + struct inode *inode = filp->f_mapping->host; 525 527 struct p9_wstat wstat; 526 528 int retval; 527 529 530 + retval = filemap_write_and_wait_range(inode->i_mapping, start, end); 531 + if (retval) 532 + return retval; 533 + 534 + mutex_lock(&inode->i_mutex); 528 535 P9_DPRINTK(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync); 529 536 530 537 fid = filp->private_data; 531 538 v9fs_blank_wstat(&wstat); 532 539 533 540 retval = p9_client_wstat(fid, &wstat); 541 + mutex_unlock(&inode->i_mutex); 542 + 534 543 return retval; 535 544 } 536 545 537 - int v9fs_file_fsync_dotl(struct file *filp, int datasync) 546 + int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end, 547 + int datasync) 538 548 { 539 549 struct p9_fid *fid; 550 + struct inode *inode = filp->f_mapping->host; 540 551 int retval; 541 552 553 + retval = filemap_write_and_wait_range(inode->i_mapping, start, end); 554 + if (retval) 555 + return retval; 556 + 557 + mutex_lock(&inode->i_mutex); 542 558 P9_DPRINTK(P9_DEBUG_VFS, "v9fs_file_fsync_dotl: filp %p datasync %x\n", 543 559 filp, datasync); 544 560 545 561 fid = filp->private_data; 546 562 547 563 retval = p9_client_fsync(fid, datasync); 564 + mutex_unlock(&inode->i_mutex); 565 + 548 566 return retval; 549 567 } 550 568
+3 -3
fs/9p/vfs_inode.c
··· 633 633 fid = NULL; 634 634 v9ses = v9fs_inode2v9ses(dir); 635 635 perm = unixmode2p9mode(v9ses, mode); 636 - if (nd && nd->flags & LOOKUP_OPEN) 637 - flags = nd->intent.open.flags - 1; 636 + if (nd) 637 + flags = nd->intent.open.flags; 638 638 else 639 639 flags = O_RDWR; 640 640 ··· 649 649 650 650 v9fs_invalidate_inode_attr(dir); 651 651 /* if we are opening a file, assign the open fid to the file */ 652 - if (nd && nd->flags & LOOKUP_OPEN) { 652 + if (nd) { 653 653 v9inode = V9FS_I(dentry->d_inode); 654 654 mutex_lock(&v9inode->v_mutex); 655 655 if (v9ses->cache && !v9inode->writeback_fid &&
+2 -2
fs/9p/vfs_inode_dotl.c
··· 173 173 struct posix_acl *pacl = NULL, *dacl = NULL; 174 174 175 175 v9ses = v9fs_inode2v9ses(dir); 176 - if (nd && nd->flags & LOOKUP_OPEN) 177 - flags = nd->intent.open.flags - 1; 176 + if (nd) 177 + flags = nd->intent.open.flags; 178 178 else { 179 179 /* 180 180 * create call without LOOKUP_OPEN is due
+1 -1
fs/affs/affs.h
··· 182 182 183 183 void affs_free_prealloc(struct inode *inode); 184 184 extern void affs_truncate(struct inode *); 185 - int affs_file_fsync(struct file *, int); 185 + int affs_file_fsync(struct file *, loff_t, loff_t, int); 186 186 187 187 /* dir.c */ 188 188
+7 -1
fs/affs/file.c
··· 923 923 affs_free_prealloc(inode); 924 924 } 925 925 926 - int affs_file_fsync(struct file *filp, int datasync) 926 + int affs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync) 927 927 { 928 928 struct inode *inode = filp->f_mapping->host; 929 929 int ret, err; 930 930 931 + err = filemap_write_and_wait_range(inode->i_mapping, start, end); 932 + if (err) 933 + return err; 934 + 935 + mutex_lock(&inode->i_mutex); 931 936 ret = write_inode_now(inode, 0); 932 937 err = sync_blockdev(inode->i_sb->s_bdev); 933 938 if (!ret) 934 939 ret = err; 940 + mutex_unlock(&inode->i_mutex); 935 941 return ret; 936 942 }
+1 -1
fs/afs/afs_vl.h
··· 49 49 AFSVL_BADVOLOPER = 363542, /* Bad volume operation code */ 50 50 AFSVL_BADRELLOCKTYPE = 363543, /* Bad release lock type */ 51 51 AFSVL_RERELEASE = 363544, /* Status report: last release was aborted */ 52 - AFSVL_BADSERVERFLAG = 363545, /* Invalid replication site server °ag */ 52 + AFSVL_BADSERVERFLAG = 363545, /* Invalid replication site server flag */ 53 53 AFSVL_PERM = 363546, /* No permission access */ 54 54 AFSVL_NOMEM = 363547, /* malloc/realloc failed to alloc enough memory */ 55 55 };
+2 -2
fs/afs/internal.h
··· 627 627 extern void afs_cache_permit(struct afs_vnode *, struct key *, long); 628 628 extern void afs_zap_permits(struct rcu_head *); 629 629 extern struct key *afs_request_key(struct afs_cell *); 630 - extern int afs_permission(struct inode *, int, unsigned int); 630 + extern int afs_permission(struct inode *, int); 631 631 632 632 /* 633 633 * server.c ··· 750 750 extern ssize_t afs_file_write(struct kiocb *, const struct iovec *, 751 751 unsigned long, loff_t); 752 752 extern int afs_writeback_all(struct afs_vnode *); 753 - extern int afs_fsync(struct file *, int); 753 + extern int afs_fsync(struct file *, loff_t, loff_t, int); 754 754 755 755 756 756 /*****************************************************************************/
+3 -3
fs/afs/security.c
··· 285 285 * - AFS ACLs are attached to directories only, and a file is controlled by its 286 286 * parent directory's ACL 287 287 */ 288 - int afs_permission(struct inode *inode, int mask, unsigned int flags) 288 + int afs_permission(struct inode *inode, int mask) 289 289 { 290 290 struct afs_vnode *vnode = AFS_FS_I(inode); 291 291 afs_access_t uninitialized_var(access); 292 292 struct key *key; 293 293 int ret; 294 294 295 - if (flags & IPERM_FLAG_RCU) 295 + if (mask & MAY_NOT_BLOCK) 296 296 return -ECHILD; 297 297 298 298 _enter("{{%x:%u},%lx},%x,", ··· 350 350 } 351 351 352 352 key_put(key); 353 - ret = generic_permission(inode, mask, flags, NULL); 353 + ret = generic_permission(inode, mask); 354 354 _leave(" = %d", ret); 355 355 return ret; 356 356
+14 -4
fs/afs/write.c
··· 681 681 * - the return status from this call provides a reliable indication of 682 682 * whether any write errors occurred for this process. 683 683 */ 684 - int afs_fsync(struct file *file, int datasync) 684 + int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync) 685 685 { 686 686 struct dentry *dentry = file->f_path.dentry; 687 + struct inode *inode = file->f_mapping->host; 687 688 struct afs_writeback *wb, *xwb; 688 689 struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode); 689 690 int ret; ··· 693 692 vnode->fid.vid, vnode->fid.vnode, dentry->d_name.name, 694 693 datasync); 695 694 695 + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); 696 + if (ret) 697 + return ret; 698 + mutex_lock(&inode->i_mutex); 699 + 696 700 /* use a writeback record as a marker in the queue - when this reaches 697 701 * the front of the queue, all the outstanding writes are either 698 702 * completed or rejected */ 699 703 wb = kzalloc(sizeof(*wb), GFP_KERNEL); 700 - if (!wb) 701 - return -ENOMEM; 704 + if (!wb) { 705 + ret = -ENOMEM; 706 + goto out; 707 + } 702 708 wb->vnode = vnode; 703 709 wb->first = 0; 704 710 wb->last = -1; ··· 728 720 if (ret < 0) { 729 721 afs_put_writeback(wb); 730 722 _leave(" = %d [wb]", ret); 731 - return ret; 723 + goto out; 732 724 } 733 725 734 726 /* wait for the preceding writes to actually complete */ ··· 737 729 vnode->writebacks.next == &wb->link); 738 730 afs_put_writeback(wb); 739 731 _leave(" = %d", ret); 732 + out: 733 + mutex_unlock(&inode->i_mutex); 740 734 return ret; 741 735 } 742 736
-6
fs/attr.c
··· 232 232 if (error) 233 233 return error; 234 234 235 - if (ia_valid & ATTR_SIZE) 236 - down_write(&dentry->d_inode->i_alloc_sem); 237 - 238 235 if (inode->i_op->setattr) 239 236 error = inode->i_op->setattr(dentry, attr); 240 237 else 241 238 error = simple_setattr(dentry, attr); 242 - 243 - if (ia_valid & ATTR_SIZE) 244 - up_write(&dentry->d_inode->i_alloc_sem); 245 239 246 240 if (!error) 247 241 fsnotify_change(dentry, ia_valid);
+3 -2
fs/bad_inode.c
··· 87 87 return -EIO; 88 88 } 89 89 90 - static int bad_file_fsync(struct file *file, int datasync) 90 + static int bad_file_fsync(struct file *file, loff_t start, loff_t end, 91 + int datasync) 91 92 { 92 93 return -EIO; 93 94 } ··· 230 229 return -EIO; 231 230 } 232 231 233 - static int bad_inode_permission(struct inode *inode, int mask, unsigned int flags) 232 + static int bad_inode_permission(struct inode *inode, int mask) 234 233 { 235 234 return -EIO; 236 235 }
+1 -2
fs/binfmt_elf.c
··· 668 668 * mm->dumpable = 0 regardless of the interpreter's 669 669 * permissions. 670 670 */ 671 - if (file_permission(interpreter, MAY_READ) < 0) 672 - bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP; 671 + would_dump(bprm, interpreter); 673 672 674 673 retval = kernel_read(interpreter, 0, bprm->buf, 675 674 BINPRM_BUF_SIZE);
+1 -2
fs/binfmt_elf_fdpic.c
··· 245 245 * mm->dumpable = 0 regardless of the interpreter's 246 246 * permissions. 247 247 */ 248 - if (file_permission(interpreter, MAY_READ) < 0) 249 - bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP; 248 + would_dump(bprm, interpreter); 250 249 251 250 retval = kernel_read(interpreter, 0, bprm->buf, 252 251 BINPRM_BUF_SIZE);
+1 -2
fs/binfmt_misc.c
··· 149 149 150 150 /* if the binary is not readable than enforce mm->dumpable=0 151 151 regardless of the interpreter's permissions */ 152 - if (file_permission(bprm->file, MAY_READ)) 153 - bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP; 152 + would_dump(bprm, bprm->file); 154 153 155 154 allow_write_access(bprm->file); 156 155 bprm->file = NULL;
+9 -8
fs/block_dev.c
··· 355 355 mutex_lock(&bd_inode->i_mutex); 356 356 size = i_size_read(bd_inode); 357 357 358 + retval = -EINVAL; 358 359 switch (origin) { 359 - case 2: 360 + case SEEK_END: 360 361 offset += size; 361 362 break; 362 - case 1: 363 + case SEEK_CUR: 363 364 offset += file->f_pos; 365 + case SEEK_SET: 366 + break; 367 + default: 368 + goto out; 364 369 } 365 - retval = -EINVAL; 366 370 if (offset >= 0 && offset <= size) { 367 371 if (offset != file->f_pos) { 368 372 file->f_pos = offset; 369 373 } 370 374 retval = offset; 371 375 } 376 + out: 372 377 mutex_unlock(&bd_inode->i_mutex); 373 378 return retval; 374 379 } 375 380 376 - int blkdev_fsync(struct file *filp, int datasync) 381 + int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync) 377 382 { 378 383 struct inode *bd_inode = filp->f_mapping->host; 379 384 struct block_device *bdev = I_BDEV(bd_inode); ··· 389 384 * i_mutex and doing so causes performance issues with concurrent 390 385 * O_SYNC writers to a block device. 391 386 */ 392 - mutex_unlock(&bd_inode->i_mutex); 393 - 394 387 error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL); 395 388 if (error == -EOPNOTSUPP) 396 389 error = 0; 397 - 398 - mutex_lock(&bd_inode->i_mutex); 399 390 400 391 return error; 401 392 }
+2 -3
fs/btrfs/acl.c
··· 195 195 return ret; 196 196 } 197 197 198 - int btrfs_check_acl(struct inode *inode, int mask, unsigned int flags) 198 + int btrfs_check_acl(struct inode *inode, int mask) 199 199 { 200 200 int error = -EAGAIN; 201 201 202 - if (flags & IPERM_FLAG_RCU) { 202 + if (mask & MAY_NOT_BLOCK) { 203 203 if (!negative_cached_acl(inode, ACL_TYPE_ACCESS)) 204 204 error = -ECHILD; 205 - 206 205 } else { 207 206 struct posix_acl *acl; 208 207 acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS);
+6 -3
fs/btrfs/ctree.h
··· 1219 1219 * right now this just gets used so that a root has its own devid 1220 1220 * for stat. It may be used for more later 1221 1221 */ 1222 - struct super_block anon_super; 1222 + dev_t anon_dev; 1223 1223 }; 1224 1224 1225 1225 struct btrfs_ioctl_defrag_range_args { ··· 2510 2510 int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, 2511 2511 struct list_head *list, int search_commit); 2512 2512 /* inode.c */ 2513 + struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, 2514 + size_t pg_offset, u64 start, u64 len, 2515 + int create); 2513 2516 2514 2517 /* RHEL and EL kernels have a patch that renames PG_checked to FsMisc */ 2515 2518 #if defined(ClearPageFsMisc) && !defined(ClearPageChecked) ··· 2605 2602 int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, 2606 2603 struct inode *inode); 2607 2604 int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info); 2608 - int btrfs_sync_file(struct file *file, int datasync); 2605 + int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); 2609 2606 int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, 2610 2607 int skip_pinned); 2611 2608 extern const struct file_operations btrfs_file_operations; ··· 2645 2642 2646 2643 /* acl.c */ 2647 2644 #ifdef CONFIG_BTRFS_FS_POSIX_ACL 2648 - int btrfs_check_acl(struct inode *inode, int mask, unsigned int flags); 2645 + int btrfs_check_acl(struct inode *inode, int mask); 2649 2646 #else 2650 2647 #define btrfs_check_acl NULL 2651 2648 #endif
+4 -11
fs/btrfs/disk-io.c
··· 1077 1077 init_completion(&root->kobj_unregister); 1078 1078 root->defrag_running = 0; 1079 1079 root->root_key.objectid = objectid; 1080 - root->anon_super.s_root = NULL; 1081 - root->anon_super.s_dev = 0; 1082 - INIT_LIST_HEAD(&root->anon_super.s_list); 1083 - INIT_LIST_HEAD(&root->anon_super.s_instances); 1084 - init_rwsem(&root->anon_super.s_umount); 1085 - 1080 + root->anon_dev = 0; 1086 1081 return 0; 1087 1082 } 1088 1083 ··· 1306 1311 spin_lock_init(&root->cache_lock); 1307 1312 init_waitqueue_head(&root->cache_wait); 1308 1313 1309 - ret = set_anon_super(&root->anon_super, NULL); 1314 + ret = get_anon_bdev(&root->anon_dev); 1310 1315 if (ret) 1311 1316 goto fail; 1312 1317 ··· 2388 2393 { 2389 2394 iput(root->cache_inode); 2390 2395 WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); 2391 - if (root->anon_super.s_dev) { 2392 - down_write(&root->anon_super.s_umount); 2393 - kill_anon_super(&root->anon_super); 2394 - } 2396 + if (root->anon_dev) 2397 + free_anon_bdev(root->anon_dev); 2395 2398 free_extent_buffer(root->node); 2396 2399 free_extent_buffer(root->commit_root); 2397 2400 kfree(root->free_ino_ctl);
+162 -7
fs/btrfs/file.c
··· 1452 1452 * important optimization for directories because holding the mutex prevents 1453 1453 * new operations on the dir while we write to disk. 1454 1454 */ 1455 - int btrfs_sync_file(struct file *file, int datasync) 1455 + int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) 1456 1456 { 1457 1457 struct dentry *dentry = file->f_path.dentry; 1458 1458 struct inode *inode = dentry->d_inode; ··· 1462 1462 1463 1463 trace_btrfs_sync_file(file, datasync); 1464 1464 1465 + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); 1466 + if (ret) 1467 + return ret; 1468 + mutex_lock(&inode->i_mutex); 1469 + 1465 1470 /* we wait first, since the writeback may change the inode */ 1466 1471 root->log_batch++; 1467 - /* the VFS called filemap_fdatawrite for us */ 1468 1472 btrfs_wait_ordered_range(inode, 0, (u64)-1); 1469 1473 root->log_batch++; 1470 1474 ··· 1476 1472 * check the transaction that last modified this inode 1477 1473 * and see if its already been committed 1478 1474 */ 1479 - if (!BTRFS_I(inode)->last_trans) 1475 + if (!BTRFS_I(inode)->last_trans) { 1476 + mutex_unlock(&inode->i_mutex); 1480 1477 goto out; 1478 + } 1481 1479 1482 1480 /* 1483 1481 * if the last transaction that changed this file was before ··· 1490 1484 if (BTRFS_I(inode)->last_trans <= 1491 1485 root->fs_info->last_trans_committed) { 1492 1486 BTRFS_I(inode)->last_trans = 0; 1487 + mutex_unlock(&inode->i_mutex); 1493 1488 goto out; 1494 1489 } 1495 1490 ··· 1503 1496 trans = btrfs_start_transaction(root, 0); 1504 1497 if (IS_ERR(trans)) { 1505 1498 ret = PTR_ERR(trans); 1499 + mutex_unlock(&inode->i_mutex); 1506 1500 goto out; 1507 1501 } 1508 1502 1509 1503 ret = btrfs_log_dentry_safe(trans, root, dentry); 1510 - if (ret < 0) 1504 + if (ret < 0) { 1505 + mutex_unlock(&inode->i_mutex); 1511 1506 goto out; 1507 + } 1512 1508 1513 1509 /* we've logged all the items and now have a consistent 1514 1510 * version of the file in the log. 
It is possible that ··· 1523 1513 * file again, but that will end up using the synchronization 1524 1514 * inside btrfs_sync_log to keep things safe. 1525 1515 */ 1526 - mutex_unlock(&dentry->d_inode->i_mutex); 1516 + mutex_unlock(&inode->i_mutex); 1527 1517 1528 1518 if (ret != BTRFS_NO_LOG_SYNC) { 1529 1519 if (ret > 0) { ··· 1538 1528 } else { 1539 1529 ret = btrfs_end_transaction(trans, root); 1540 1530 } 1541 - mutex_lock(&dentry->d_inode->i_mutex); 1542 1531 out: 1543 1532 return ret > 0 ? -EIO : ret; 1544 1533 } ··· 1673 1664 return ret; 1674 1665 } 1675 1666 1667 + static int find_desired_extent(struct inode *inode, loff_t *offset, int origin) 1668 + { 1669 + struct btrfs_root *root = BTRFS_I(inode)->root; 1670 + struct extent_map *em; 1671 + struct extent_state *cached_state = NULL; 1672 + u64 lockstart = *offset; 1673 + u64 lockend = i_size_read(inode); 1674 + u64 start = *offset; 1675 + u64 orig_start = *offset; 1676 + u64 len = i_size_read(inode); 1677 + u64 last_end = 0; 1678 + int ret = 0; 1679 + 1680 + lockend = max_t(u64, root->sectorsize, lockend); 1681 + if (lockend <= lockstart) 1682 + lockend = lockstart + root->sectorsize; 1683 + 1684 + len = lockend - lockstart + 1; 1685 + 1686 + len = max_t(u64, len, root->sectorsize); 1687 + if (inode->i_size == 0) 1688 + return -ENXIO; 1689 + 1690 + lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0, 1691 + &cached_state, GFP_NOFS); 1692 + 1693 + /* 1694 + * Delalloc is such a pain. If we have a hole and we have pending 1695 + * delalloc for a portion of the hole we will get back a hole that 1696 + * exists for the entire range since it hasn't been actually written 1697 + * yet. So to take care of this case we need to look for an extent just 1698 + * before the position we want in case there is outstanding delalloc 1699 + * going on here. 
1700 + */ 1701 + if (origin == SEEK_HOLE && start != 0) { 1702 + if (start <= root->sectorsize) 1703 + em = btrfs_get_extent_fiemap(inode, NULL, 0, 0, 1704 + root->sectorsize, 0); 1705 + else 1706 + em = btrfs_get_extent_fiemap(inode, NULL, 0, 1707 + start - root->sectorsize, 1708 + root->sectorsize, 0); 1709 + if (IS_ERR(em)) { 1710 + ret = -ENXIO; 1711 + goto out; 1712 + } 1713 + last_end = em->start + em->len; 1714 + if (em->block_start == EXTENT_MAP_DELALLOC) 1715 + last_end = min_t(u64, last_end, inode->i_size); 1716 + free_extent_map(em); 1717 + } 1718 + 1719 + while (1) { 1720 + em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0); 1721 + if (IS_ERR(em)) { 1722 + ret = -ENXIO; 1723 + break; 1724 + } 1725 + 1726 + if (em->block_start == EXTENT_MAP_HOLE) { 1727 + if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) { 1728 + if (last_end <= orig_start) { 1729 + free_extent_map(em); 1730 + ret = -ENXIO; 1731 + break; 1732 + } 1733 + } 1734 + 1735 + if (origin == SEEK_HOLE) { 1736 + *offset = start; 1737 + free_extent_map(em); 1738 + break; 1739 + } 1740 + } else { 1741 + if (origin == SEEK_DATA) { 1742 + if (em->block_start == EXTENT_MAP_DELALLOC) { 1743 + if (start >= inode->i_size) { 1744 + free_extent_map(em); 1745 + ret = -ENXIO; 1746 + break; 1747 + } 1748 + } 1749 + 1750 + *offset = start; 1751 + free_extent_map(em); 1752 + break; 1753 + } 1754 + } 1755 + 1756 + start = em->start + em->len; 1757 + last_end = em->start + em->len; 1758 + 1759 + if (em->block_start == EXTENT_MAP_DELALLOC) 1760 + last_end = min_t(u64, last_end, inode->i_size); 1761 + 1762 + if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) { 1763 + free_extent_map(em); 1764 + ret = -ENXIO; 1765 + break; 1766 + } 1767 + free_extent_map(em); 1768 + cond_resched(); 1769 + } 1770 + if (!ret) 1771 + *offset = min(*offset, inode->i_size); 1772 + out: 1773 + unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, 1774 + &cached_state, GFP_NOFS); 1775 + return ret; 1776 + } 1777 + 1778 + 
static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin) 1779 + { 1780 + struct inode *inode = file->f_mapping->host; 1781 + int ret; 1782 + 1783 + mutex_lock(&inode->i_mutex); 1784 + switch (origin) { 1785 + case SEEK_END: 1786 + case SEEK_CUR: 1787 + offset = generic_file_llseek_unlocked(file, offset, origin); 1788 + goto out; 1789 + case SEEK_DATA: 1790 + case SEEK_HOLE: 1791 + ret = find_desired_extent(inode, &offset, origin); 1792 + if (ret) { 1793 + mutex_unlock(&inode->i_mutex); 1794 + return ret; 1795 + } 1796 + } 1797 + 1798 + if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) 1799 + return -EINVAL; 1800 + if (offset > inode->i_sb->s_maxbytes) 1801 + return -EINVAL; 1802 + 1803 + /* Special lock needed here? */ 1804 + if (offset != file->f_pos) { 1805 + file->f_pos = offset; 1806 + file->f_version = 0; 1807 + } 1808 + out: 1809 + mutex_unlock(&inode->i_mutex); 1810 + return offset; 1811 + } 1812 + 1676 1813 const struct file_operations btrfs_file_operations = { 1677 - .llseek = generic_file_llseek, 1814 + .llseek = btrfs_file_llseek, 1678 1815 .read = do_sync_read, 1679 1816 .write = do_sync_write, 1680 1817 .aio_read = generic_file_aio_read,
+11 -14
fs/btrfs/inode.c
··· 4079 4079 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, 4080 4080 struct nameidata *nd) 4081 4081 { 4082 - struct inode *inode; 4083 - 4084 - inode = btrfs_lookup_dentry(dir, dentry); 4085 - if (IS_ERR(inode)) 4086 - return ERR_CAST(inode); 4087 - 4088 - return d_splice_alias(inode, dentry); 4082 + return d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry); 4089 4083 } 4090 4084 4091 4085 unsigned char btrfs_filetype_table[] = { ··· 4766 4772 if (err) { 4767 4773 drop_inode = 1; 4768 4774 } else { 4769 - struct dentry *parent = dget_parent(dentry); 4775 + struct dentry *parent = dentry->d_parent; 4770 4776 err = btrfs_update_inode(trans, root, inode); 4771 4777 BUG_ON(err); 4772 4778 btrfs_log_new_name(trans, inode, NULL, parent); 4773 - dput(parent); 4774 4779 } 4775 4780 4776 4781 nr = trans->blocks_used; ··· 6893 6900 { 6894 6901 struct inode *inode = dentry->d_inode; 6895 6902 generic_fillattr(inode, stat); 6896 - stat->dev = BTRFS_I(inode)->root->anon_super.s_dev; 6903 + stat->dev = BTRFS_I(inode)->root->anon_dev; 6897 6904 stat->blksize = PAGE_CACHE_SIZE; 6898 6905 stat->blocks = (inode_get_bytes(inode) + 6899 6906 BTRFS_I(inode)->delalloc_bytes) >> 9; ··· 7061 7068 BUG_ON(ret); 7062 7069 7063 7070 if (old_ino != BTRFS_FIRST_FREE_OBJECTID) { 7064 - struct dentry *parent = dget_parent(new_dentry); 7071 + struct dentry *parent = new_dentry->d_parent; 7065 7072 btrfs_log_new_name(trans, old_inode, old_dir, parent); 7066 - dput(parent); 7067 7073 btrfs_end_log_trans(root); 7068 7074 } 7069 7075 out_fail: ··· 7323 7331 return __set_page_dirty_nobuffers(page); 7324 7332 } 7325 7333 7326 - static int btrfs_permission(struct inode *inode, int mask, unsigned int flags) 7334 + static int btrfs_permission(struct inode *inode, int mask) 7327 7335 { 7328 7336 struct btrfs_root *root = BTRFS_I(inode)->root; 7329 7337 ··· 7331 7339 return -EROFS; 7332 7340 if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE)) 7333 
7341 return -EACCES; 7334 - return generic_permission(inode, mask, flags, btrfs_check_acl); 7342 + return generic_permission(inode, mask); 7335 7343 } 7336 7344 7337 7345 static const struct inode_operations btrfs_dir_inode_operations = { ··· 7351 7359 .listxattr = btrfs_listxattr, 7352 7360 .removexattr = btrfs_removexattr, 7353 7361 .permission = btrfs_permission, 7362 + .check_acl = btrfs_check_acl, 7354 7363 }; 7355 7364 static const struct inode_operations btrfs_dir_ro_inode_operations = { 7356 7365 .lookup = btrfs_lookup, 7357 7366 .permission = btrfs_permission, 7367 + .check_acl = btrfs_check_acl, 7358 7368 }; 7359 7369 7360 7370 static const struct file_operations btrfs_dir_file_operations = { ··· 7425 7431 .removexattr = btrfs_removexattr, 7426 7432 .permission = btrfs_permission, 7427 7433 .fiemap = btrfs_fiemap, 7434 + .check_acl = btrfs_check_acl, 7428 7435 }; 7429 7436 static const struct inode_operations btrfs_special_inode_operations = { 7430 7437 .getattr = btrfs_getattr, ··· 7435 7440 .getxattr = btrfs_getxattr, 7436 7441 .listxattr = btrfs_listxattr, 7437 7442 .removexattr = btrfs_removexattr, 7443 + .check_acl = btrfs_check_acl, 7438 7444 }; 7439 7445 static const struct inode_operations btrfs_symlink_inode_operations = { 7440 7446 .readlink = generic_readlink, ··· 7447 7451 .getxattr = btrfs_getxattr, 7448 7452 .listxattr = btrfs_listxattr, 7449 7453 .removexattr = btrfs_removexattr, 7454 + .check_acl = btrfs_check_acl, 7450 7455 }; 7451 7456 7452 7457 const struct dentry_operations btrfs_dentry_operations = {
+4 -12
fs/btrfs/ioctl.c
··· 323 323 struct btrfs_inode_item *inode_item; 324 324 struct extent_buffer *leaf; 325 325 struct btrfs_root *new_root; 326 - struct dentry *parent = dget_parent(dentry); 326 + struct dentry *parent = dentry->d_parent; 327 327 struct inode *dir; 328 328 int ret; 329 329 int err; ··· 332 332 u64 index = 0; 333 333 334 334 ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid); 335 - if (ret) { 336 - dput(parent); 335 + if (ret) 337 336 return ret; 338 - } 339 337 340 338 dir = parent->d_inode; 341 339 ··· 344 346 * 2 - dir items 345 347 */ 346 348 trans = btrfs_start_transaction(root, 6); 347 - if (IS_ERR(trans)) { 348 - dput(parent); 349 + if (IS_ERR(trans)) 349 350 return PTR_ERR(trans); 350 - } 351 351 352 352 leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 353 353 0, objectid, NULL, 0, 0, 0); ··· 435 439 436 440 d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry)); 437 441 fail: 438 - dput(parent); 439 442 if (async_transid) { 440 443 *async_transid = trans->transid; 441 444 err = btrfs_commit_transaction_async(trans, root, 1); ··· 451 456 bool readonly) 452 457 { 453 458 struct inode *inode; 454 - struct dentry *parent; 455 459 struct btrfs_pending_snapshot *pending_snapshot; 456 460 struct btrfs_trans_handle *trans; 457 461 int ret; ··· 498 504 if (ret) 499 505 goto fail; 500 506 501 - parent = dget_parent(dentry); 502 - inode = btrfs_lookup_dentry(parent->d_inode, dentry); 503 - dput(parent); 507 + inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry); 504 508 if (IS_ERR(inode)) { 505 509 ret = PTR_ERR(inode); 506 510 goto fail;
-2
fs/cachefiles/bind.c
··· 129 129 !root->d_inode->i_op->mkdir || 130 130 !root->d_inode->i_op->setxattr || 131 131 !root->d_inode->i_op->getxattr || 132 - !root->d_sb || 133 - !root->d_sb->s_op || 134 132 !root->d_sb->s_op->statfs || 135 133 !root->d_sb->s_op->sync_fs) 136 134 goto error_unsupported;
+4 -2
fs/ceph/caps.c
··· 1811 1811 spin_unlock(&ci->i_unsafe_lock); 1812 1812 } 1813 1813 1814 - int ceph_fsync(struct file *file, int datasync) 1814 + int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync) 1815 1815 { 1816 1816 struct inode *inode = file->f_mapping->host; 1817 1817 struct ceph_inode_info *ci = ceph_inode(inode); ··· 1822 1822 dout("fsync %p%s\n", inode, datasync ? " datasync" : ""); 1823 1823 sync_write_wait(inode); 1824 1824 1825 - ret = filemap_write_and_wait(inode->i_mapping); 1825 + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); 1826 1826 if (ret < 0) 1827 1827 return ret; 1828 + mutex_lock(&inode->i_mutex); 1828 1829 1829 1830 dirty = try_flush_caps(inode, NULL, &flush_tid); 1830 1831 dout("fsync dirty caps are %s\n", ceph_cap_string(dirty)); ··· 1842 1841 } 1843 1842 1844 1843 dout("fsync %p%s done\n", inode, datasync ? " datasync" : ""); 1844 + mutex_unlock(&inode->i_mutex); 1845 1845 return ret; 1846 1846 } 1847 1847
+17 -4
fs/ceph/dir.c
··· 252 252 off = 1; 253 253 } 254 254 if (filp->f_pos == 1) { 255 - ino_t ino = filp->f_dentry->d_parent->d_inode->i_ino; 255 + ino_t ino = parent_ino(filp->f_dentry); 256 256 dout("readdir off 1 -> '..'\n"); 257 257 if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1), 258 258 ceph_translate_ino(inode->i_sb, ino), ··· 446 446 loff_t retval; 447 447 448 448 mutex_lock(&inode->i_mutex); 449 + retval = -EINVAL; 449 450 switch (origin) { 450 451 case SEEK_END: 451 452 offset += inode->i_size + 2; /* FIXME */ 452 453 break; 453 454 case SEEK_CUR: 454 455 offset += file->f_pos; 456 + case SEEK_SET: 457 + break; 458 + default: 459 + goto out; 455 460 } 456 - retval = -EINVAL; 461 + 457 462 if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) { 458 463 if (offset != file->f_pos) { 459 464 file->f_pos = offset; ··· 482 477 if (offset > old_offset) 483 478 fi->dir_release_count--; 484 479 } 480 + out: 485 481 mutex_unlock(&inode->i_mutex); 486 482 return retval; 487 483 } ··· 572 566 /* open (but not create!) intent? */ 573 567 if (nd && 574 568 (nd->flags & LOOKUP_OPEN) && 575 - (nd->flags & LOOKUP_CONTINUE) == 0 && /* only open last component */ 576 569 !(nd->intent.open.flags & O_CREAT)) { 577 570 int mode = nd->intent.open.create_mode & ~current->fs->umask; 578 571 return ceph_lookup_open(dir, dentry, nd, mode, 1); ··· 1118 1113 * an fsync() on a dir will wait for any uncommitted directory 1119 1114 * operations to commit. 
1120 1115 */ 1121 - static int ceph_dir_fsync(struct file *file, int datasync) 1116 + static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end, 1117 + int datasync) 1122 1118 { 1123 1119 struct inode *inode = file->f_path.dentry->d_inode; 1124 1120 struct ceph_inode_info *ci = ceph_inode(inode); ··· 1129 1123 int ret = 0; 1130 1124 1131 1125 dout("dir_fsync %p\n", inode); 1126 + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); 1127 + if (ret) 1128 + return ret; 1129 + mutex_lock(&inode->i_mutex); 1130 + 1132 1131 spin_lock(&ci->i_unsafe_lock); 1133 1132 if (list_empty(head)) 1134 1133 goto out; ··· 1167 1156 } while (req->r_tid < last_tid); 1168 1157 out: 1169 1158 spin_unlock(&ci->i_unsafe_lock); 1159 + mutex_unlock(&inode->i_mutex); 1160 + 1170 1161 return ret; 1171 1162 } 1172 1163
+19 -3
fs/ceph/file.c
··· 226 226 struct inode *parent_inode = get_dentry_parent_inode(file->f_dentry); 227 227 struct ceph_mds_request *req; 228 228 int err; 229 - int flags = nd->intent.open.flags - 1; /* silly vfs! */ 229 + int flags = nd->intent.open.flags; 230 230 231 231 dout("ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n", 232 232 dentry, dentry->d_name.len, dentry->d_name.name, flags, mode); ··· 768 768 769 769 mutex_lock(&inode->i_mutex); 770 770 __ceph_do_pending_vmtruncate(inode); 771 - switch (origin) { 772 - case SEEK_END: 771 + if (origin != SEEK_CUR || origin != SEEK_SET) { 773 772 ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE); 774 773 if (ret < 0) { 775 774 offset = ret; 776 775 goto out; 777 776 } 777 + } 778 + 779 + switch (origin) { 780 + case SEEK_END: 778 781 offset += inode->i_size; 779 782 break; 780 783 case SEEK_CUR: ··· 792 789 goto out; 793 790 } 794 791 offset += file->f_pos; 792 + break; 793 + case SEEK_DATA: 794 + if (offset >= inode->i_size) { 795 + ret = -ENXIO; 796 + goto out; 797 + } 798 + break; 799 + case SEEK_HOLE: 800 + if (offset >= inode->i_size) { 801 + ret = -ENXIO; 802 + goto out; 803 + } 804 + offset = inode->i_size; 795 805 break; 796 806 } 797 807
+3 -3
fs/ceph/inode.c
··· 1795 1795 * Check inode permissions. We verify we have a valid value for 1796 1796 * the AUTH cap, then call the generic handler. 1797 1797 */ 1798 - int ceph_permission(struct inode *inode, int mask, unsigned int flags) 1798 + int ceph_permission(struct inode *inode, int mask) 1799 1799 { 1800 1800 int err; 1801 1801 1802 - if (flags & IPERM_FLAG_RCU) 1802 + if (mask & MAY_NOT_BLOCK) 1803 1803 return -ECHILD; 1804 1804 1805 1805 err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED); 1806 1806 1807 1807 if (!err) 1808 - err = generic_permission(inode, mask, flags, NULL); 1808 + err = generic_permission(inode, mask); 1809 1809 return err; 1810 1810 } 1811 1811
+3 -2
fs/ceph/super.h
··· 692 692 extern void ceph_queue_writeback(struct inode *inode); 693 693 694 694 extern int ceph_do_getattr(struct inode *inode, int mask); 695 - extern int ceph_permission(struct inode *inode, int mask, unsigned int flags); 695 + extern int ceph_permission(struct inode *inode, int mask); 696 696 extern int ceph_setattr(struct dentry *dentry, struct iattr *attr); 697 697 extern int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry, 698 698 struct kstat *stat); ··· 728 728 729 729 extern void ceph_queue_caps_release(struct inode *inode); 730 730 extern int ceph_write_inode(struct inode *inode, struct writeback_control *wbc); 731 - extern int ceph_fsync(struct file *file, int datasync); 731 + extern int ceph_fsync(struct file *file, loff_t start, loff_t end, 732 + int datasync); 732 733 extern void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc, 733 734 struct ceph_mds_session *session); 734 735 extern struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci,
+7 -4
fs/cifs/cifsfs.c
··· 224 224 return 0; 225 225 } 226 226 227 - static int cifs_permission(struct inode *inode, int mask, unsigned int flags) 227 + static int cifs_permission(struct inode *inode, int mask) 228 228 { 229 229 struct cifs_sb_info *cifs_sb; 230 230 ··· 239 239 on the client (above and beyond ACL on servers) for 240 240 servers which do not support setting and viewing mode bits, 241 241 so allowing client to check permissions is useful */ 242 - return generic_permission(inode, mask, flags, NULL); 242 + return generic_permission(inode, mask); 243 243 } 244 244 245 245 static struct kmem_cache *cifs_inode_cachep; ··· 704 704 705 705 static loff_t cifs_llseek(struct file *file, loff_t offset, int origin) 706 706 { 707 - /* origin == SEEK_END => we must revalidate the cached file length */ 708 - if (origin == SEEK_END) { 707 + /* 708 + * origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate 709 + * the cached file length 710 + */ 711 + if (origin != SEEK_SET || origin != SEEK_CUR) { 709 712 int rc; 710 713 struct inode *inode = file->f_path.dentry->d_inode; 711 714
+2 -2
fs/cifs/cifsfs.h
··· 91 91 extern ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov, 92 92 unsigned long nr_segs, loff_t pos); 93 93 extern int cifs_lock(struct file *, int, struct file_lock *); 94 - extern int cifs_fsync(struct file *, int); 95 - extern int cifs_strict_fsync(struct file *, int); 94 + extern int cifs_fsync(struct file *, loff_t, loff_t, int); 95 + extern int cifs_strict_fsync(struct file *, loff_t, loff_t, int); 96 96 extern int cifs_flush(struct file *, fl_owner_t id); 97 97 extern int cifs_file_mmap(struct file * , struct vm_area_struct *); 98 98 extern int cifs_file_strict_mmap(struct file * , struct vm_area_struct *);
+3 -2
fs/cifs/connect.c
··· 320 320 } 321 321 322 322 static int 323 - cifs_demultiplex_thread(struct TCP_Server_Info *server) 323 + cifs_demultiplex_thread(void *p) 324 324 { 325 325 int length; 326 + struct TCP_Server_Info *server = p; 326 327 unsigned int pdu_length, total_read; 327 328 struct smb_hdr *smb_buffer = NULL; 328 329 struct smb_hdr *bigbuf = NULL; ··· 1792 1791 * this will succeed. No need for try_module_get(). 1793 1792 */ 1794 1793 __module_get(THIS_MODULE); 1795 - tcp_ses->tsk = kthread_run((void *)(void *)cifs_demultiplex_thread, 1794 + tcp_ses->tsk = kthread_run(cifs_demultiplex_thread, 1796 1795 tcp_ses, "cifsd"); 1797 1796 if (IS_ERR(tcp_ses->tsk)) { 1798 1797 rc = PTR_ERR(tcp_ses->tsk);
+6 -8
fs/cifs/dir.c
··· 179 179 if (oplockEnabled) 180 180 oplock = REQ_OPLOCK; 181 181 182 - if (nd && (nd->flags & LOOKUP_OPEN)) 182 + if (nd) 183 183 oflags = nd->intent.open.file->f_flags; 184 184 else 185 185 oflags = O_RDONLY | O_CREAT; ··· 214 214 which should be rare for path not covered on files) */ 215 215 } 216 216 217 - if (nd && (nd->flags & LOOKUP_OPEN)) { 217 + if (nd) { 218 218 /* if the file is going to stay open, then we 219 219 need to set the desired access properly */ 220 220 desiredAccess = 0; ··· 328 328 else 329 329 cFYI(1, "Create worked, get_inode_info failed rc = %d", rc); 330 330 331 - if (newinode && nd && (nd->flags & LOOKUP_OPEN)) { 331 + if (newinode && nd) { 332 332 struct cifsFileInfo *pfile_info; 333 333 struct file *filp; 334 334 ··· 568 568 * reduction in network traffic in the other paths. 569 569 */ 570 570 if (pTcon->unix_ext) { 571 - if (nd && !(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) && 571 + if (nd && !(nd->flags & LOOKUP_DIRECTORY) && 572 572 (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open && 573 573 (nd->intent.open.file->f_flags & O_CREAT)) { 574 574 rc = cifs_posix_open(full_path, &newInode, ··· 663 663 * case sensitive name which is specified by user if this is 664 664 * for creation. 665 665 */ 666 - if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT))) { 667 - if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) 668 - return 0; 669 - } 666 + if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) 667 + return 0; 670 668 671 669 if (time_after(jiffies, direntry->d_time + HZ) || !lookupCacheEnabled) 672 670 return 0;
+16 -2
fs/cifs/file.c
··· 1401 1401 return rc; 1402 1402 } 1403 1403 1404 - int cifs_strict_fsync(struct file *file, int datasync) 1404 + int cifs_strict_fsync(struct file *file, loff_t start, loff_t end, 1405 + int datasync) 1405 1406 { 1406 1407 int xid; 1407 1408 int rc = 0; ··· 1410 1409 struct cifsFileInfo *smbfile = file->private_data; 1411 1410 struct inode *inode = file->f_path.dentry->d_inode; 1412 1411 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); 1412 + 1413 + rc = filemap_write_and_wait_range(inode->i_mapping, start, end); 1414 + if (rc) 1415 + return rc; 1416 + mutex_lock(&inode->i_mutex); 1413 1417 1414 1418 xid = GetXid(); 1415 1419 ··· 1434 1428 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid); 1435 1429 1436 1430 FreeXid(xid); 1431 + mutex_unlock(&inode->i_mutex); 1437 1432 return rc; 1438 1433 } 1439 1434 1440 - int cifs_fsync(struct file *file, int datasync) 1435 + int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync) 1441 1436 { 1442 1437 int xid; 1443 1438 int rc = 0; 1444 1439 struct cifs_tcon *tcon; 1445 1440 struct cifsFileInfo *smbfile = file->private_data; 1446 1441 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 1442 + struct inode *inode = file->f_mapping->host; 1443 + 1444 + rc = filemap_write_and_wait_range(inode->i_mapping, start, end); 1445 + if (rc) 1446 + return rc; 1447 + mutex_lock(&inode->i_mutex); 1447 1448 1448 1449 xid = GetXid(); 1449 1450 ··· 1462 1449 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid); 1463 1450 1464 1451 FreeXid(xid); 1452 + mutex_unlock(&inode->i_mutex); 1465 1453 return rc; 1466 1454 } 1467 1455
+1 -1
fs/cifs/readdir.c
··· 796 796 file->f_pos++; 797 797 case 1: 798 798 if (filldir(direntry, "..", 2, file->f_pos, 799 - file->f_path.dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) { 799 + parent_ino(file->f_path.dentry), DT_DIR) < 0) { 800 800 cERROR(1, "Filldir for parent dir failed"); 801 801 rc = -ENOMEM; 802 802 break;
+1 -1
fs/coda/coda_int.h
··· 11 11 12 12 void coda_destroy_inodecache(void); 13 13 int coda_init_inodecache(void); 14 - int coda_fsync(struct file *coda_file, int datasync); 14 + int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync); 15 15 void coda_sysctl_init(void); 16 16 void coda_sysctl_clean(void); 17 17
+1 -1
fs/coda/coda_linux.h
··· 39 39 /* operations shared over more than one file */ 40 40 int coda_open(struct inode *i, struct file *f); 41 41 int coda_release(struct inode *i, struct file *f); 42 - int coda_permission(struct inode *inode, int mask, unsigned int flags); 42 + int coda_permission(struct inode *inode, int mask); 43 43 int coda_revalidate_inode(struct dentry *); 44 44 int coda_getattr(struct vfsmount *, struct dentry *, struct kstat *); 45 45 int coda_setattr(struct dentry *, struct iattr *);
+4 -5
fs/coda/dir.c
··· 132 132 } 133 133 134 134 135 - int coda_permission(struct inode *inode, int mask, unsigned int flags) 135 + int coda_permission(struct inode *inode, int mask) 136 136 { 137 137 int error; 138 138 139 - if (flags & IPERM_FLAG_RCU) 139 + if (mask & MAY_NOT_BLOCK) 140 140 return -ECHILD; 141 141 142 142 mask &= MAY_READ | MAY_WRITE | MAY_EXEC; ··· 449 449 struct file *host_file; 450 450 struct dentry *de; 451 451 struct venus_dirent *vdir; 452 - unsigned long vdir_size = 453 - (unsigned long)(&((struct venus_dirent *)0)->d_name); 452 + unsigned long vdir_size = offsetof(struct venus_dirent, d_name); 454 453 unsigned int type; 455 454 struct qstr name; 456 455 ino_t ino; ··· 473 474 coda_file->f_pos++; 474 475 } 475 476 if (coda_file->f_pos == 1) { 476 - ret = filldir(buf, "..", 2, 1, de->d_parent->d_inode->i_ino, DT_DIR); 477 + ret = filldir(buf, "..", 2, 1, parent_ino(de), DT_DIR); 477 478 if (ret < 0) 478 479 goto out; 479 480 result++;
+7 -1
fs/coda/file.c
··· 199 199 return 0; 200 200 } 201 201 202 - int coda_fsync(struct file *coda_file, int datasync) 202 + int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync) 203 203 { 204 204 struct file *host_file; 205 205 struct inode *coda_inode = coda_file->f_path.dentry->d_inode; ··· 210 210 S_ISLNK(coda_inode->i_mode))) 211 211 return -EINVAL; 212 212 213 + err = filemap_write_and_wait_range(coda_inode->i_mapping, start, end); 214 + if (err) 215 + return err; 216 + mutex_lock(&coda_inode->i_mutex); 217 + 213 218 cfi = CODA_FTOC(coda_file); 214 219 BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC); 215 220 host_file = cfi->cfi_container; ··· 222 217 err = vfs_fsync(host_file, datasync); 223 218 if (!err && !datasync) 224 219 err = venus_fsync(coda_inode->i_sb, coda_i2f(coda_inode)); 220 + mutex_unlock(&coda_inode->i_mutex); 225 221 226 222 return err; 227 223 }
+2 -2
fs/coda/pioctl.c
··· 24 24 #include "coda_linux.h" 25 25 26 26 /* pioctl ops */ 27 - static int coda_ioctl_permission(struct inode *inode, int mask, unsigned int flags); 27 + static int coda_ioctl_permission(struct inode *inode, int mask); 28 28 static long coda_pioctl(struct file *filp, unsigned int cmd, 29 29 unsigned long user_data); 30 30 ··· 41 41 }; 42 42 43 43 /* the coda pioctl inode ops */ 44 - static int coda_ioctl_permission(struct inode *inode, int mask, unsigned int flags) 44 + static int coda_ioctl_permission(struct inode *inode, int mask) 45 45 { 46 46 return (mask & MAY_EXEC) ? -EACCES : 0; 47 47 }
+91 -171
fs/dcache.c
··· 344 344 EXPORT_SYMBOL(d_drop); 345 345 346 346 /* 347 + * d_clear_need_lookup - drop a dentry from cache and clear the need lookup flag 348 + * @dentry: dentry to drop 349 + * 350 + * This is called when we do a lookup on a placeholder dentry that needed to be 351 + * looked up. The dentry should have been hashed in order for it to be found by 352 + * the lookup code, but now needs to be unhashed while we do the actual lookup 353 + * and clear the DCACHE_NEED_LOOKUP flag. 354 + */ 355 + void d_clear_need_lookup(struct dentry *dentry) 356 + { 357 + spin_lock(&dentry->d_lock); 358 + __d_drop(dentry); 359 + dentry->d_flags &= ~DCACHE_NEED_LOOKUP; 360 + spin_unlock(&dentry->d_lock); 361 + } 362 + EXPORT_SYMBOL(d_clear_need_lookup); 363 + 364 + /* 347 365 * Finish off a dentry we've decided to kill. 348 366 * dentry->d_lock must be held, returns with it unlocked. 349 367 * If ref is non-zero, then decrement the refcount too. ··· 450 432 if (d_unhashed(dentry)) 451 433 goto kill_it; 452 434 453 - /* Otherwise leave it cached and ensure it's on the LRU */ 454 - dentry->d_flags |= DCACHE_REFERENCED; 435 + /* 436 + * If this dentry needs lookup, don't set the referenced flag so that it 437 + * is more likely to be cleaned up by the dcache shrinker in case of 438 + * memory pressure. 439 + */ 440 + if (!d_need_lookup(dentry)) 441 + dentry->d_flags |= DCACHE_REFERENCED; 455 442 dentry_lru_add(dentry); 456 443 457 444 dentry->d_count--; ··· 549 526 */ 550 527 rcu_read_lock(); 551 528 ret = dentry->d_parent; 552 - if (!ret) { 553 - rcu_read_unlock(); 554 - goto out; 555 - } 556 529 spin_lock(&ret->d_lock); 557 530 if (unlikely(ret != dentry->d_parent)) { 558 531 spin_unlock(&ret->d_lock); ··· 559 540 BUG_ON(!ret->d_count); 560 541 ret->d_count++; 561 542 spin_unlock(&ret->d_lock); 562 - out: 563 543 return ret; 564 544 } 565 545 EXPORT_SYMBOL(dget_parent); ··· 738 720 * 739 721 * If flags contains DCACHE_REFERENCED reference dentries will not be pruned. 
740 722 */ 741 - static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags) 723 + static void __shrink_dcache_sb(struct super_block *sb, int count, int flags) 742 724 { 743 - /* called from prune_dcache() and shrink_dcache_parent() */ 744 725 struct dentry *dentry; 745 726 LIST_HEAD(referenced); 746 727 LIST_HEAD(tmp); 747 - int cnt = *count; 748 728 749 729 relock: 750 730 spin_lock(&dcache_lru_lock); ··· 770 754 } else { 771 755 list_move_tail(&dentry->d_lru, &tmp); 772 756 spin_unlock(&dentry->d_lock); 773 - if (!--cnt) 757 + if (!--count) 774 758 break; 775 759 } 776 760 cond_resched_lock(&dcache_lru_lock); ··· 780 764 spin_unlock(&dcache_lru_lock); 781 765 782 766 shrink_dentry_list(&tmp); 783 - 784 - *count = cnt; 785 767 } 786 768 787 769 /** 788 - * prune_dcache - shrink the dcache 789 - * @count: number of entries to try to free 770 + * prune_dcache_sb - shrink the dcache 771 + * @nr_to_scan: number of entries to try to free 790 772 * 791 - * Shrink the dcache. This is done when we need more memory, or simply when we 792 - * need to unmount something (at which point we need to unuse all dentries). 773 + * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is 774 + * done when we need more memory an called from the superblock shrinker 775 + * function. 793 776 * 794 - * This function may fail to free any resources if all the dentries are in use. 777 + * This function may fail to free any resources if all the dentries are in 778 + * use. 
795 779 */ 796 - static void prune_dcache(int count) 780 + void prune_dcache_sb(struct super_block *sb, int nr_to_scan) 797 781 { 798 - struct super_block *sb, *p = NULL; 799 - int w_count; 800 - int unused = dentry_stat.nr_unused; 801 - int prune_ratio; 802 - int pruned; 803 - 804 - if (unused == 0 || count == 0) 805 - return; 806 - if (count >= unused) 807 - prune_ratio = 1; 808 - else 809 - prune_ratio = unused / count; 810 - spin_lock(&sb_lock); 811 - list_for_each_entry(sb, &super_blocks, s_list) { 812 - if (list_empty(&sb->s_instances)) 813 - continue; 814 - if (sb->s_nr_dentry_unused == 0) 815 - continue; 816 - sb->s_count++; 817 - /* Now, we reclaim unused dentrins with fairness. 818 - * We reclaim them same percentage from each superblock. 819 - * We calculate number of dentries to scan on this sb 820 - * as follows, but the implementation is arranged to avoid 821 - * overflows: 822 - * number of dentries to scan on this sb = 823 - * count * (number of dentries on this sb / 824 - * number of dentries in the machine) 825 - */ 826 - spin_unlock(&sb_lock); 827 - if (prune_ratio != 1) 828 - w_count = (sb->s_nr_dentry_unused / prune_ratio) + 1; 829 - else 830 - w_count = sb->s_nr_dentry_unused; 831 - pruned = w_count; 832 - /* 833 - * We need to be sure this filesystem isn't being unmounted, 834 - * otherwise we could race with generic_shutdown_super(), and 835 - * end up holding a reference to an inode while the filesystem 836 - * is unmounted. So we try to get s_umount, and make sure 837 - * s_root isn't NULL. 838 - */ 839 - if (down_read_trylock(&sb->s_umount)) { 840 - if ((sb->s_root != NULL) && 841 - (!list_empty(&sb->s_dentry_lru))) { 842 - __shrink_dcache_sb(sb, &w_count, 843 - DCACHE_REFERENCED); 844 - pruned -= w_count; 845 - } 846 - up_read(&sb->s_umount); 847 - } 848 - spin_lock(&sb_lock); 849 - if (p) 850 - __put_super(p); 851 - count -= pruned; 852 - p = sb; 853 - /* more work left to do? 
*/ 854 - if (count <= 0) 855 - break; 856 - } 857 - if (p) 858 - __put_super(p); 859 - spin_unlock(&sb_lock); 782 + __shrink_dcache_sb(sb, nr_to_scan, DCACHE_REFERENCED); 860 783 } 861 784 862 785 /** ··· 1170 1215 int found; 1171 1216 1172 1217 while ((found = select_parent(parent)) != 0) 1173 - __shrink_dcache_sb(sb, &found, 0); 1218 + __shrink_dcache_sb(sb, found, 0); 1174 1219 } 1175 1220 EXPORT_SYMBOL(shrink_dcache_parent); 1176 1221 1177 - /* 1178 - * Scan `sc->nr_slab_to_reclaim' dentries and return the number which remain. 1179 - * 1180 - * We need to avoid reentering the filesystem if the caller is performing a 1181 - * GFP_NOFS allocation attempt. One example deadlock is: 1182 - * 1183 - * ext2_new_block->getblk->GFP->shrink_dcache_memory->prune_dcache-> 1184 - * prune_one_dentry->dput->dentry_iput->iput->inode->i_sb->s_op->put_inode-> 1185 - * ext2_discard_prealloc->ext2_free_blocks->lock_super->DEADLOCK. 1186 - * 1187 - * In this case we return -1 to tell the caller that we baled. 1188 - */ 1189 - static int shrink_dcache_memory(struct shrinker *shrink, 1190 - struct shrink_control *sc) 1191 - { 1192 - int nr = sc->nr_to_scan; 1193 - gfp_t gfp_mask = sc->gfp_mask; 1194 - 1195 - if (nr) { 1196 - if (!(gfp_mask & __GFP_FS)) 1197 - return -1; 1198 - prune_dcache(nr); 1199 - } 1200 - 1201 - return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure; 1202 - } 1203 - 1204 - static struct shrinker dcache_shrinker = { 1205 - .shrink = shrink_dcache_memory, 1206 - .seeks = DEFAULT_SEEKS, 1207 - }; 1208 - 1209 1222 /** 1210 - * d_alloc - allocate a dcache entry 1211 - * @parent: parent of entry to allocate 1223 + * __d_alloc - allocate a dcache entry 1224 + * @sb: filesystem it will belong to 1212 1225 * @name: qstr of the name 1213 1226 * 1214 1227 * Allocates a dentry. It returns %NULL if there is insufficient memory ··· 1184 1261 * copied and the copy passed in may be reused after this call. 
1185 1262 */ 1186 1263 1187 - struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) 1264 + struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) 1188 1265 { 1189 1266 struct dentry *dentry; 1190 1267 char *dname; ··· 1214 1291 spin_lock_init(&dentry->d_lock); 1215 1292 seqcount_init(&dentry->d_seq); 1216 1293 dentry->d_inode = NULL; 1217 - dentry->d_parent = NULL; 1218 - dentry->d_sb = NULL; 1294 + dentry->d_parent = dentry; 1295 + dentry->d_sb = sb; 1219 1296 dentry->d_op = NULL; 1220 1297 dentry->d_fsdata = NULL; 1221 1298 INIT_HLIST_BL_NODE(&dentry->d_hash); ··· 1223 1300 INIT_LIST_HEAD(&dentry->d_subdirs); 1224 1301 INIT_LIST_HEAD(&dentry->d_alias); 1225 1302 INIT_LIST_HEAD(&dentry->d_u.d_child); 1226 - 1227 - if (parent) { 1228 - spin_lock(&parent->d_lock); 1229 - /* 1230 - * don't need child lock because it is not subject 1231 - * to concurrency here 1232 - */ 1233 - __dget_dlock(parent); 1234 - dentry->d_parent = parent; 1235 - dentry->d_sb = parent->d_sb; 1236 - d_set_d_op(dentry, dentry->d_sb->s_d_op); 1237 - list_add(&dentry->d_u.d_child, &parent->d_subdirs); 1238 - spin_unlock(&parent->d_lock); 1239 - } 1303 + d_set_d_op(dentry, dentry->d_sb->s_d_op); 1240 1304 1241 1305 this_cpu_inc(nr_dentry); 1306 + 1307 + return dentry; 1308 + } 1309 + 1310 + /** 1311 + * d_alloc - allocate a dcache entry 1312 + * @parent: parent of entry to allocate 1313 + * @name: qstr of the name 1314 + * 1315 + * Allocates a dentry. It returns %NULL if there is insufficient memory 1316 + * available. On a success the dentry is returned. The name passed in is 1317 + * copied and the copy passed in may be reused after this call. 
1318 + */ 1319 + struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) 1320 + { 1321 + struct dentry *dentry = __d_alloc(parent->d_sb, name); 1322 + if (!dentry) 1323 + return NULL; 1324 + 1325 + spin_lock(&parent->d_lock); 1326 + /* 1327 + * don't need child lock because it is not subject 1328 + * to concurrency here 1329 + */ 1330 + __dget_dlock(parent); 1331 + dentry->d_parent = parent; 1332 + list_add(&dentry->d_u.d_child, &parent->d_subdirs); 1333 + spin_unlock(&parent->d_lock); 1242 1334 1243 1335 return dentry; 1244 1336 } ··· 1261 1323 1262 1324 struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name) 1263 1325 { 1264 - struct dentry *dentry = d_alloc(NULL, name); 1265 - if (dentry) { 1266 - dentry->d_sb = sb; 1267 - d_set_d_op(dentry, dentry->d_sb->s_d_op); 1268 - dentry->d_parent = dentry; 1326 + struct dentry *dentry = __d_alloc(sb, name); 1327 + if (dentry) 1269 1328 dentry->d_flags |= DCACHE_DISCONNECTED; 1270 - } 1271 1329 return dentry; 1272 1330 } 1273 1331 EXPORT_SYMBOL(d_alloc_pseudo); ··· 1433 1499 if (root_inode) { 1434 1500 static const struct qstr name = { .name = "/", .len = 1 }; 1435 1501 1436 - res = d_alloc(NULL, &name); 1437 - if (res) { 1438 - res->d_sb = root_inode->i_sb; 1439 - d_set_d_op(res, res->d_sb->s_d_op); 1440 - res->d_parent = res; 1502 + res = __d_alloc(root_inode->i_sb, &name); 1503 + if (res) 1441 1504 d_instantiate(res, root_inode); 1442 - } 1443 1505 } 1444 1506 return res; 1445 1507 } ··· 1496 1566 if (res) 1497 1567 goto out_iput; 1498 1568 1499 - tmp = d_alloc(NULL, &anonstring); 1569 + tmp = __d_alloc(inode->i_sb, &anonstring); 1500 1570 if (!tmp) { 1501 1571 res = ERR_PTR(-ENOMEM); 1502 1572 goto out_iput; 1503 1573 } 1504 - tmp->d_parent = tmp; /* make sure dput doesn't croak */ 1505 - 1506 1574 1507 1575 spin_lock(&inode->i_lock); 1508 1576 res = __d_find_any_alias(inode); ··· 1512 1584 1513 1585 /* attach a disconnected dentry */ 1514 1586 spin_lock(&tmp->d_lock); 1515 - 
tmp->d_sb = inode->i_sb; 1516 - d_set_d_op(tmp, tmp->d_sb->s_d_op); 1517 1587 tmp->d_inode = inode; 1518 1588 tmp->d_flags |= DCACHE_DISCONNECTED; 1519 1589 list_add(&tmp->d_alias, &inode->i_dentry); ··· 1551 1625 struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) 1552 1626 { 1553 1627 struct dentry *new = NULL; 1628 + 1629 + if (IS_ERR(inode)) 1630 + return ERR_CAST(inode); 1554 1631 1555 1632 if (inode && S_ISDIR(inode->i_mode)) { 1556 1633 spin_lock(&inode->i_lock); ··· 1637 1708 } 1638 1709 1639 1710 /* 1711 + * We are going to instantiate this dentry, unhash it and clear the 1712 + * lookup flag so we can do that. 1713 + */ 1714 + if (unlikely(d_need_lookup(found))) 1715 + d_clear_need_lookup(found); 1716 + 1717 + /* 1640 1718 * Negative dentry: instantiate it unless the inode is a directory and 1641 1719 * already has a dentry. 1642 1720 */ 1643 - spin_lock(&inode->i_lock); 1644 - if (!S_ISDIR(inode->i_mode) || list_empty(&inode->i_dentry)) { 1645 - __d_instantiate(found, inode); 1646 - spin_unlock(&inode->i_lock); 1647 - security_d_instantiate(found, inode); 1648 - return found; 1721 + new = d_splice_alias(inode, found); 1722 + if (new) { 1723 + dput(found); 1724 + found = new; 1649 1725 } 1650 - 1651 - /* 1652 - * In case a directory already has a (disconnected) entry grab a 1653 - * reference to it, move it in place and use it. 1654 - */ 1655 - new = list_entry(inode->i_dentry.next, struct dentry, d_alias); 1656 - __dget(new); 1657 - spin_unlock(&inode->i_lock); 1658 - security_d_instantiate(found, inode); 1659 - d_move(new, found); 1660 - iput(inode); 1661 - dput(found); 1662 - return new; 1726 + return found; 1663 1727 1664 1728 err_out: 1665 1729 iput(inode); ··· 2967 3045 */ 2968 3046 dentry_cache = KMEM_CACHE(dentry, 2969 3047 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD); 2970 - 2971 - register_shrinker(&dcache_shrinker); 2972 3048 2973 3049 /* Hash may have been set up in dcache_init_early */ 2974 3050 if (!hashdist)
+65 -23
fs/direct-io.c
··· 135 135 struct page *pages[DIO_PAGES]; /* page buffer */ 136 136 }; 137 137 138 + static void __inode_dio_wait(struct inode *inode) 139 + { 140 + wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP); 141 + DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP); 142 + 143 + do { 144 + prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE); 145 + if (atomic_read(&inode->i_dio_count)) 146 + schedule(); 147 + } while (atomic_read(&inode->i_dio_count)); 148 + finish_wait(wq, &q.wait); 149 + } 150 + 151 + /** 152 + * inode_dio_wait - wait for outstanding DIO requests to finish 153 + * @inode: inode to wait for 154 + * 155 + * Waits for all pending direct I/O requests to finish so that we can 156 + * proceed with a truncate or equivalent operation. 157 + * 158 + * Must be called under a lock that serializes taking new references 159 + * to i_dio_count, usually by inode->i_mutex. 160 + */ 161 + void inode_dio_wait(struct inode *inode) 162 + { 163 + if (atomic_read(&inode->i_dio_count)) 164 + __inode_dio_wait(inode); 165 + } 166 + EXPORT_SYMBOL_GPL(inode_dio_wait); 167 + 168 + /* 169 + * inode_dio_done - signal finish of a direct I/O requests 170 + * @inode: inode the direct I/O happens on 171 + * 172 + * This is called once we've finished processing a direct I/O request, 173 + * and is used to wake up callers waiting for direct I/O to be quiesced. 174 + */ 175 + void inode_dio_done(struct inode *inode) 176 + { 177 + if (atomic_dec_and_test(&inode->i_dio_count)) 178 + wake_up_bit(&inode->i_state, __I_DIO_WAKEUP); 179 + } 180 + EXPORT_SYMBOL_GPL(inode_dio_done); 181 + 138 182 /* 139 183 * How many pages are in the queue? 
140 184 */ ··· 293 249 if (dio->end_io && dio->result) { 294 250 dio->end_io(dio->iocb, offset, transferred, 295 251 dio->map_bh.b_private, ret, is_async); 296 - } else if (is_async) { 297 - aio_complete(dio->iocb, ret, 0); 252 + } else { 253 + if (is_async) 254 + aio_complete(dio->iocb, ret, 0); 255 + inode_dio_done(dio->inode); 298 256 } 299 - 300 - if (dio->flags & DIO_LOCKING) 301 - /* lockdep: non-owner release */ 302 - up_read_non_owner(&dio->inode->i_alloc_sem); 303 257 304 258 return ret; 305 259 } ··· 1022 980 return ret; 1023 981 } 1024 982 1025 - /* 1026 - * Releases both i_mutex and i_alloc_sem 1027 - */ 1028 983 static ssize_t 1029 984 direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, 1030 985 const struct iovec *iov, loff_t offset, unsigned long nr_segs, ··· 1185 1146 * For writes this function is called under i_mutex and returns with 1186 1147 * i_mutex held, for reads, i_mutex is not held on entry, but it is 1187 1148 * taken and dropped again before returning. 1188 - * For reads and writes i_alloc_sem is taken in shared mode and released 1189 - * on I/O completion (which may happen asynchronously after returning to 1190 - * the caller). 1191 - * 1192 1149 * - if the flags value does NOT contain DIO_LOCKING we don't use any 1193 1150 * internal locking but rather rely on the filesystem to synchronize 1194 1151 * direct I/O reads/writes versus each other and truncate. 1195 - * For reads and writes both i_mutex and i_alloc_sem are not held on 1196 - * entry and are never taken. 1152 + * 1153 + * To help with locking against truncate we incremented the i_dio_count 1154 + * counter before starting direct I/O, and decrement it once we are done. 1155 + * Truncate can wait for it to reach zero to provide exclusion. It is 1156 + * expected that filesystem provide exclusion between new direct I/O 1157 + * and truncates. For DIO_LOCKING filesystems this is done by i_mutex, 1158 + * but other filesystems need to take care of this on their own. 
1197 1159 */ 1198 1160 ssize_t 1199 1161 __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, ··· 1240 1200 } 1241 1201 } 1242 1202 1203 + /* watch out for a 0 len io from a tricksy fs */ 1204 + if (rw == READ && end == offset) 1205 + return 0; 1206 + 1243 1207 dio = kmalloc(sizeof(*dio), GFP_KERNEL); 1244 1208 retval = -ENOMEM; 1245 1209 if (!dio) ··· 1257 1213 1258 1214 dio->flags = flags; 1259 1215 if (dio->flags & DIO_LOCKING) { 1260 - /* watch out for a 0 len io from a tricksy fs */ 1261 - if (rw == READ && end > offset) { 1216 + if (rw == READ) { 1262 1217 struct address_space *mapping = 1263 1218 iocb->ki_filp->f_mapping; 1264 1219 ··· 1272 1229 goto out; 1273 1230 } 1274 1231 } 1275 - 1276 - /* 1277 - * Will be released at I/O completion, possibly in a 1278 - * different thread. 1279 - */ 1280 - down_read_non_owner(&inode->i_alloc_sem); 1281 1232 } 1233 + 1234 + /* 1235 + * Will be decremented at I/O completion time. 1236 + */ 1237 + atomic_inc(&inode->i_dio_count); 1282 1238 1283 1239 /* 1284 1240 * For file extending writes updating i_size before data
+4 -3
fs/ecryptfs/file.c
··· 270 270 } 271 271 272 272 static int 273 - ecryptfs_fsync(struct file *file, int datasync) 273 + ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) 274 274 { 275 275 int rc = 0; 276 276 277 - rc = generic_file_fsync(file, datasync); 277 + rc = generic_file_fsync(file, start, end, datasync); 278 278 if (rc) 279 279 goto out; 280 - rc = vfs_fsync(ecryptfs_file_to_lower(file), datasync); 280 + rc = vfs_fsync_range(ecryptfs_file_to_lower(file), start, end, 281 + datasync); 281 282 out: 282 283 return rc; 283 284 }
+6 -31
fs/ecryptfs/inode.c
··· 147 147 * @lower_dir_inode: inode of the parent in the lower fs of the new file 148 148 * @dentry: New file's dentry 149 149 * @mode: The mode of the new file 150 - * @nd: nameidata of ecryptfs' parent's dentry & vfsmount 151 150 * 152 151 * Creates the file in the lower file system. 153 152 * ··· 154 155 */ 155 156 static int 156 157 ecryptfs_create_underlying_file(struct inode *lower_dir_inode, 157 - struct dentry *dentry, int mode, 158 - struct nameidata *nd) 158 + struct dentry *dentry, int mode) 159 159 { 160 160 struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); 161 - struct vfsmount *lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry); 162 - struct dentry *dentry_save; 163 - struct vfsmount *vfsmount_save; 164 - unsigned int flags_save; 165 - int rc; 166 - 167 - if (nd) { 168 - dentry_save = nd->path.dentry; 169 - vfsmount_save = nd->path.mnt; 170 - flags_save = nd->flags; 171 - nd->path.dentry = lower_dentry; 172 - nd->path.mnt = lower_mnt; 173 - nd->flags &= ~LOOKUP_OPEN; 174 - } 175 - rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd); 176 - if (nd) { 177 - nd->path.dentry = dentry_save; 178 - nd->path.mnt = vfsmount_save; 179 - nd->flags = flags_save; 180 - } 181 - return rc; 161 + return vfs_create(lower_dir_inode, lower_dentry, mode, NULL); 182 162 } 183 163 184 164 /** ··· 175 197 */ 176 198 static int 177 199 ecryptfs_do_create(struct inode *directory_inode, 178 - struct dentry *ecryptfs_dentry, int mode, 179 - struct nameidata *nd) 200 + struct dentry *ecryptfs_dentry, int mode) 180 201 { 181 202 int rc; 182 203 struct dentry *lower_dentry; ··· 190 213 goto out; 191 214 } 192 215 rc = ecryptfs_create_underlying_file(lower_dir_dentry->d_inode, 193 - ecryptfs_dentry, mode, nd); 216 + ecryptfs_dentry, mode); 194 217 if (rc) { 195 218 printk(KERN_ERR "%s: Failure to create dentry in lower fs; " 196 219 "rc = [%d]\n", __func__, rc); ··· 271 294 int rc; 272 295 273 296 /* ecryptfs_do_create() calls ecryptfs_interpose() */ 274 - rc = 
ecryptfs_do_create(directory_inode, ecryptfs_dentry, mode, nd); 297 + rc = ecryptfs_do_create(directory_inode, ecryptfs_dentry, mode); 275 298 if (unlikely(rc)) { 276 299 ecryptfs_printk(KERN_WARNING, "Failed to create file in" 277 300 "lower filesystem\n"); ··· 919 942 } 920 943 921 944 static int 922 - ecryptfs_permission(struct inode *inode, int mask, unsigned int flags) 945 + ecryptfs_permission(struct inode *inode, int mask) 923 946 { 924 - if (flags & IPERM_FLAG_RCU) 925 - return -ECHILD; 926 947 return inode_permission(ecryptfs_inode_to_lower(inode), mask); 927 948 } 928 949
+2 -5
fs/efs/namei.c
··· 60 60 61 61 struct dentry *efs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { 62 62 efs_ino_t inodenum; 63 - struct inode * inode = NULL; 63 + struct inode *inode = NULL; 64 64 65 65 inodenum = efs_find_entry(dir, dentry->d_name.name, dentry->d_name.len); 66 - if (inodenum) { 66 + if (inodenum) 67 67 inode = efs_iget(dir->i_sb, inodenum); 68 - if (IS_ERR(inode)) 69 - return ERR_CAST(inode); 70 - } 71 68 72 69 return d_splice_alias(inode, dentry); 73 70 }
+11 -3
fs/exec.c
··· 1114 1114 } 1115 1115 EXPORT_SYMBOL(flush_old_exec); 1116 1116 1117 + void would_dump(struct linux_binprm *bprm, struct file *file) 1118 + { 1119 + if (inode_permission(file->f_path.dentry->d_inode, MAY_READ) < 0) 1120 + bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP; 1121 + } 1122 + EXPORT_SYMBOL(would_dump); 1123 + 1117 1124 void setup_new_exec(struct linux_binprm * bprm) 1118 1125 { 1119 1126 int i, ch; ··· 1160 1153 if (bprm->cred->uid != current_euid() || 1161 1154 bprm->cred->gid != current_egid()) { 1162 1155 current->pdeath_signal = 0; 1163 - } else if (file_permission(bprm->file, MAY_READ) || 1164 - bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP) { 1165 - set_dumpable(current->mm, suid_dumpable); 1156 + } else { 1157 + would_dump(bprm, bprm->file); 1158 + if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP) 1159 + set_dumpable(current->mm, suid_dumpable); 1166 1160 } 1167 1161 1168 1162 /*
+9 -1
fs/exofs/file.c
··· 42 42 * Note, in exofs all metadata is written as part of inode, regardless. 43 43 * The writeout is synchronous 44 44 */ 45 - static int exofs_file_fsync(struct file *filp, int datasync) 45 + static int exofs_file_fsync(struct file *filp, loff_t start, loff_t end, 46 + int datasync) 46 47 { 48 + struct inode *inode = filp->f_mapping->host; 47 49 int ret; 48 50 51 + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); 52 + if (ret) 53 + return ret; 54 + 55 + mutex_lock(&inode->i_mutex); 49 56 ret = sync_inode_metadata(filp->f_mapping->host, 1); 57 + mutex_unlock(&inode->i_mutex); 50 58 return ret; 51 59 } 52 60
+1 -6
fs/exofs/namei.c
··· 55 55 return ERR_PTR(-ENAMETOOLONG); 56 56 57 57 ino = exofs_inode_by_name(dir, dentry); 58 - inode = NULL; 59 - if (ino) { 60 - inode = exofs_iget(dir->i_sb, ino); 61 - if (IS_ERR(inode)) 62 - return ERR_CAST(inode); 63 - } 58 + inode = ino ? exofs_iget(dir->i_sb, ino) : NULL; 64 59 return d_splice_alias(inode, dentry); 65 60 } 66 61
+2 -2
fs/ext2/acl.c
··· 232 232 } 233 233 234 234 int 235 - ext2_check_acl(struct inode *inode, int mask, unsigned int flags) 235 + ext2_check_acl(struct inode *inode, int mask) 236 236 { 237 237 struct posix_acl *acl; 238 238 239 - if (flags & IPERM_FLAG_RCU) { 239 + if (mask & MAY_NOT_BLOCK) { 240 240 if (!negative_cached_acl(inode, ACL_TYPE_ACCESS)) 241 241 return -ECHILD; 242 242 return -EAGAIN;
+1 -1
fs/ext2/acl.h
··· 54 54 #ifdef CONFIG_EXT2_FS_POSIX_ACL 55 55 56 56 /* acl.c */ 57 - extern int ext2_check_acl (struct inode *, int, unsigned int); 57 + extern int ext2_check_acl (struct inode *, int); 58 58 extern int ext2_acl_chmod (struct inode *); 59 59 extern int ext2_init_acl (struct inode *, struct inode *); 60 60
+2 -1
fs/ext2/ext2.h
··· 150 150 extern const struct file_operations ext2_dir_operations; 151 151 152 152 /* file.c */ 153 - extern int ext2_fsync(struct file *file, int datasync); 153 + extern int ext2_fsync(struct file *file, loff_t start, loff_t end, 154 + int datasync); 154 155 extern const struct inode_operations ext2_file_inode_operations; 155 156 extern const struct file_operations ext2_file_operations; 156 157 extern const struct file_operations ext2_xip_file_operations;
+2 -2
fs/ext2/file.c
··· 40 40 return 0; 41 41 } 42 42 43 - int ext2_fsync(struct file *file, int datasync) 43 + int ext2_fsync(struct file *file, loff_t start, loff_t end, int datasync) 44 44 { 45 45 int ret; 46 46 struct super_block *sb = file->f_mapping->host->i_sb; 47 47 struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping; 48 48 49 - ret = generic_file_fsync(file, datasync); 49 + ret = generic_file_fsync(file, start, end, datasync); 50 50 if (ret == -EIO || test_and_clear_bit(AS_EIO, &mapping->flags)) { 51 51 /* We don't really know where the IO error happened... */ 52 52 ext2_error(sb, __func__,
+4 -2
fs/ext2/inode.c
··· 843 843 struct inode *inode = mapping->host; 844 844 ssize_t ret; 845 845 846 - ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, 847 - iov, offset, nr_segs, ext2_get_block, NULL); 846 + ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs, 847 + ext2_get_block); 848 848 if (ret < 0 && (rw & WRITE)) 849 849 ext2_write_failed(mapping, offset + iov_length(iov, nr_segs)); 850 850 return ret; ··· 1183 1183 return -EINVAL; 1184 1184 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 1185 1185 return -EPERM; 1186 + 1187 + inode_dio_wait(inode); 1186 1188 1187 1189 if (mapping_is_xip(inode->i_mapping)) 1188 1190 error = xip_truncate_page(inode->i_mapping, newsize);
+5 -9
fs/ext2/namei.c
··· 67 67 inode = NULL; 68 68 if (ino) { 69 69 inode = ext2_iget(dir->i_sb, ino); 70 - if (IS_ERR(inode)) { 71 - if (PTR_ERR(inode) == -ESTALE) { 72 - ext2_error(dir->i_sb, __func__, 73 - "deleted inode referenced: %lu", 74 - (unsigned long) ino); 75 - return ERR_PTR(-EIO); 76 - } else { 77 - return ERR_CAST(inode); 78 - } 70 + if (inode == ERR_PTR(-ESTALE)) { 71 + ext2_error(dir->i_sb, __func__, 72 + "deleted inode referenced: %lu", 73 + (unsigned long) ino); 74 + return ERR_PTR(-EIO); 79 75 } 80 76 } 81 77 return d_splice_alias(inode, dentry);
+2 -2
fs/ext3/acl.c
··· 240 240 } 241 241 242 242 int 243 - ext3_check_acl(struct inode *inode, int mask, unsigned int flags) 243 + ext3_check_acl(struct inode *inode, int mask) 244 244 { 245 245 struct posix_acl *acl; 246 246 247 - if (flags & IPERM_FLAG_RCU) { 247 + if (mask & MAY_NOT_BLOCK) { 248 248 if (!negative_cached_acl(inode, ACL_TYPE_ACCESS)) 249 249 return -ECHILD; 250 250 return -EAGAIN;
+1 -1
fs/ext3/acl.h
··· 54 54 #ifdef CONFIG_EXT3_FS_POSIX_ACL 55 55 56 56 /* acl.c */ 57 - extern int ext3_check_acl (struct inode *, int, unsigned int); 57 + extern int ext3_check_acl (struct inode *, int); 58 58 extern int ext3_acl_chmod (struct inode *); 59 59 extern int ext3_init_acl (handle_t *, struct inode *, struct inode *); 60 60
+16 -2
fs/ext3/fsync.c
··· 43 43 * inode to disk. 44 44 */ 45 45 46 - int ext3_sync_file(struct file *file, int datasync) 46 + int ext3_sync_file(struct file *file, loff_t start, loff_t end, int datasync) 47 47 { 48 48 struct inode *inode = file->f_mapping->host; 49 49 struct ext3_inode_info *ei = EXT3_I(inode); ··· 53 53 54 54 if (inode->i_sb->s_flags & MS_RDONLY) 55 55 return 0; 56 + 57 + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); 58 + if (ret) 59 + return ret; 60 + 61 + /* 62 + * Taking the mutex here just to keep consistent with how fsync was 63 + * called previously, however it looks like we don't need to take 64 + * i_mutex at all. 65 + */ 66 + mutex_lock(&inode->i_mutex); 56 67 57 68 J_ASSERT(ext3_journal_current_handle() == NULL); 58 69 ··· 81 70 * (they were dirtied by commit). But that's OK - the blocks are 82 71 * safe in-journal, which is all fsync() needs to ensure. 83 72 */ 84 - if (ext3_should_journal_data(inode)) 73 + if (ext3_should_journal_data(inode)) { 74 + mutex_unlock(&inode->i_mutex); 85 75 return ext3_force_commit(inode->i_sb); 76 + } 86 77 87 78 if (datasync) 88 79 commit_tid = atomic_read(&ei->i_datasync_tid); ··· 104 91 */ 105 92 if (needs_barrier) 106 93 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); 94 + mutex_unlock(&inode->i_mutex); 107 95 return ret; 108 96 }
+5 -3
fs/ext3/inode.c
··· 1816 1816 } 1817 1817 1818 1818 retry: 1819 - ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 1820 - offset, nr_segs, 1821 - ext3_get_block, NULL); 1819 + ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs, 1820 + ext3_get_block); 1822 1821 /* 1823 1822 * In case of error extending write may have instantiated a few 1824 1823 * blocks outside i_size. Trim these off again. ··· 3214 3215 error = ext3_mark_inode_dirty(handle, inode); 3215 3216 ext3_journal_stop(handle); 3216 3217 } 3218 + 3219 + if (attr->ia_valid & ATTR_SIZE) 3220 + inode_dio_wait(inode); 3217 3221 3218 3222 if (S_ISREG(inode->i_mode) && 3219 3223 attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
+5 -9
fs/ext3/namei.c
··· 1038 1038 return ERR_PTR(-EIO); 1039 1039 } 1040 1040 inode = ext3_iget(dir->i_sb, ino); 1041 - if (IS_ERR(inode)) { 1042 - if (PTR_ERR(inode) == -ESTALE) { 1043 - ext3_error(dir->i_sb, __func__, 1044 - "deleted inode referenced: %lu", 1045 - ino); 1046 - return ERR_PTR(-EIO); 1047 - } else { 1048 - return ERR_CAST(inode); 1049 - } 1041 + if (inode == ERR_PTR(-ESTALE)) { 1042 + ext3_error(dir->i_sb, __func__, 1043 + "deleted inode referenced: %lu", 1044 + ino); 1045 + return ERR_PTR(-EIO); 1050 1046 } 1051 1047 } 1052 1048 return d_splice_alias(inode, dentry);
+2
fs/ext3/super.c
··· 1718 1718 sbi->s_resuid = le16_to_cpu(es->s_def_resuid); 1719 1719 sbi->s_resgid = le16_to_cpu(es->s_def_resgid); 1720 1720 1721 + /* enable barriers by default */ 1722 + set_opt(sbi->s_mount_opt, BARRIER); 1721 1723 set_opt(sbi->s_mount_opt, RESERVATION); 1722 1724 1723 1725 if (!parse_options ((char *) data, sb, &journal_inum, &journal_devnum,
+2 -2
fs/ext4/acl.c
··· 238 238 } 239 239 240 240 int 241 - ext4_check_acl(struct inode *inode, int mask, unsigned int flags) 241 + ext4_check_acl(struct inode *inode, int mask) 242 242 { 243 243 struct posix_acl *acl; 244 244 245 - if (flags & IPERM_FLAG_RCU) { 245 + if (mask & MAY_NOT_BLOCK) { 246 246 if (!negative_cached_acl(inode, ACL_TYPE_ACCESS)) 247 247 return -ECHILD; 248 248 return -EAGAIN;
+1 -1
fs/ext4/acl.h
··· 54 54 #ifdef CONFIG_EXT4_FS_POSIX_ACL 55 55 56 56 /* acl.c */ 57 - extern int ext4_check_acl(struct inode *, int, unsigned int); 57 + extern int ext4_check_acl(struct inode *, int); 58 58 extern int ext4_acl_chmod(struct inode *); 59 59 extern int ext4_init_acl(handle_t *, struct inode *, struct inode *); 60 60
+1 -1
fs/ext4/ext4.h
··· 1758 1758 extern void ext4_htree_free_dir_info(struct dir_private_info *p); 1759 1759 1760 1760 /* fsync.c */ 1761 - extern int ext4_sync_file(struct file *, int); 1761 + extern int ext4_sync_file(struct file *, loff_t, loff_t, int); 1762 1762 extern int ext4_flush_completed_IO(struct inode *); 1763 1763 1764 1764 /* hash.c */
+21
fs/ext4/file.c
··· 236 236 } 237 237 offset += file->f_pos; 238 238 break; 239 + case SEEK_DATA: 240 + /* 241 + * In the generic case the entire file is data, so as long as 242 + * offset isn't at the end of the file then the offset is data. 243 + */ 244 + if (offset >= inode->i_size) { 245 + mutex_unlock(&inode->i_mutex); 246 + return -ENXIO; 247 + } 248 + break; 249 + case SEEK_HOLE: 250 + /* 251 + * There is a virtual hole at the end of the file, so as long as 252 + * offset isn't i_size or larger, return i_size. 253 + */ 254 + if (offset >= inode->i_size) { 255 + mutex_unlock(&inode->i_mutex); 256 + return -ENXIO; 257 + } 258 + offset = inode->i_size; 259 + break; 239 260 } 240 261 241 262 if (offset < 0 || offset > maxbytes) {
+35 -3
fs/ext4/fsync.c
··· 151 151 return ret; 152 152 } 153 153 154 + /** 155 + * __sync_file - generic_file_fsync without the locking and filemap_write 156 + * @inode: inode to sync 157 + * @datasync: only sync essential metadata if true 158 + * 159 + * This is just generic_file_fsync without the locking. This is needed for 160 + * nojournal mode to make sure this inodes data/metadata makes it to disk 161 + * properly. The i_mutex should be held already. 162 + */ 163 + static int __sync_inode(struct inode *inode, int datasync) 164 + { 165 + int err; 166 + int ret; 167 + 168 + ret = sync_mapping_buffers(inode->i_mapping); 169 + if (!(inode->i_state & I_DIRTY)) 170 + return ret; 171 + if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) 172 + return ret; 173 + 174 + err = sync_inode_metadata(inode, 1); 175 + if (ret == 0) 176 + ret = err; 177 + return ret; 178 + } 179 + 154 180 /* 155 181 * akpm: A new design for ext4_sync_file(). 156 182 * ··· 191 165 * i_mutex lock is held when entering and exiting this function 192 166 */ 193 167 194 - int ext4_sync_file(struct file *file, int datasync) 168 + int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) 195 169 { 196 170 struct inode *inode = file->f_mapping->host; 197 171 struct ext4_inode_info *ei = EXT4_I(inode); ··· 204 178 205 179 trace_ext4_sync_file_enter(file, datasync); 206 180 181 + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); 182 + if (ret) 183 + return ret; 184 + mutex_lock(&inode->i_mutex); 185 + 207 186 if (inode->i_sb->s_flags & MS_RDONLY) 208 - return 0; 187 + goto out; 209 188 210 189 ret = ext4_flush_completed_IO(inode); 211 190 if (ret < 0) 212 191 goto out; 213 192 214 193 if (!journal) { 215 - ret = generic_file_fsync(file, datasync); 194 + ret = __sync_inode(inode, datasync); 216 195 if (!ret && !list_empty(&inode->i_dentry)) 217 196 ret = ext4_sync_parent(inode); 218 197 goto out; ··· 251 220 if (needs_barrier) 252 221 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, 
NULL); 253 222 out: 223 + mutex_unlock(&inode->i_mutex); 254 224 trace_ext4_sync_file_exit(inode, ret); 255 225 return ret; 256 226 }
+68 -57
fs/ext4/inode.c
··· 3501 3501 offset, nr_segs, 3502 3502 ext4_get_block, NULL, NULL, 0); 3503 3503 else { 3504 - ret = blockdev_direct_IO(rw, iocb, inode, 3505 - inode->i_sb->s_bdev, iov, 3506 - offset, nr_segs, 3507 - ext4_get_block, NULL); 3504 + ret = blockdev_direct_IO(rw, iocb, inode, iov, 3505 + offset, nr_segs, ext4_get_block); 3508 3506 3509 3507 if (unlikely((rw & WRITE) && ret < 0)) { 3510 3508 loff_t isize = i_size_read(inode); ··· 3573 3575 ssize_t size, void *private, int ret, 3574 3576 bool is_async) 3575 3577 { 3578 + struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; 3576 3579 ext4_io_end_t *io_end = iocb->private; 3577 3580 struct workqueue_struct *wq; 3578 3581 unsigned long flags; ··· 3595 3596 out: 3596 3597 if (is_async) 3597 3598 aio_complete(iocb, ret, 0); 3599 + inode_dio_done(inode); 3598 3600 return; 3599 3601 } 3600 3602 ··· 3616 3616 /* queue the work to convert unwritten extents to written */ 3617 3617 queue_work(wq, &io_end->work); 3618 3618 iocb->private = NULL; 3619 + 3620 + /* XXX: probably should move into the real I/O completion handler */ 3621 + inode_dio_done(inode); 3619 3622 } 3620 3623 3621 3624 static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate) ··· 3751 3748 EXT4_I(inode)->cur_aio_dio = iocb->private; 3752 3749 } 3753 3750 3754 - ret = blockdev_direct_IO(rw, iocb, inode, 3751 + ret = __blockdev_direct_IO(rw, iocb, inode, 3755 3752 inode->i_sb->s_bdev, iov, 3756 3753 offset, nr_segs, 3757 3754 ext4_get_block_write, 3758 - ext4_end_io_dio); 3755 + ext4_end_io_dio, 3756 + NULL, 3757 + DIO_LOCKING | DIO_SKIP_HOLES); 3759 3758 if (iocb->private) 3760 3759 EXT4_I(inode)->cur_aio_dio = NULL; 3761 3760 /* ··· 5356 5351 } 5357 5352 5358 5353 if (attr->ia_valid & ATTR_SIZE) { 5354 + inode_dio_wait(inode); 5355 + 5359 5356 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 5360 5357 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 5361 5358 ··· 5850 5843 struct page *page = vmf->page; 5851 5844 loff_t size; 
5852 5845 unsigned long len; 5853 - int ret = -EINVAL; 5854 - void *fsdata; 5846 + int ret; 5855 5847 struct file *file = vma->vm_file; 5856 5848 struct inode *inode = file->f_path.dentry->d_inode; 5857 5849 struct address_space *mapping = inode->i_mapping; 5850 + handle_t *handle; 5851 + get_block_t *get_block; 5852 + int retries = 0; 5858 5853 5859 5854 /* 5860 - * Get i_alloc_sem to stop truncates messing with the inode. We cannot 5861 - * get i_mutex because we are already holding mmap_sem. 5855 + * This check is racy but catches the common case. We rely on 5856 + * __block_page_mkwrite() to do a reliable check. 5862 5857 */ 5863 - down_read(&inode->i_alloc_sem); 5864 - size = i_size_read(inode); 5865 - if (page->mapping != mapping || size <= page_offset(page) 5866 - || !PageUptodate(page)) { 5867 - /* page got truncated from under us? */ 5868 - goto out_unlock; 5858 + vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); 5859 + /* Delalloc case is easy... */ 5860 + if (test_opt(inode->i_sb, DELALLOC) && 5861 + !ext4_should_journal_data(inode) && 5862 + !ext4_nonda_switch(inode->i_sb)) { 5863 + do { 5864 + ret = __block_page_mkwrite(vma, vmf, 5865 + ext4_da_get_block_prep); 5866 + } while (ret == -ENOSPC && 5867 + ext4_should_retry_alloc(inode->i_sb, &retries)); 5868 + goto out_ret; 5869 5869 } 5870 - ret = 0; 5871 5870 5872 5871 lock_page(page); 5873 - wait_on_page_writeback(page); 5874 - if (PageMappedToDisk(page)) { 5875 - up_read(&inode->i_alloc_sem); 5876 - return VM_FAULT_LOCKED; 5872 + size = i_size_read(inode); 5873 + /* Page got truncated from under us? */ 5874 + if (page->mapping != mapping || page_offset(page) > size) { 5875 + unlock_page(page); 5876 + ret = VM_FAULT_NOPAGE; 5877 + goto out; 5877 5878 } 5878 5879 5879 5880 if (page->index == size >> PAGE_CACHE_SHIFT) 5880 5881 len = size & ~PAGE_CACHE_MASK; 5881 5882 else 5882 5883 len = PAGE_CACHE_SIZE; 5883 - 5884 5884 /* 5885 - * return if we have all the buffers mapped. 
This avoid 5886 - * the need to call write_begin/write_end which does a 5887 - * journal_start/journal_stop which can block and take 5888 - * long time 5885 + * Return if we have all the buffers mapped. This avoids the need to do 5886 + * journal_start/journal_stop which can block and take a long time 5889 5887 */ 5890 5888 if (page_has_buffers(page)) { 5891 5889 if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL, 5892 5890 ext4_bh_unmapped)) { 5893 - up_read(&inode->i_alloc_sem); 5894 - return VM_FAULT_LOCKED; 5891 + /* Wait so that we don't change page under IO */ 5892 + wait_on_page_writeback(page); 5893 + ret = VM_FAULT_LOCKED; 5894 + goto out; 5895 5895 } 5896 5896 } 5897 5897 unlock_page(page); 5898 - /* 5899 - * OK, we need to fill the hole... Do write_begin write_end 5900 - * to do block allocation/reservation.We are not holding 5901 - * inode.i__mutex here. That allow * parallel write_begin, 5902 - * write_end call. lock_page prevent this from happening 5903 - * on the same page though 5904 - */ 5905 - ret = mapping->a_ops->write_begin(file, mapping, page_offset(page), 5906 - len, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata); 5907 - if (ret < 0) 5908 - goto out_unlock; 5909 - ret = mapping->a_ops->write_end(file, mapping, page_offset(page), 5910 - len, len, page, fsdata); 5911 - if (ret < 0) 5912 - goto out_unlock; 5913 - ret = 0; 5914 - 5915 - /* 5916 - * write_begin/end might have created a dirty page and someone 5917 - * could wander in and start the IO. Make sure that hasn't 5918 - * happened. 5919 - */ 5920 - lock_page(page); 5921 - wait_on_page_writeback(page); 5922 - up_read(&inode->i_alloc_sem); 5923 - return VM_FAULT_LOCKED; 5924 - out_unlock: 5925 - if (ret) 5898 + /* OK, we need to fill the hole... 
*/ 5899 + if (ext4_should_dioread_nolock(inode)) 5900 + get_block = ext4_get_block_write; 5901 + else 5902 + get_block = ext4_get_block; 5903 + retry_alloc: 5904 + handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode)); 5905 + if (IS_ERR(handle)) { 5926 5906 ret = VM_FAULT_SIGBUS; 5927 - up_read(&inode->i_alloc_sem); 5907 + goto out; 5908 + } 5909 + ret = __block_page_mkwrite(vma, vmf, get_block); 5910 + if (!ret && ext4_should_journal_data(inode)) { 5911 + if (walk_page_buffers(handle, page_buffers(page), 0, 5912 + PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) { 5913 + unlock_page(page); 5914 + ret = VM_FAULT_SIGBUS; 5915 + goto out; 5916 + } 5917 + ext4_set_inode_state(inode, EXT4_STATE_JDATA); 5918 + } 5919 + ext4_journal_stop(handle); 5920 + if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 5921 + goto retry_alloc; 5922 + out_ret: 5923 + ret = block_page_mkwrite_return(ret); 5924 + out: 5928 5925 return ret; 5929 5926 }
+5 -9
fs/ext4/namei.c
··· 1037 1037 return ERR_PTR(-EIO); 1038 1038 } 1039 1039 inode = ext4_iget(dir->i_sb, ino); 1040 - if (IS_ERR(inode)) { 1041 - if (PTR_ERR(inode) == -ESTALE) { 1042 - EXT4_ERROR_INODE(dir, 1043 - "deleted inode referenced: %u", 1044 - ino); 1045 - return ERR_PTR(-EIO); 1046 - } else { 1047 - return ERR_CAST(inode); 1048 - } 1040 + if (inode == ERR_PTR(-ESTALE)) { 1041 + EXT4_ERROR_INODE(dir, 1042 + "deleted inode referenced: %u", 1043 + ino); 1044 + return ERR_PTR(-EIO); 1049 1045 } 1050 1046 } 1051 1047 return d_splice_alias(inode, dentry);
+3 -1
fs/fat/fat.h
··· 109 109 int i_attrs; /* unused attribute bits */ 110 110 loff_t i_pos; /* on-disk position of directory entry or 0 */ 111 111 struct hlist_node i_fat_hash; /* hash by i_location */ 112 + struct rw_semaphore truncate_lock; /* protect bmap against truncate */ 112 113 struct inode vfs_inode; 113 114 }; 114 115 ··· 310 309 extern void fat_truncate_blocks(struct inode *inode, loff_t offset); 311 310 extern int fat_getattr(struct vfsmount *mnt, struct dentry *dentry, 312 311 struct kstat *stat); 313 - extern int fat_file_fsync(struct file *file, int datasync); 312 + extern int fat_file_fsync(struct file *file, loff_t start, loff_t end, 313 + int datasync); 314 314 315 315 /* fat/inode.c */ 316 316 extern void fat_attach(struct inode *inode, loff_t i_pos);
+6 -2
fs/fat/file.c
··· 149 149 return 0; 150 150 } 151 151 152 - int fat_file_fsync(struct file *filp, int datasync) 152 + int fat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync) 153 153 { 154 154 struct inode *inode = filp->f_mapping->host; 155 155 int res, err; 156 156 157 - res = generic_file_fsync(filp, datasync); 157 + res = generic_file_fsync(filp, start, end, datasync); 158 158 err = sync_mapping_buffers(MSDOS_SB(inode->i_sb)->fat_inode->i_mapping); 159 159 160 160 return res ? res : err; ··· 397 397 * sequence. 398 398 */ 399 399 if (attr->ia_valid & ATTR_SIZE) { 400 + inode_dio_wait(inode); 401 + 400 402 if (attr->ia_size > inode->i_size) { 401 403 error = fat_cont_expand(inode, attr->ia_size); 402 404 if (error || attr->ia_valid == ATTR_SIZE) ··· 431 429 } 432 430 433 431 if (attr->ia_valid & ATTR_SIZE) { 432 + down_write(&MSDOS_I(inode)->truncate_lock); 434 433 truncate_setsize(inode, attr->ia_size); 435 434 fat_truncate_blocks(inode, attr->ia_size); 435 + up_write(&MSDOS_I(inode)->truncate_lock); 436 436 } 437 437 438 438 setattr_copy(inode, attr);
+6 -4
fs/fat/inode.c
··· 211 211 * FAT need to use the DIO_LOCKING for avoiding the race 212 212 * condition of fat_get_block() and ->truncate(). 213 213 */ 214 - ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, 215 - iov, offset, nr_segs, fat_get_block, NULL); 214 + ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs, 215 + fat_get_block); 216 216 if (ret < 0 && (rw & WRITE)) 217 217 fat_write_failed(mapping, offset + iov_length(iov, nr_segs)); 218 218 ··· 224 224 sector_t blocknr; 225 225 226 226 /* fat_get_cluster() assumes the requested blocknr isn't truncated. */ 227 - down_read(&mapping->host->i_alloc_sem); 227 + down_read(&MSDOS_I(mapping->host)->truncate_lock); 228 228 blocknr = generic_block_bmap(mapping, block, fat_get_block); 229 - up_read(&mapping->host->i_alloc_sem); 229 + up_read(&MSDOS_I(mapping->host)->truncate_lock); 230 230 231 231 return blocknr; 232 232 } ··· 510 510 ei = kmem_cache_alloc(fat_inode_cachep, GFP_NOFS); 511 511 if (!ei) 512 512 return NULL; 513 + 514 + init_rwsem(&ei->truncate_lock); 513 515 return &ei->vfs_inode; 514 516 } 515 517
+10 -19
fs/fat/namei_msdos.c
··· 209 209 int err; 210 210 211 211 lock_super(sb); 212 - 213 212 err = msdos_find(dir, dentry->d_name.name, dentry->d_name.len, &sinfo); 214 - if (err) { 215 - if (err == -ENOENT) { 216 - inode = NULL; 217 - goto out; 218 - } 219 - goto error; 213 + switch (err) { 214 + case -ENOENT: 215 + inode = NULL; 216 + break; 217 + case 0: 218 + inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos); 219 + brelse(sinfo.bh); 220 + break; 221 + default: 222 + inode = ERR_PTR(err); 220 223 } 221 - 222 - inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos); 223 - brelse(sinfo.bh); 224 - if (IS_ERR(inode)) { 225 - err = PTR_ERR(inode); 226 - goto error; 227 - } 228 - out: 229 224 unlock_super(sb); 230 225 return d_splice_alias(inode, dentry); 231 - 232 - error: 233 - unlock_super(sb); 234 - return ERR_PTR(err); 235 226 } 236 227 237 228 /***** Creates a directory entry (name is already formatted). */
+2 -4
fs/fat/namei_vfat.c
··· 82 82 * case sensitive name which is specified by user if this is 83 83 * for creation. 84 84 */ 85 - if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT))) { 86 - if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) 87 - return 0; 88 - } 85 + if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) 86 + return 0; 89 87 90 88 return vfat_revalidate_shortname(dentry); 91 89 }
+1 -27
fs/fs-writeback.c
··· 461 461 } 462 462 463 463 /* 464 - * For background writeback the caller does not have the sb pinned 465 - * before calling writeback. So make sure that we do pin it, so it doesn't 466 - * go away while we are writing inodes from it. 467 - */ 468 - static bool pin_sb_for_writeback(struct super_block *sb) 469 - { 470 - spin_lock(&sb_lock); 471 - if (list_empty(&sb->s_instances)) { 472 - spin_unlock(&sb_lock); 473 - return false; 474 - } 475 - 476 - sb->s_count++; 477 - spin_unlock(&sb_lock); 478 - 479 - if (down_read_trylock(&sb->s_umount)) { 480 - if (sb->s_root) 481 - return true; 482 - up_read(&sb->s_umount); 483 - } 484 - 485 - put_super(sb); 486 - return false; 487 - } 488 - 489 - /* 490 464 * Write a portion of b_io inodes which belong to @sb. 491 465 * 492 466 * If @only_this_sb is true, then find and write all such ··· 559 585 struct inode *inode = wb_inode(wb->b_io.prev); 560 586 struct super_block *sb = inode->i_sb; 561 587 562 - if (!pin_sb_for_writeback(sb)) { 588 + if (!grab_super_passive(sb)) { 563 589 requeue_io(inode); 564 590 continue; 565 591 }
+14 -14
fs/fuse/dir.c
··· 382 382 struct fuse_entry_out outentry; 383 383 struct fuse_file *ff; 384 384 struct file *file; 385 - int flags = nd->intent.open.flags - 1; 385 + int flags = nd->intent.open.flags; 386 386 387 387 if (fc->no_create) 388 388 return -ENOSYS; ··· 576 576 static int fuse_create(struct inode *dir, struct dentry *entry, int mode, 577 577 struct nameidata *nd) 578 578 { 579 - if (nd && (nd->flags & LOOKUP_OPEN)) { 579 + if (nd) { 580 580 int err = fuse_create_open(dir, entry, mode, nd); 581 581 if (err != -ENOSYS) 582 582 return err; ··· 971 971 return err; 972 972 } 973 973 974 - static int fuse_perm_getattr(struct inode *inode, int flags) 974 + static int fuse_perm_getattr(struct inode *inode, int mask) 975 975 { 976 - if (flags & IPERM_FLAG_RCU) 976 + if (mask & MAY_NOT_BLOCK) 977 977 return -ECHILD; 978 978 979 979 return fuse_do_getattr(inode, NULL, NULL); ··· 992 992 * access request is sent. Execute permission is still checked 993 993 * locally based on file mode. 994 994 */ 995 - static int fuse_permission(struct inode *inode, int mask, unsigned int flags) 995 + static int fuse_permission(struct inode *inode, int mask) 996 996 { 997 997 struct fuse_conn *fc = get_fuse_conn(inode); 998 998 bool refreshed = false; ··· 1011 1011 if (fi->i_time < get_jiffies_64()) { 1012 1012 refreshed = true; 1013 1013 1014 - err = fuse_perm_getattr(inode, flags); 1014 + err = fuse_perm_getattr(inode, mask); 1015 1015 if (err) 1016 1016 return err; 1017 1017 } 1018 1018 } 1019 1019 1020 1020 if (fc->flags & FUSE_DEFAULT_PERMISSIONS) { 1021 - err = generic_permission(inode, mask, flags, NULL); 1021 + err = generic_permission(inode, mask); 1022 1022 1023 1023 /* If permission is denied, try to refresh file 1024 1024 attributes. 
This is also needed, because the root 1025 1025 node will at first have no permissions */ 1026 1026 if (err == -EACCES && !refreshed) { 1027 - err = fuse_perm_getattr(inode, flags); 1027 + err = fuse_perm_getattr(inode, mask); 1028 1028 if (!err) 1029 - err = generic_permission(inode, mask, 1030 - flags, NULL); 1029 + err = generic_permission(inode, mask); 1031 1030 } 1032 1031 1033 1032 /* Note: the opposite of the above test does not ··· 1034 1035 noticed immediately, only after the attribute 1035 1036 timeout has expired */ 1036 1037 } else if (mask & (MAY_ACCESS | MAY_CHDIR)) { 1037 - if (flags & IPERM_FLAG_RCU) 1038 + if (mask & MAY_NOT_BLOCK) 1038 1039 return -ECHILD; 1039 1040 1040 1041 err = fuse_access(inode, mask); ··· 1043 1044 if (refreshed) 1044 1045 return -EACCES; 1045 1046 1046 - err = fuse_perm_getattr(inode, flags); 1047 + err = fuse_perm_getattr(inode, mask); 1047 1048 if (!err && !(inode->i_mode & S_IXUGO)) 1048 1049 return -EACCES; 1049 1050 } ··· 1176 1177 return 0; 1177 1178 } 1178 1179 1179 - static int fuse_dir_fsync(struct file *file, int datasync) 1180 + static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end, 1181 + int datasync) 1180 1182 { 1181 - return fuse_fsync_common(file, datasync, 1); 1183 + return fuse_fsync_common(file, start, end, datasync, 1); 1182 1184 } 1183 1185 1184 1186 static bool update_mtime(unsigned ivalid)
+37 -8
fs/fuse/file.c
··· 400 400 fuse_release_nowrite(inode); 401 401 } 402 402 403 - int fuse_fsync_common(struct file *file, int datasync, int isdir) 403 + int fuse_fsync_common(struct file *file, loff_t start, loff_t end, 404 + int datasync, int isdir) 404 405 { 405 406 struct inode *inode = file->f_mapping->host; 406 407 struct fuse_conn *fc = get_fuse_conn(inode); ··· 413 412 if (is_bad_inode(inode)) 414 413 return -EIO; 415 414 415 + err = filemap_write_and_wait_range(inode->i_mapping, start, end); 416 + if (err) 417 + return err; 418 + 416 419 if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir)) 417 420 return 0; 421 + 422 + mutex_lock(&inode->i_mutex); 418 423 419 424 /* 420 425 * Start writeback against all dirty pages of the inode, then ··· 429 422 */ 430 423 err = write_inode_now(inode, 0); 431 424 if (err) 432 - return err; 425 + goto out; 433 426 434 427 fuse_sync_writes(inode); 435 428 436 429 req = fuse_get_req(fc); 437 - if (IS_ERR(req)) 438 - return PTR_ERR(req); 430 + if (IS_ERR(req)) { 431 + err = PTR_ERR(req); 432 + goto out; 433 + } 439 434 440 435 memset(&inarg, 0, sizeof(inarg)); 441 436 inarg.fh = ff->fh; ··· 457 448 fc->no_fsync = 1; 458 449 err = 0; 459 450 } 451 + out: 452 + mutex_unlock(&inode->i_mutex); 460 453 return err; 461 454 } 462 455 463 - static int fuse_fsync(struct file *file, int datasync) 456 + static int fuse_fsync(struct file *file, loff_t start, loff_t end, 457 + int datasync) 464 458 { 465 - return fuse_fsync_common(file, datasync, 0); 459 + return fuse_fsync_common(file, start, end, datasync, 0); 466 460 } 467 461 468 462 void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos, ··· 1612 1600 struct inode *inode = file->f_path.dentry->d_inode; 1613 1601 1614 1602 mutex_lock(&inode->i_mutex); 1615 - switch (origin) { 1616 - case SEEK_END: 1603 + if (origin != SEEK_CUR || origin != SEEK_SET) { 1617 1604 retval = fuse_update_attributes(inode, NULL, file, NULL); 1618 1605 if (retval) 1619 1606 goto exit; 1607 + } 1608 + 
1609 + switch (origin) { 1610 + case SEEK_END: 1620 1611 offset += i_size_read(inode); 1621 1612 break; 1622 1613 case SEEK_CUR: 1623 1614 offset += file->f_pos; 1615 + break; 1616 + case SEEK_DATA: 1617 + if (offset >= i_size_read(inode)) { 1618 + retval = -ENXIO; 1619 + goto exit; 1620 + } 1621 + break; 1622 + case SEEK_HOLE: 1623 + if (offset >= i_size_read(inode)) { 1624 + retval = -ENXIO; 1625 + goto exit; 1626 + } 1627 + offset = i_size_read(inode); 1628 + break; 1624 1629 } 1625 1630 retval = -EINVAL; 1626 1631 if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
+2 -1
fs/fuse/fuse_i.h
··· 589 589 /** 590 590 * Send FSYNC or FSYNCDIR request 591 591 */ 592 - int fuse_fsync_common(struct file *file, int datasync, int isdir); 592 + int fuse_fsync_common(struct file *file, loff_t start, loff_t end, 593 + int datasync, int isdir); 593 594 594 595 /** 595 596 * Notify poll wakeup
+2 -2
fs/generic_acl.c
··· 190 190 } 191 191 192 192 int 193 - generic_check_acl(struct inode *inode, int mask, unsigned int flags) 193 + generic_check_acl(struct inode *inode, int mask) 194 194 { 195 - if (flags & IPERM_FLAG_RCU) { 195 + if (mask & MAY_NOT_BLOCK) { 196 196 if (!negative_cached_acl(inode, ACL_TYPE_ACCESS)) 197 197 return -ECHILD; 198 198 } else {
+2 -2
fs/gfs2/acl.c
··· 75 75 * Returns: errno 76 76 */ 77 77 78 - int gfs2_check_acl(struct inode *inode, int mask, unsigned int flags) 78 + int gfs2_check_acl(struct inode *inode, int mask) 79 79 { 80 80 struct posix_acl *acl; 81 81 int error; 82 82 83 - if (flags & IPERM_FLAG_RCU) { 83 + if (mask & MAY_NOT_BLOCK) { 84 84 if (!negative_cached_acl(inode, ACL_TYPE_ACCESS)) 85 85 return -ECHILD; 86 86 return -EAGAIN;
+1 -1
fs/gfs2/acl.h
··· 16 16 #define GFS2_POSIX_ACL_DEFAULT "posix_acl_default" 17 17 #define GFS2_ACL_MAX_ENTRIES 25 18 18 19 - extern int gfs2_check_acl(struct inode *inode, int mask, unsigned int); 19 + extern int gfs2_check_acl(struct inode *inode, int mask); 20 20 extern int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode); 21 21 extern int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr); 22 22 extern const struct xattr_handler gfs2_xattr_system_handler;
+2
fs/gfs2/bmap.c
··· 1216 1216 if (ret) 1217 1217 return ret; 1218 1218 1219 + inode_dio_wait(inode); 1220 + 1219 1221 oldsize = inode->i_size; 1220 1222 if (newsize >= oldsize) 1221 1223 return do_grow(inode, newsize);
+15 -4
fs/gfs2/file.c
··· 245 245 !capable(CAP_LINUX_IMMUTABLE)) 246 246 goto out; 247 247 if (!IS_IMMUTABLE(inode)) { 248 - error = gfs2_permission(inode, MAY_WRITE, 0); 248 + error = gfs2_permission(inode, MAY_WRITE); 249 249 if (error) 250 250 goto out; 251 251 } ··· 546 546 547 547 /** 548 548 * gfs2_fsync - sync the dirty data for a file (across the cluster) 549 - * @file: the file that points to the dentry (we ignore this) 549 + * @file: the file that points to the dentry 550 + * @start: the start position in the file to sync 551 + * @end: the end position in the file to sync 550 552 * @datasync: set if we can ignore timestamp changes 551 553 * 552 554 * The VFS will flush data for us. We only need to worry ··· 557 555 * Returns: errno 558 556 */ 559 557 560 - static int gfs2_fsync(struct file *file, int datasync) 558 + static int gfs2_fsync(struct file *file, loff_t start, loff_t end, 559 + int datasync) 561 560 { 562 561 struct inode *inode = file->f_mapping->host; 563 562 int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC); 564 563 struct gfs2_inode *ip = GFS2_I(inode); 565 564 int ret; 566 565 566 + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); 567 + if (ret) 568 + return ret; 569 + mutex_lock(&inode->i_mutex); 570 + 567 571 if (datasync) 568 572 sync_state &= ~I_DIRTY_SYNC; 569 573 570 574 if (sync_state) { 571 575 ret = sync_inode_metadata(inode, 1); 572 - if (ret) 576 + if (ret) { 577 + mutex_unlock(&inode->i_mutex); 573 578 return ret; 579 + } 574 580 gfs2_ail_flush(ip->i_gl); 575 581 } 576 582 583 + mutex_unlock(&inode->i_mutex); 577 584 return 0; 578 585 } 579 586
+15 -20
fs/gfs2/inode.c
··· 307 307 } 308 308 309 309 if (!is_root) { 310 - error = gfs2_permission(dir, MAY_EXEC, 0); 310 + error = gfs2_permission(dir, MAY_EXEC); 311 311 if (error) 312 312 goto out; 313 313 } ··· 337 337 { 338 338 int error; 339 339 340 - error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, 0); 340 + error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC); 341 341 if (error) 342 342 return error; 343 343 ··· 792 792 static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry, 793 793 struct nameidata *nd) 794 794 { 795 - struct inode *inode = NULL; 796 - 797 - inode = gfs2_lookupi(dir, &dentry->d_name, 0); 798 - if (inode && IS_ERR(inode)) 799 - return ERR_CAST(inode); 800 - 801 - if (inode) { 795 + struct inode *inode = gfs2_lookupi(dir, &dentry->d_name, 0); 796 + if (inode && !IS_ERR(inode)) { 802 797 struct gfs2_glock *gl = GFS2_I(inode)->i_gl; 803 798 struct gfs2_holder gh; 804 799 int error; ··· 803 808 return ERR_PTR(error); 804 809 } 805 810 gfs2_glock_dq_uninit(&gh); 806 - return d_splice_alias(inode, dentry); 807 811 } 808 - d_add(dentry, inode); 809 - 810 - return NULL; 812 + return d_splice_alias(inode, dentry); 811 813 } 812 814 813 815 /** ··· 849 857 if (inode->i_nlink == 0) 850 858 goto out_gunlock; 851 859 852 - error = gfs2_permission(dir, MAY_WRITE | MAY_EXEC, 0); 860 + error = gfs2_permission(dir, MAY_WRITE | MAY_EXEC); 853 861 if (error) 854 862 goto out_gunlock; 855 863 ··· 982 990 if (IS_APPEND(&dip->i_inode)) 983 991 return -EPERM; 984 992 985 - error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, 0); 993 + error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC); 986 994 if (error) 987 995 return error; 988 996 ··· 1328 1336 } 1329 1337 } 1330 1338 } else { 1331 - error = gfs2_permission(ndir, MAY_WRITE | MAY_EXEC, 0); 1339 + error = gfs2_permission(ndir, MAY_WRITE | MAY_EXEC); 1332 1340 if (error) 1333 1341 goto out_gunlock; 1334 1342 ··· 1363 1371 /* Check out the dir to be renamed */ 1364 1372 1365 1373 
if (dir_rename) { 1366 - error = gfs2_permission(odentry->d_inode, MAY_WRITE, 0); 1374 + error = gfs2_permission(odentry->d_inode, MAY_WRITE); 1367 1375 if (error) 1368 1376 goto out_gunlock; 1369 1377 } ··· 1535 1543 * Returns: errno 1536 1544 */ 1537 1545 1538 - int gfs2_permission(struct inode *inode, int mask, unsigned int flags) 1546 + int gfs2_permission(struct inode *inode, int mask) 1539 1547 { 1540 1548 struct gfs2_inode *ip; 1541 1549 struct gfs2_holder i_gh; ··· 1545 1553 1546 1554 ip = GFS2_I(inode); 1547 1555 if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) { 1548 - if (flags & IPERM_FLAG_RCU) 1556 + if (mask & MAY_NOT_BLOCK) 1549 1557 return -ECHILD; 1550 1558 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); 1551 1559 if (error) ··· 1556 1564 if ((mask & MAY_WRITE) && IS_IMMUTABLE(inode)) 1557 1565 error = -EACCES; 1558 1566 else 1559 - error = generic_permission(inode, mask, flags, gfs2_check_acl); 1567 + error = generic_permission(inode, mask); 1560 1568 if (unlock) 1561 1569 gfs2_glock_dq_uninit(&i_gh); 1562 1570 ··· 1846 1854 .listxattr = gfs2_listxattr, 1847 1855 .removexattr = gfs2_removexattr, 1848 1856 .fiemap = gfs2_fiemap, 1857 + .check_acl = gfs2_check_acl, 1849 1858 }; 1850 1859 1851 1860 const struct inode_operations gfs2_dir_iops = { ··· 1867 1874 .listxattr = gfs2_listxattr, 1868 1875 .removexattr = gfs2_removexattr, 1869 1876 .fiemap = gfs2_fiemap, 1877 + .check_acl = gfs2_check_acl, 1870 1878 }; 1871 1879 1872 1880 const struct inode_operations gfs2_symlink_iops = { ··· 1882 1888 .listxattr = gfs2_listxattr, 1883 1889 .removexattr = gfs2_removexattr, 1884 1890 .fiemap = gfs2_fiemap, 1891 + .check_acl = gfs2_check_acl, 1885 1892 }; 1886 1893
+1 -1
fs/gfs2/inode.h
··· 108 108 109 109 extern struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name, 110 110 int is_root); 111 - extern int gfs2_permission(struct inode *inode, int mask, unsigned int flags); 111 + extern int gfs2_permission(struct inode *inode, int mask); 112 112 extern int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr); 113 113 extern struct inode *gfs2_lookup_simple(struct inode *dip, const char *name); 114 114 extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
+12 -3
fs/hfs/inode.c
··· 123 123 struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host; 124 124 ssize_t ret; 125 125 126 - ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 127 - offset, nr_segs, hfs_get_block, NULL); 126 + ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs, 127 + hfs_get_block); 128 128 129 129 /* 130 130 * In case of error extending write may have instantiated a few ··· 615 615 616 616 if ((attr->ia_valid & ATTR_SIZE) && 617 617 attr->ia_size != i_size_read(inode)) { 618 + inode_dio_wait(inode); 619 + 618 620 error = vmtruncate(inode, attr->ia_size); 619 621 if (error) 620 622 return error; ··· 627 625 return 0; 628 626 } 629 627 630 - static int hfs_file_fsync(struct file *filp, int datasync) 628 + static int hfs_file_fsync(struct file *filp, loff_t start, loff_t end, 629 + int datasync) 631 630 { 632 631 struct inode *inode = filp->f_mapping->host; 633 632 struct super_block * sb; 634 633 int ret, err; 634 + 635 + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); 636 + if (ret) 637 + return ret; 638 + mutex_lock(&inode->i_mutex); 635 639 636 640 /* sync the inode to buffers */ 637 641 ret = write_inode_now(inode, 0); ··· 655 647 err = sync_blockdev(sb->s_bdev); 656 648 if (!ret) 657 649 ret = err; 650 + mutex_unlock(&inode->i_mutex); 658 651 return ret; 659 652 } 660 653
+2 -1
fs/hfsplus/hfsplus_fs.h
··· 404 404 int hfsplus_cat_write_inode(struct inode *); 405 405 struct inode *hfsplus_new_inode(struct super_block *, int); 406 406 void hfsplus_delete_inode(struct inode *); 407 - int hfsplus_file_fsync(struct file *file, int datasync); 407 + int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end, 408 + int datasync); 408 409 409 410 /* ioctl.c */ 410 411 long hfsplus_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+13 -3
fs/hfsplus/inode.c
··· 119 119 struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host; 120 120 ssize_t ret; 121 121 122 - ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 123 - offset, nr_segs, hfsplus_get_block, NULL); 122 + ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs, 123 + hfsplus_get_block); 124 124 125 125 /* 126 126 * In case of error extending write may have instantiated a few ··· 298 298 299 299 if ((attr->ia_valid & ATTR_SIZE) && 300 300 attr->ia_size != i_size_read(inode)) { 301 + inode_dio_wait(inode); 302 + 301 303 error = vmtruncate(inode, attr->ia_size); 302 304 if (error) 303 305 return error; ··· 310 308 return 0; 311 309 } 312 310 313 - int hfsplus_file_fsync(struct file *file, int datasync) 311 + int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end, 312 + int datasync) 314 313 { 315 314 struct inode *inode = file->f_mapping->host; 316 315 struct hfsplus_inode_info *hip = HFSPLUS_I(inode); 317 316 struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); 318 317 int error = 0, error2; 318 + 319 + error = filemap_write_and_wait_range(inode->i_mapping, start, end); 320 + if (error) 321 + return error; 322 + mutex_lock(&inode->i_mutex); 319 323 320 324 /* 321 325 * Sync inode metadata into the catalog and extent trees. ··· 349 341 350 342 if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags)) 351 343 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); 344 + 345 + mutex_unlock(&inode->i_mutex); 352 346 353 347 return error; 354 348 }
+16 -5
fs/hostfs/hostfs_kern.c
··· 362 362 return 0; 363 363 } 364 364 365 - int hostfs_fsync(struct file *file, int datasync) 365 + int hostfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) 366 366 { 367 - return fsync_file(HOSTFS_I(file->f_mapping->host)->fd, datasync); 367 + struct inode *inode = file->f_mapping->host; 368 + int ret; 369 + 370 + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); 371 + if (ret) 372 + return ret; 373 + 374 + mutex_lock(&inode->i_mutex); 375 + ret = fsync_file(HOSTFS_I(inode)->fd, datasync); 376 + mutex_unlock(&inode->i_mutex); 377 + 378 + return ret; 368 379 } 369 380 370 381 static const struct file_operations hostfs_file_fops = { ··· 759 748 return err; 760 749 } 761 750 762 - int hostfs_permission(struct inode *ino, int desired, unsigned int flags) 751 + int hostfs_permission(struct inode *ino, int desired) 763 752 { 764 753 char *name; 765 754 int r = 0, w = 0, x = 0, err; 766 755 767 - if (flags & IPERM_FLAG_RCU) 756 + if (desired & MAY_NOT_BLOCK) 768 757 return -ECHILD; 769 758 770 759 if (desired & MAY_READ) r = 1; ··· 781 770 err = access_file(name, r, w, x); 782 771 __putname(name); 783 772 if (!err) 784 - err = generic_permission(ino, desired, flags, NULL); 773 + err = generic_permission(ino, desired); 785 774 return err; 786 775 } 787 776
+4
fs/hpfs/dir.c
··· 29 29 struct hpfs_inode_info *hpfs_inode = hpfs_i(i); 30 30 struct super_block *s = i->i_sb; 31 31 32 + /* Somebody else will have to figure out what to do here */ 33 + if (whence == SEEK_DATA || whence == SEEK_HOLE) 34 + return -EINVAL; 35 + 32 36 hpfs_lock(s); 33 37 34 38 /*printk("dir lseek\n");*/
+6 -1
fs/hpfs/file.c
··· 18 18 return 0; 19 19 } 20 20 21 - int hpfs_file_fsync(struct file *file, int datasync) 21 + int hpfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync) 22 22 { 23 23 struct inode *inode = file->f_mapping->host; 24 + int ret; 25 + 26 + ret = filemap_write_and_wait_range(file->f_mapping, start, end); 27 + if (ret) 28 + return ret; 24 29 return sync_blockdev(inode->i_sb->s_bdev); 25 30 } 26 31
+1 -1
fs/hpfs/hpfs_fn.h
··· 258 258 259 259 /* file.c */ 260 260 261 - int hpfs_file_fsync(struct file *, int); 261 + int hpfs_file_fsync(struct file *, loff_t, loff_t, int); 262 262 extern const struct file_operations hpfs_file_ops; 263 263 extern const struct inode_operations hpfs_file_iops; 264 264 extern const struct address_space_operations hpfs_aops;
+1 -1
fs/hpfs/namei.c
··· 398 398 hpfs_unlock(dir->i_sb); 399 399 return -ENOSPC; 400 400 } 401 - if (generic_permission(inode, MAY_WRITE, 0, NULL) || 401 + if (generic_permission(inode, MAY_WRITE) || 402 402 !S_ISREG(inode->i_mode) || 403 403 get_write_access(inode)) { 404 404 d_rehash(dentry);
+3 -2
fs/hppfs/hppfs.c
··· 573 573 return err; 574 574 } 575 575 576 - static int hppfs_fsync(struct file *file, int datasync) 576 + static int hppfs_fsync(struct file *file, loff_t start, loff_t end, 577 + int datasync) 577 578 { 578 - return 0; 579 + return filemap_write_and_wait_range(file->f_mapping, start, end); 579 580 } 580 581 581 582 static const struct file_operations hppfs_dir_fops = {
+41 -88
fs/inode.c
··· 33 33 * 34 34 * inode->i_lock protects: 35 35 * inode->i_state, inode->i_hash, __iget() 36 - * inode_lru_lock protects: 37 - * inode_lru, inode->i_lru 36 + * inode->i_sb->s_inode_lru_lock protects: 37 + * inode->i_sb->s_inode_lru, inode->i_lru 38 38 * inode_sb_list_lock protects: 39 39 * sb->s_inodes, inode->i_sb_list 40 40 * inode_wb_list_lock protects: ··· 46 46 * 47 47 * inode_sb_list_lock 48 48 * inode->i_lock 49 - * inode_lru_lock 49 + * inode->i_sb->s_inode_lru_lock 50 50 * 51 51 * inode_wb_list_lock 52 52 * inode->i_lock ··· 64 64 static struct hlist_head *inode_hashtable __read_mostly; 65 65 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock); 66 66 67 - static LIST_HEAD(inode_lru); 68 - static DEFINE_SPINLOCK(inode_lru_lock); 69 - 70 67 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock); 71 68 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock); 72 - 73 - /* 74 - * iprune_sem provides exclusion between the icache shrinking and the 75 - * umount path. 76 - * 77 - * We don't actually need it to protect anything in the umount path, 78 - * but only need to cycle through it to make sure any inode that 79 - * prune_icache took off the LRU list has been fully torn down by the 80 - * time we are past evict_inodes. 81 - */ 82 - static DECLARE_RWSEM(iprune_sem); 83 69 84 70 /* 85 71 * Empty aops. Can be used for the cases where the user does not ··· 81 95 struct inodes_stat_t inodes_stat; 82 96 83 97 static DEFINE_PER_CPU(unsigned int, nr_inodes); 98 + static DEFINE_PER_CPU(unsigned int, nr_unused); 84 99 85 100 static struct kmem_cache *inode_cachep __read_mostly; 86 101 ··· 96 109 97 110 static inline int get_nr_inodes_unused(void) 98 111 { 99 - return inodes_stat.nr_unused; 112 + int i; 113 + int sum = 0; 114 + for_each_possible_cpu(i) 115 + sum += per_cpu(nr_unused, i); 116 + return sum < 0 ? 
0 : sum; 100 117 } 101 118 102 119 int get_nr_dirty_inodes(void) ··· 118 127 void __user *buffer, size_t *lenp, loff_t *ppos) 119 128 { 120 129 inodes_stat.nr_inodes = get_nr_inodes(); 130 + inodes_stat.nr_unused = get_nr_inodes_unused(); 121 131 return proc_dointvec(table, write, buffer, lenp, ppos); 122 132 } 123 133 #endif ··· 168 176 mutex_init(&inode->i_mutex); 169 177 lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key); 170 178 171 - init_rwsem(&inode->i_alloc_sem); 172 - lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key); 179 + atomic_set(&inode->i_dio_count, 0); 173 180 174 181 mapping->a_ops = &empty_aops; 175 182 mapping->host = inode; ··· 328 337 329 338 static void inode_lru_list_add(struct inode *inode) 330 339 { 331 - spin_lock(&inode_lru_lock); 340 + spin_lock(&inode->i_sb->s_inode_lru_lock); 332 341 if (list_empty(&inode->i_lru)) { 333 - list_add(&inode->i_lru, &inode_lru); 334 - inodes_stat.nr_unused++; 342 + list_add(&inode->i_lru, &inode->i_sb->s_inode_lru); 343 + inode->i_sb->s_nr_inodes_unused++; 344 + this_cpu_inc(nr_unused); 335 345 } 336 - spin_unlock(&inode_lru_lock); 346 + spin_unlock(&inode->i_sb->s_inode_lru_lock); 337 347 } 338 348 339 349 static void inode_lru_list_del(struct inode *inode) 340 350 { 341 - spin_lock(&inode_lru_lock); 351 + spin_lock(&inode->i_sb->s_inode_lru_lock); 342 352 if (!list_empty(&inode->i_lru)) { 343 353 list_del_init(&inode->i_lru); 344 - inodes_stat.nr_unused--; 354 + inode->i_sb->s_nr_inodes_unused--; 355 + this_cpu_dec(nr_unused); 345 356 } 346 - spin_unlock(&inode_lru_lock); 357 + spin_unlock(&inode->i_sb->s_inode_lru_lock); 347 358 } 348 359 349 360 /** ··· 530 537 spin_unlock(&inode_sb_list_lock); 531 538 532 539 dispose_list(&dispose); 533 - 534 - /* 535 - * Cycle through iprune_sem to make sure any inode that prune_icache 536 - * moved off the list before we took the lock has been fully torn 537 - * down. 
538 - */ 539 - down_write(&iprune_sem); 540 - up_write(&iprune_sem); 541 540 } 542 541 543 542 /** ··· 592 607 } 593 608 594 609 /* 595 - * Scan `goal' inodes on the unused list for freeable ones. They are moved to a 596 - * temporary list and then are freed outside inode_lru_lock by dispose_list(). 610 + * Walk the superblock inode LRU for freeable inodes and attempt to free them. 611 + * This is called from the superblock shrinker function with a number of inodes 612 + * to trim from the LRU. Inodes to be freed are moved to a temporary list and 613 + * then are freed outside inode_lock by dispose_list(). 597 614 * 598 615 * Any inodes which are pinned purely because of attached pagecache have their 599 616 * pagecache removed. If the inode has metadata buffers attached to ··· 609 622 * LRU does not have strict ordering. Hence we don't want to reclaim inodes 610 623 * with this flag set because they are the inodes that are out of order. 611 624 */ 612 - static void prune_icache(int nr_to_scan) 625 + void prune_icache_sb(struct super_block *sb, int nr_to_scan) 613 626 { 614 627 LIST_HEAD(freeable); 615 628 int nr_scanned; 616 629 unsigned long reap = 0; 617 630 618 - down_read(&iprune_sem); 619 - spin_lock(&inode_lru_lock); 620 - for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) { 631 + spin_lock(&sb->s_inode_lru_lock); 632 + for (nr_scanned = nr_to_scan; nr_scanned >= 0; nr_scanned--) { 621 633 struct inode *inode; 622 634 623 - if (list_empty(&inode_lru)) 635 + if (list_empty(&sb->s_inode_lru)) 624 636 break; 625 637 626 - inode = list_entry(inode_lru.prev, struct inode, i_lru); 638 + inode = list_entry(sb->s_inode_lru.prev, struct inode, i_lru); 627 639 628 640 /* 629 - * we are inverting the inode_lru_lock/inode->i_lock here, 641 + * we are inverting the sb->s_inode_lru_lock/inode->i_lock here, 630 642 * so use a trylock. If we fail to get the lock, just move the 631 643 * inode to the back of the list so we don't spin on it. 
632 644 */ 633 645 if (!spin_trylock(&inode->i_lock)) { 634 - list_move(&inode->i_lru, &inode_lru); 646 + list_move(&inode->i_lru, &sb->s_inode_lru); 635 647 continue; 636 648 } 637 649 ··· 642 656 (inode->i_state & ~I_REFERENCED)) { 643 657 list_del_init(&inode->i_lru); 644 658 spin_unlock(&inode->i_lock); 645 - inodes_stat.nr_unused--; 659 + sb->s_nr_inodes_unused--; 660 + this_cpu_dec(nr_unused); 646 661 continue; 647 662 } 648 663 649 664 /* recently referenced inodes get one more pass */ 650 665 if (inode->i_state & I_REFERENCED) { 651 666 inode->i_state &= ~I_REFERENCED; 652 - list_move(&inode->i_lru, &inode_lru); 667 + list_move(&inode->i_lru, &sb->s_inode_lru); 653 668 spin_unlock(&inode->i_lock); 654 669 continue; 655 670 } 656 671 if (inode_has_buffers(inode) || inode->i_data.nrpages) { 657 672 __iget(inode); 658 673 spin_unlock(&inode->i_lock); 659 - spin_unlock(&inode_lru_lock); 674 + spin_unlock(&sb->s_inode_lru_lock); 660 675 if (remove_inode_buffers(inode)) 661 676 reap += invalidate_mapping_pages(&inode->i_data, 662 677 0, -1); 663 678 iput(inode); 664 - spin_lock(&inode_lru_lock); 679 + spin_lock(&sb->s_inode_lru_lock); 665 680 666 - if (inode != list_entry(inode_lru.next, 681 + if (inode != list_entry(sb->s_inode_lru.next, 667 682 struct inode, i_lru)) 668 683 continue; /* wrong inode or list_empty */ 669 684 /* avoid lock inversions with trylock */ ··· 680 693 spin_unlock(&inode->i_lock); 681 694 682 695 list_move(&inode->i_lru, &freeable); 683 - inodes_stat.nr_unused--; 696 + sb->s_nr_inodes_unused--; 697 + this_cpu_dec(nr_unused); 684 698 } 685 699 if (current_is_kswapd()) 686 700 __count_vm_events(KSWAPD_INODESTEAL, reap); 687 701 else 688 702 __count_vm_events(PGINODESTEAL, reap); 689 - spin_unlock(&inode_lru_lock); 703 + spin_unlock(&sb->s_inode_lru_lock); 690 704 691 705 dispose_list(&freeable); 692 - up_read(&iprune_sem); 693 706 } 694 - 695 - /* 696 - * shrink_icache_memory() will attempt to reclaim some unused inodes. 
Here, 697 - * "unused" means that no dentries are referring to the inodes: the files are 698 - * not open and the dcache references to those inodes have already been 699 - * reclaimed. 700 - * 701 - * This function is passed the number of inodes to scan, and it returns the 702 - * total number of remaining possibly-reclaimable inodes. 703 - */ 704 - static int shrink_icache_memory(struct shrinker *shrink, 705 - struct shrink_control *sc) 706 - { 707 - int nr = sc->nr_to_scan; 708 - gfp_t gfp_mask = sc->gfp_mask; 709 - 710 - if (nr) { 711 - /* 712 - * Nasty deadlock avoidance. We may hold various FS locks, 713 - * and we don't want to recurse into the FS that called us 714 - * in clear_inode() and friends.. 715 - */ 716 - if (!(gfp_mask & __GFP_FS)) 717 - return -1; 718 - prune_icache(nr); 719 - } 720 - return (get_nr_inodes_unused() / 100) * sysctl_vfs_cache_pressure; 721 - } 722 - 723 - static struct shrinker icache_shrinker = { 724 - .shrink = shrink_icache_memory, 725 - .seeks = DEFAULT_SEEKS, 726 - }; 727 707 728 708 static void __wait_on_freeing_inode(struct inode *inode); 729 709 /* ··· 1285 1331 1286 1332 WARN_ON(inode->i_state & I_NEW); 1287 1333 1288 - if (op && op->drop_inode) 1334 + if (op->drop_inode) 1289 1335 drop = op->drop_inode(inode); 1290 1336 else 1291 1337 drop = generic_drop_inode(inode); ··· 1571 1617 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| 1572 1618 SLAB_MEM_SPREAD), 1573 1619 init_once); 1574 - register_shrinker(&icache_shrinker); 1575 1620 1576 1621 /* Hash may have been set up in inode_init_early */ 1577 1622 if (!hashdist)
+6
fs/internal.h
··· 97 97 * super.c 98 98 */ 99 99 extern int do_remount_sb(struct super_block *, int, void *, int); 100 + extern bool grab_super_passive(struct super_block *sb); 100 101 extern void __put_super(struct super_block *sb); 101 102 extern void put_super(struct super_block *sb); 102 103 extern struct dentry *mount_fs(struct file_system_type *, ··· 136 135 extern int get_nr_dirty_inodes(void); 137 136 extern void evict_inodes(struct super_block *); 138 137 extern int invalidate_inodes(struct super_block *, bool); 138 + 139 + /* 140 + * dcache.c 141 + */ 142 + extern struct dentry *__d_alloc(struct super_block *, const struct qstr *);
-3
fs/isofs/dir.c
··· 254 254 char *tmpname; 255 255 struct iso_directory_record *tmpde; 256 256 struct inode *inode = filp->f_path.dentry->d_inode; 257 - struct isofs_sb_info *sbi = ISOFS_SB(inode->i_sb); 258 257 259 258 tmpname = (char *)__get_free_page(GFP_KERNEL); 260 259 if (tmpname == NULL) 261 260 return -ENOMEM; 262 261 263 - mutex_lock(&sbi->s_mutex); 264 262 tmpde = (struct iso_directory_record *) (tmpname+1024); 265 263 266 264 result = do_isofs_readdir(inode, filp, dirent, filldir, tmpname, tmpde); 267 265 268 266 free_page((unsigned long) tmpname); 269 - mutex_unlock(&sbi->s_mutex); 270 267 return result; 271 268 } 272 269
-1
fs/isofs/inode.c
··· 863 863 sbi->s_utf8 = opt.utf8; 864 864 sbi->s_nocompress = opt.nocompress; 865 865 sbi->s_overriderockperm = opt.overriderockperm; 866 - mutex_init(&sbi->s_mutex); 867 866 /* 868 867 * It would be incredibly stupid to allow people to mark every file 869 868 * on the disk as suid, so we merely allow them to set the default
-1
fs/isofs/isofs.h
··· 55 55 gid_t s_gid; 56 56 uid_t s_uid; 57 57 struct nls_table *s_nls_iocharset; /* Native language support table */ 58 - struct mutex s_mutex; /* replaces BKL, please remove if possible */ 59 58 }; 60 59 61 60 #define ISOFS_INVALID_MODE ((mode_t) -1)
+2 -11
fs/isofs/namei.c
··· 168 168 int found; 169 169 unsigned long uninitialized_var(block); 170 170 unsigned long uninitialized_var(offset); 171 - struct isofs_sb_info *sbi = ISOFS_SB(dir->i_sb); 172 171 struct inode *inode; 173 172 struct page *page; 174 173 ··· 175 176 if (!page) 176 177 return ERR_PTR(-ENOMEM); 177 178 178 - mutex_lock(&sbi->s_mutex); 179 179 found = isofs_find_entry(dir, dentry, 180 180 &block, &offset, 181 181 page_address(page), 182 182 1024 + page_address(page)); 183 183 __free_page(page); 184 184 185 - inode = NULL; 186 - if (found) { 187 - inode = isofs_iget(dir->i_sb, block, offset); 188 - if (IS_ERR(inode)) { 189 - mutex_unlock(&sbi->s_mutex); 190 - return ERR_CAST(inode); 191 - } 192 - } 193 - mutex_unlock(&sbi->s_mutex); 185 + inode = found ? isofs_iget(dir->i_sb, block, offset) : NULL; 186 + 194 187 return d_splice_alias(inode, dentry); 195 188 }
-3
fs/isofs/rock.c
··· 678 678 679 679 init_rock_state(&rs, inode); 680 680 block = ei->i_iget5_block; 681 - mutex_lock(&sbi->s_mutex); 682 681 bh = sb_bread(inode->i_sb, block); 683 682 if (!bh) 684 683 goto out_noread; ··· 747 748 goto fail; 748 749 brelse(bh); 749 750 *rpnt = '\0'; 750 - mutex_unlock(&sbi->s_mutex); 751 751 SetPageUptodate(page); 752 752 kunmap(page); 753 753 unlock_page(page); ··· 763 765 printk("symlink spans iso9660 blocks\n"); 764 766 fail: 765 767 brelse(bh); 766 - mutex_unlock(&sbi->s_mutex); 767 768 error: 768 769 SetPageError(page); 769 770 kunmap(page);
+2 -2
fs/jffs2/acl.c
··· 259 259 return rc; 260 260 } 261 261 262 - int jffs2_check_acl(struct inode *inode, int mask, unsigned int flags) 262 + int jffs2_check_acl(struct inode *inode, int mask) 263 263 { 264 264 struct posix_acl *acl; 265 265 int rc; 266 266 267 - if (flags & IPERM_FLAG_RCU) 267 + if (mask & MAY_NOT_BLOCK) 268 268 return -ECHILD; 269 269 270 270 acl = jffs2_get_acl(inode, ACL_TYPE_ACCESS);
+1 -1
fs/jffs2/acl.h
··· 26 26 27 27 #ifdef CONFIG_JFFS2_FS_POSIX_ACL 28 28 29 - extern int jffs2_check_acl(struct inode *, int, unsigned int); 29 + extern int jffs2_check_acl(struct inode *, int); 30 30 extern int jffs2_acl_chmod(struct inode *); 31 31 extern int jffs2_init_acl_pre(struct inode *, struct inode *, int *); 32 32 extern int jffs2_init_acl_post(struct inode *);
+5 -4
fs/jffs2/dir.c
··· 102 102 mutex_unlock(&dir_f->sem); 103 103 if (ino) { 104 104 inode = jffs2_iget(dir_i->i_sb, ino); 105 - if (IS_ERR(inode)) { 105 + if (IS_ERR(inode)) 106 106 printk(KERN_WARNING "iget() failed for ino #%u\n", ino); 107 - return ERR_CAST(inode); 108 - } 109 107 } 110 108 111 109 return d_splice_alias(inode, target); ··· 820 822 821 823 if (victim_f) { 822 824 /* There was a victim. Kill it off nicely */ 823 - drop_nlink(new_dentry->d_inode); 825 + if (S_ISDIR(new_dentry->d_inode->i_mode)) 826 + clear_nlink(new_dentry->d_inode); 827 + else 828 + drop_nlink(new_dentry->d_inode); 824 829 /* Don't oops if the victim was a dirent pointing to an 825 830 inode which didn't exist. */ 826 831 if (victim_f->inocache) {
+8 -1
fs/jffs2/file.c
··· 27 27 struct page **pagep, void **fsdata); 28 28 static int jffs2_readpage (struct file *filp, struct page *pg); 29 29 30 - int jffs2_fsync(struct file *filp, int datasync) 30 + int jffs2_fsync(struct file *filp, loff_t start, loff_t end, int datasync) 31 31 { 32 32 struct inode *inode = filp->f_mapping->host; 33 33 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); 34 + int ret; 34 35 36 + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); 37 + if (ret) 38 + return ret; 39 + 40 + mutex_lock(&inode->i_mutex); 35 41 /* Trigger GC to flush any pending writes for this inode */ 36 42 jffs2_flush_wbuf_gc(c, inode->i_ino); 43 + mutex_unlock(&inode->i_mutex); 37 44 38 45 return 0; 39 46 }
+1 -1
fs/jffs2/os-linux.h
··· 158 158 extern const struct file_operations jffs2_file_operations; 159 159 extern const struct inode_operations jffs2_file_inode_operations; 160 160 extern const struct address_space_operations jffs2_file_address_operations; 161 - int jffs2_fsync(struct file *, int); 161 + int jffs2_fsync(struct file *, loff_t, loff_t, int); 162 162 int jffs2_do_readpage_unlock (struct inode *inode, struct page *pg); 163 163 164 164 /* ioctl.c */
+2 -2
fs/jfs/acl.c
··· 114 114 return rc; 115 115 } 116 116 117 - int jfs_check_acl(struct inode *inode, int mask, unsigned int flags) 117 + int jfs_check_acl(struct inode *inode, int mask) 118 118 { 119 119 struct posix_acl *acl; 120 120 121 - if (flags & IPERM_FLAG_RCU) 121 + if (mask & MAY_NOT_BLOCK) 122 122 return -ECHILD; 123 123 124 124 acl = jfs_get_acl(inode, ACL_TYPE_ACCESS);
+10 -1
fs/jfs/file.c
··· 28 28 #include "jfs_acl.h" 29 29 #include "jfs_debug.h" 30 30 31 - int jfs_fsync(struct file *file, int datasync) 31 + int jfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) 32 32 { 33 33 struct inode *inode = file->f_mapping->host; 34 34 int rc = 0; 35 35 36 + rc = filemap_write_and_wait_range(inode->i_mapping, start, end); 37 + if (rc) 38 + return rc; 39 + 40 + mutex_lock(&inode->i_mutex); 36 41 if (!(inode->i_state & I_DIRTY) || 37 42 (datasync && !(inode->i_state & I_DIRTY_DATASYNC))) { 38 43 /* Make sure committed changes hit the disk */ 39 44 jfs_flush_journal(JFS_SBI(inode->i_sb)->log, 1); 45 + mutex_unlock(&inode->i_mutex); 40 46 return rc; 41 47 } 42 48 43 49 rc |= jfs_commit_inode(inode, 1); 50 + mutex_unlock(&inode->i_mutex); 44 51 45 52 return rc ? -EIO : 0; 46 53 } ··· 117 110 118 111 if ((iattr->ia_valid & ATTR_SIZE) && 119 112 iattr->ia_size != i_size_read(inode)) { 113 + inode_dio_wait(inode); 114 + 120 115 rc = vmtruncate(inode, iattr->ia_size); 121 116 if (rc) 122 117 return rc;
+2 -2
fs/jfs/inode.c
··· 329 329 struct inode *inode = file->f_mapping->host; 330 330 ssize_t ret; 331 331 332 - ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 333 - offset, nr_segs, jfs_get_block, NULL); 332 + ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs, 333 + jfs_get_block); 334 334 335 335 /* 336 336 * In case of error extending write may have instantiated a few
+1 -1
fs/jfs/jfs_acl.h
··· 20 20 21 21 #ifdef CONFIG_JFS_POSIX_ACL 22 22 23 - int jfs_check_acl(struct inode *, int, unsigned int flags); 23 + int jfs_check_acl(struct inode *, int); 24 24 int jfs_init_acl(tid_t, struct inode *, struct inode *); 25 25 int jfs_acl_chmod(struct inode *inode); 26 26
+1 -1
fs/jfs/jfs_inode.h
··· 21 21 struct fid; 22 22 23 23 extern struct inode *ialloc(struct inode *, umode_t); 24 - extern int jfs_fsync(struct file *, int); 24 + extern int jfs_fsync(struct file *, loff_t, loff_t, int); 25 25 extern long jfs_ioctl(struct file *, unsigned int, unsigned long); 26 26 extern long jfs_compat_ioctl(struct file *, unsigned int, unsigned long); 27 27 extern struct inode *jfs_iget(struct super_block *, unsigned long);
+16 -31
fs/jfs/namei.c
··· 1456 1456 ino_t inum; 1457 1457 struct inode *ip; 1458 1458 struct component_name key; 1459 - const char *name = dentry->d_name.name; 1460 - int len = dentry->d_name.len; 1461 1459 int rc; 1462 1460 1463 - jfs_info("jfs_lookup: name = %s", name); 1461 + jfs_info("jfs_lookup: name = %s", dentry->d_name.name); 1464 1462 1465 - if ((name[0] == '.') && (len == 1)) 1466 - inum = dip->i_ino; 1467 - else if (strcmp(name, "..") == 0) 1468 - inum = PARENT(dip); 1469 - else { 1470 - if ((rc = get_UCSname(&key, dentry))) 1471 - return ERR_PTR(rc); 1472 - rc = dtSearch(dip, &key, &inum, &btstack, JFS_LOOKUP); 1473 - free_UCSname(&key); 1474 - if (rc == -ENOENT) { 1475 - d_add(dentry, NULL); 1476 - return NULL; 1477 - } else if (rc) { 1478 - jfs_err("jfs_lookup: dtSearch returned %d", rc); 1479 - return ERR_PTR(rc); 1480 - } 1481 - } 1482 - 1483 - ip = jfs_iget(dip->i_sb, inum); 1484 - if (IS_ERR(ip)) { 1485 - jfs_err("jfs_lookup: iget failed on inum %d", (uint) inum); 1486 - return ERR_CAST(ip); 1463 + if ((rc = get_UCSname(&key, dentry))) 1464 + return ERR_PTR(rc); 1465 + rc = dtSearch(dip, &key, &inum, &btstack, JFS_LOOKUP); 1466 + free_UCSname(&key); 1467 + if (rc == -ENOENT) { 1468 + ip = NULL; 1469 + } else if (rc) { 1470 + jfs_err("jfs_lookup: dtSearch returned %d", rc); 1471 + ip = ERR_PTR(rc); 1472 + } else { 1473 + ip = jfs_iget(dip->i_sb, inum); 1474 + if (IS_ERR(ip)) 1475 + jfs_err("jfs_lookup: iget failed on inum %d", (uint)inum); 1487 1476 } 1488 1477 1489 1478 return d_splice_alias(ip, dentry); ··· 1586 1597 1587 1598 static int jfs_ci_revalidate(struct dentry *dentry, struct nameidata *nd) 1588 1599 { 1589 - if (nd && nd->flags & LOOKUP_RCU) 1590 - return -ECHILD; 1591 1600 /* 1592 1601 * This is not negative dentry. Always valid. 1593 1602 * ··· 1611 1624 * case sensitive name which is specified by user if this is 1612 1625 * for creation. 
1613 1626 */ 1614 - if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT))) { 1615 - if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) 1616 - return 0; 1617 - } 1627 + if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) 1628 + return 0; 1618 1629 return 1; 1619 1630 } 1620 1631
+18 -8
fs/libfs.c
··· 16 16 17 17 #include <asm/uaccess.h> 18 18 19 + #include "internal.h" 20 + 19 21 static inline int simple_positive(struct dentry *dentry) 20 22 { 21 23 return dentry->d_inode && !d_unhashed(dentry); ··· 248 246 root->i_ino = 1; 249 247 root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR; 250 248 root->i_atime = root->i_mtime = root->i_ctime = CURRENT_TIME; 251 - dentry = d_alloc(NULL, &d_name); 249 + dentry = __d_alloc(s, &d_name); 252 250 if (!dentry) { 253 251 iput(root); 254 252 goto Enomem; 255 253 } 256 - dentry->d_sb = s; 257 - dentry->d_parent = dentry; 258 254 d_instantiate(dentry, root); 259 255 s->s_root = dentry; 260 256 s->s_d_op = dops; ··· 328 328 329 329 if (new_dentry->d_inode) { 330 330 simple_unlink(new_dir, new_dentry); 331 - if (they_are_dirs) 331 + if (they_are_dirs) { 332 + drop_nlink(new_dentry->d_inode); 332 333 drop_nlink(old_dir); 334 + } 333 335 } else if (they_are_dirs) { 334 336 drop_nlink(old_dir); 335 337 inc_nlink(new_dir); ··· 907 905 * filesystems which track all non-inode metadata in the buffers list 908 906 * hanging off the address_space structure. 
909 907 */ 910 - int generic_file_fsync(struct file *file, int datasync) 908 + int generic_file_fsync(struct file *file, loff_t start, loff_t end, 909 + int datasync) 911 910 { 912 911 struct inode *inode = file->f_mapping->host; 913 912 int err; 914 913 int ret; 915 914 915 + err = filemap_write_and_wait_range(inode->i_mapping, start, end); 916 + if (err) 917 + return err; 918 + 919 + mutex_lock(&inode->i_mutex); 916 920 ret = sync_mapping_buffers(inode->i_mapping); 917 921 if (!(inode->i_state & I_DIRTY)) 918 - return ret; 922 + goto out; 919 923 if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) 920 - return ret; 924 + goto out; 921 925 922 926 err = sync_inode_metadata(inode, 1); 923 927 if (ret == 0) 924 928 ret = err; 929 + out: 930 + mutex_unlock(&inode->i_mutex); 925 931 return ret; 926 932 } 927 933 EXPORT_SYMBOL(generic_file_fsync); ··· 966 956 /* 967 957 * No-op implementation of ->fsync for in-memory filesystems. 968 958 */ 969 - int noop_fsync(struct file *file, int datasync) 959 + int noop_fsync(struct file *file, loff_t start, loff_t end, int datasync) 970 960 { 971 961 return 0; 972 962 }
+1 -3
fs/logfs/dir.c
··· 371 371 page_cache_release(page); 372 372 373 373 inode = logfs_iget(dir->i_sb, ino); 374 - if (IS_ERR(inode)) { 374 + if (IS_ERR(inode)) 375 375 printk(KERN_ERR"LogFS: Cannot read inode #%llx for dentry (%lx, %lx)n", 376 376 ino, dir->i_ino, index); 377 - return ERR_CAST(inode); 378 - } 379 377 return d_splice_alias(inode, dentry); 380 378 } 381 379
+10 -1
fs/logfs/file.c
··· 219 219 } 220 220 } 221 221 222 - int logfs_fsync(struct file *file, int datasync) 222 + int logfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) 223 223 { 224 224 struct super_block *sb = file->f_mapping->host->i_sb; 225 + struct inode *inode = file->f_mapping->host; 226 + int ret; 225 227 228 + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); 229 + if (ret) 230 + return ret; 231 + 232 + mutex_lock(&inode->i_mutex); 226 233 logfs_write_anchor(sb); 234 + mutex_unlock(&inode->i_mutex); 235 + 227 236 return 0; 228 237 } 229 238
+1 -1
fs/logfs/logfs.h
··· 506 506 extern const struct address_space_operations logfs_reg_aops; 507 507 int logfs_readpage(struct file *file, struct page *page); 508 508 long logfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 509 - int logfs_fsync(struct file *file, int datasync); 509 + int logfs_fsync(struct file *file, loff_t start, loff_t end, int datasync); 510 510 511 511 /* gc.c */ 512 512 u32 get_best_cand(struct super_block *sb, struct candidate_list *list, u32 *ec);
+1 -2
fs/minix/inode.c
··· 596 596 597 597 int minix_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) 598 598 { 599 - struct inode *dir = dentry->d_parent->d_inode; 600 - struct super_block *sb = dir->i_sb; 599 + struct super_block *sb = dentry->d_sb; 601 600 generic_fillattr(dentry->d_inode, stat); 602 601 if (INODE_VERSION(dentry->d_inode) == MINIX_V1) 603 602 stat->blocks = (BLOCK_SIZE / 512) * V1_minix_blocks(stat->size, sb);
+188 -274
fs/namei.c
··· 176 176 /* 177 177 * This does basic POSIX ACL permission checking 178 178 */ 179 - static int acl_permission_check(struct inode *inode, int mask, unsigned int flags, 180 - int (*check_acl)(struct inode *inode, int mask, unsigned int flags)) 179 + static int acl_permission_check(struct inode *inode, int mask) 181 180 { 181 + int (*check_acl)(struct inode *inode, int mask); 182 182 unsigned int mode = inode->i_mode; 183 183 184 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC; 184 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC | MAY_NOT_BLOCK; 185 185 186 186 if (current_user_ns() != inode_userns(inode)) 187 187 goto other_perms; ··· 189 189 if (current_fsuid() == inode->i_uid) 190 190 mode >>= 6; 191 191 else { 192 + check_acl = inode->i_op->check_acl; 192 193 if (IS_POSIXACL(inode) && (mode & S_IRWXG) && check_acl) { 193 - int error = check_acl(inode, mask, flags); 194 + int error = check_acl(inode, mask); 194 195 if (error != -EAGAIN) 195 196 return error; 196 197 } ··· 204 203 /* 205 204 * If the DACs are ok we don't need any capability check. 206 205 */ 207 - if ((mask & ~mode) == 0) 206 + if ((mask & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0) 208 207 return 0; 209 208 return -EACCES; 210 209 } ··· 213 212 * generic_permission - check for access rights on a Posix-like filesystem 214 213 * @inode: inode to check access rights for 215 214 * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) 216 - * @check_acl: optional callback to check for Posix ACLs 217 - * @flags: IPERM_FLAG_ flags. 218 215 * 219 216 * Used to check for read/write/execute permissions on a file. 220 217 * We use "fsuid" for this, letting us set arbitrary permissions ··· 223 224 * request cannot be satisfied (eg. requires blocking or too much complexity). 224 225 * It would then be called again in ref-walk mode. 
225 226 */ 226 - int generic_permission(struct inode *inode, int mask, unsigned int flags, 227 - int (*check_acl)(struct inode *inode, int mask, unsigned int flags)) 227 + int generic_permission(struct inode *inode, int mask) 228 228 { 229 229 int ret; 230 230 231 231 /* 232 232 * Do the basic POSIX ACL permission checks. 233 233 */ 234 - ret = acl_permission_check(inode, mask, flags, check_acl); 234 + ret = acl_permission_check(inode, mask); 235 235 if (ret != -EACCES) 236 236 return ret; 237 237 238 + if (S_ISDIR(inode->i_mode)) { 239 + /* DACs are overridable for directories */ 240 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE)) 241 + return 0; 242 + if (!(mask & MAY_WRITE)) 243 + if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH)) 244 + return 0; 245 + return -EACCES; 246 + } 238 247 /* 239 248 * Read/write DACs are always overridable. 240 - * Executable DACs are overridable for all directories and 241 - * for non-directories that have least one exec bit set. 249 + * Executable DACs are overridable when there is 250 + * at least one exec bit set. 242 251 */ 243 - if (!(mask & MAY_EXEC) || execute_ok(inode)) 252 + if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO)) 244 253 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE)) 245 254 return 0; 246 255 ··· 256 249 * Searching includes executable on directories, else just read. 
257 250 */ 258 251 mask &= MAY_READ | MAY_WRITE | MAY_EXEC; 259 - if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) 252 + if (mask == MAY_READ) 260 253 if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH)) 261 254 return 0; 262 255 ··· 295 288 } 296 289 297 290 if (inode->i_op->permission) 298 - retval = inode->i_op->permission(inode, mask, 0); 291 + retval = inode->i_op->permission(inode, mask); 299 292 else 300 - retval = generic_permission(inode, mask, 0, 301 - inode->i_op->check_acl); 293 + retval = generic_permission(inode, mask); 302 294 303 295 if (retval) 304 296 return retval; ··· 307 301 return retval; 308 302 309 303 return security_inode_permission(inode, mask); 310 - } 311 - 312 - /** 313 - * file_permission - check for additional access rights to a given file 314 - * @file: file to check access rights for 315 - * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) 316 - * 317 - * Used to check for read/write/execute permissions on an already opened 318 - * file. 319 - * 320 - * Note: 321 - * Do not use this function in new code. All access checks should 322 - * be done using inode_permission(). 323 - */ 324 - int file_permission(struct file *file, int mask) 325 - { 326 - return inode_permission(file->f_path.dentry->d_inode, mask); 327 - } 328 - 329 - /* 330 - * get_write_access() gets write permission for a file. 331 - * put_write_access() releases this write permission. 332 - * This is used for regular files. 333 - * We cannot support write (and maybe mmap read-write shared) accesses and 334 - * MAP_DENYWRITE mmappings simultaneously. The i_writecount field of an inode 335 - * can have the following values: 336 - * 0: no writers, no VM_DENYWRITE mappings 337 - * < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist 338 - * > 0: (i_writecount) users are writing to the file. 
339 - * 340 - * Normally we operate on that counter with atomic_{inc,dec} and it's safe 341 - * except for the cases where we don't hold i_writecount yet. Then we need to 342 - * use {get,deny}_write_access() - these functions check the sign and refuse 343 - * to do the change if sign is wrong. Exclusion between them is provided by 344 - * the inode->i_lock spinlock. 345 - */ 346 - 347 - int get_write_access(struct inode * inode) 348 - { 349 - spin_lock(&inode->i_lock); 350 - if (atomic_read(&inode->i_writecount) < 0) { 351 - spin_unlock(&inode->i_lock); 352 - return -ETXTBSY; 353 - } 354 - atomic_inc(&inode->i_writecount); 355 - spin_unlock(&inode->i_lock); 356 - 357 - return 0; 358 - } 359 - 360 - int deny_write_access(struct file * file) 361 - { 362 - struct inode *inode = file->f_path.dentry->d_inode; 363 - 364 - spin_lock(&inode->i_lock); 365 - if (atomic_read(&inode->i_writecount) > 0) { 366 - spin_unlock(&inode->i_lock); 367 - return -ETXTBSY; 368 - } 369 - atomic_dec(&inode->i_writecount); 370 - spin_unlock(&inode->i_lock); 371 - 372 - return 0; 373 304 } 374 305 375 306 /** ··· 435 492 return dentry->d_op->d_revalidate(dentry, nd); 436 493 } 437 494 438 - static struct dentry * 439 - do_revalidate(struct dentry *dentry, struct nameidata *nd) 440 - { 441 - int status = d_revalidate(dentry, nd); 442 - if (unlikely(status <= 0)) { 443 - /* 444 - * The dentry failed validation. 445 - * If d_revalidate returned 0 attempt to invalidate 446 - * the dentry otherwise d_revalidate is asking us 447 - * to return a fail status. 
448 - */ 449 - if (status < 0) { 450 - dput(dentry); 451 - dentry = ERR_PTR(status); 452 - } else if (!d_invalidate(dentry)) { 453 - dput(dentry); 454 - dentry = NULL; 455 - } 456 - } 457 - return dentry; 458 - } 459 - 460 495 /** 461 496 * complete_walk - successful completion of path walk 462 497 * @nd: pointer nameidata ··· 487 566 488 567 path_put(&nd->path); 489 568 return status; 490 - } 491 - 492 - /* 493 - * Short-cut version of permission(), for calling on directories 494 - * during pathname resolution. Combines parts of permission() 495 - * and generic_permission(), and tests ONLY for MAY_EXEC permission. 496 - * 497 - * If appropriate, check DAC only. If not appropriate, or 498 - * short-cut DAC fails, then call ->permission() to do more 499 - * complete permission check. 500 - */ 501 - static inline int exec_permission(struct inode *inode, unsigned int flags) 502 - { 503 - int ret; 504 - struct user_namespace *ns = inode_userns(inode); 505 - 506 - if (inode->i_op->permission) { 507 - ret = inode->i_op->permission(inode, MAY_EXEC, flags); 508 - } else { 509 - ret = acl_permission_check(inode, MAY_EXEC, flags, 510 - inode->i_op->check_acl); 511 - } 512 - if (likely(!ret)) 513 - goto ok; 514 - if (ret == -ECHILD) 515 - return ret; 516 - 517 - if (ns_capable(ns, CAP_DAC_OVERRIDE) || 518 - ns_capable(ns, CAP_DAC_READ_SEARCH)) 519 - goto ok; 520 - 521 - return ret; 522 - ok: 523 - return security_inode_exec_permission(inode, flags); 524 569 } 525 570 526 571 static __always_inline void set_root(struct nameidata *nd) ··· 663 776 /* We don't want to mount if someone supplied AT_NO_AUTOMOUNT 664 777 * and this is the terminal part of the path. 
665 778 */ 666 - if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_CONTINUE)) 779 + if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_PARENT)) 667 780 return -EISDIR; /* we actually want to stop here */ 668 781 669 782 /* We want to mount if someone is trying to open/create a file of any ··· 675 788 * appended a '/' to the name. 676 789 */ 677 790 if (!(flags & LOOKUP_FOLLOW) && 678 - !(flags & (LOOKUP_CONTINUE | LOOKUP_DIRECTORY | 791 + !(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY | 679 792 LOOKUP_OPEN | LOOKUP_CREATE))) 680 793 return -EISDIR; 681 794 ··· 694 807 * the path being looked up; if it wasn't then the remainder of 695 808 * the path is inaccessible and we should say so. 696 809 */ 697 - if (PTR_ERR(mnt) == -EISDIR && (flags & LOOKUP_CONTINUE)) 810 + if (PTR_ERR(mnt) == -EISDIR && (flags & LOOKUP_PARENT)) 698 811 return -EREMOTE; 699 812 return PTR_ERR(mnt); 700 813 } ··· 1021 1134 } 1022 1135 1023 1136 /* 1137 + * We already have a dentry, but require a lookup to be performed on the parent 1138 + * directory to fill in d_inode. Returns the new dentry, or ERR_PTR on error. 1139 + * parent->d_inode->i_mutex must be held. d_lookup must have verified that no 1140 + * child exists while under i_mutex. 1141 + */ 1142 + static struct dentry *d_inode_lookup(struct dentry *parent, struct dentry *dentry, 1143 + struct nameidata *nd) 1144 + { 1145 + struct inode *inode = parent->d_inode; 1146 + struct dentry *old; 1147 + 1148 + /* Don't create child dentry for a dead directory. */ 1149 + if (unlikely(IS_DEADDIR(inode))) 1150 + return ERR_PTR(-ENOENT); 1151 + 1152 + old = inode->i_op->lookup(inode, dentry, nd); 1153 + if (unlikely(old)) { 1154 + dput(dentry); 1155 + dentry = old; 1156 + } 1157 + return dentry; 1158 + } 1159 + 1160 + /* 1024 1161 * It's more convoluted than I'd like it to be, but... it's still fairly 1025 1162 * small and for now I'd prefer to have fast path as straight as possible. 1026 1163 * It _is_ time-critical. 
··· 1083 1172 goto unlazy; 1084 1173 } 1085 1174 } 1175 + if (unlikely(d_need_lookup(dentry))) 1176 + goto unlazy; 1086 1177 path->mnt = mnt; 1087 1178 path->dentry = dentry; 1088 1179 if (unlikely(!__follow_mount_rcu(nd, path, inode))) ··· 1099 1186 dentry = __d_lookup(parent, name); 1100 1187 } 1101 1188 1189 + if (dentry && unlikely(d_need_lookup(dentry))) { 1190 + dput(dentry); 1191 + dentry = NULL; 1192 + } 1102 1193 retry: 1103 1194 if (unlikely(!dentry)) { 1104 1195 struct inode *dir = parent->d_inode; ··· 1112 1195 dentry = d_lookup(parent, name); 1113 1196 if (likely(!dentry)) { 1114 1197 dentry = d_alloc_and_lookup(parent, name, nd); 1198 + if (IS_ERR(dentry)) { 1199 + mutex_unlock(&dir->i_mutex); 1200 + return PTR_ERR(dentry); 1201 + } 1202 + /* known good */ 1203 + need_reval = 0; 1204 + status = 1; 1205 + } else if (unlikely(d_need_lookup(dentry))) { 1206 + dentry = d_inode_lookup(parent, dentry, nd); 1115 1207 if (IS_ERR(dentry)) { 1116 1208 mutex_unlock(&dir->i_mutex); 1117 1209 return PTR_ERR(dentry); ··· 1160 1234 static inline int may_lookup(struct nameidata *nd) 1161 1235 { 1162 1236 if (nd->flags & LOOKUP_RCU) { 1163 - int err = exec_permission(nd->inode, IPERM_FLAG_RCU); 1237 + int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK); 1164 1238 if (err != -ECHILD) 1165 1239 return err; 1166 1240 if (unlazy_walk(nd, NULL)) 1167 1241 return -ECHILD; 1168 1242 } 1169 - return exec_permission(nd->inode, 0); 1243 + return inode_permission(nd->inode, MAY_EXEC); 1170 1244 } 1171 1245 1172 1246 static inline int handle_dots(struct nameidata *nd, int type) ··· 1280 1354 { 1281 1355 struct path next; 1282 1356 int err; 1283 - unsigned int lookup_flags = nd->flags; 1284 1357 1285 1358 while (*name=='/') 1286 1359 name++; ··· 1292 1367 struct qstr this; 1293 1368 unsigned int c; 1294 1369 int type; 1295 - 1296 - nd->flags |= LOOKUP_CONTINUE; 1297 1370 1298 1371 err = may_lookup(nd); 1299 1372 if (err) ··· 1354 1431 /* here ends the main loop */ 1355 
1432 1356 1433 last_component: 1357 - /* Clear LOOKUP_CONTINUE iff it was previously unset */ 1358 - nd->flags &= lookup_flags | ~LOOKUP_CONTINUE; 1359 1434 nd->last = this; 1360 1435 nd->last_type = type; 1361 1436 return 0; ··· 1436 1515 if (!S_ISDIR(dentry->d_inode->i_mode)) 1437 1516 goto fput_fail; 1438 1517 1439 - retval = file_permission(file, MAY_EXEC); 1518 + retval = inode_permission(dentry->d_inode, MAY_EXEC); 1440 1519 if (retval) 1441 1520 goto fput_fail; 1442 1521 } ··· 1574 1653 * @mnt: pointer to vfs mount of the base directory 1575 1654 * @name: pointer to file name 1576 1655 * @flags: lookup flags 1577 - * @nd: pointer to nameidata 1656 + * @path: pointer to struct path to fill 1578 1657 */ 1579 1658 int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt, 1580 1659 const char *name, unsigned int flags, 1581 - struct nameidata *nd) 1660 + struct path *path) 1582 1661 { 1583 - nd->root.dentry = dentry; 1584 - nd->root.mnt = mnt; 1662 + struct nameidata nd; 1663 + int err; 1664 + nd.root.dentry = dentry; 1665 + nd.root.mnt = mnt; 1666 + BUG_ON(flags & LOOKUP_PARENT); 1585 1667 /* the first argument of do_path_lookup() is ignored with LOOKUP_ROOT */ 1586 - return do_path_lookup(AT_FDCWD, name, flags | LOOKUP_ROOT, nd); 1668 + err = do_path_lookup(AT_FDCWD, name, flags | LOOKUP_ROOT, &nd); 1669 + if (!err) 1670 + *path = nd.path; 1671 + return err; 1587 1672 } 1588 1673 1589 1674 static struct dentry *__lookup_hash(struct qstr *name, ··· 1599 1672 struct dentry *dentry; 1600 1673 int err; 1601 1674 1602 - err = exec_permission(inode, 0); 1675 + err = inode_permission(inode, MAY_EXEC); 1603 1676 if (err) 1604 1677 return ERR_PTR(err); 1605 1678 ··· 1610 1683 */ 1611 1684 dentry = d_lookup(base, name); 1612 1685 1613 - if (dentry && (dentry->d_flags & DCACHE_OP_REVALIDATE)) 1614 - dentry = do_revalidate(dentry, nd); 1686 + if (dentry && d_need_lookup(dentry)) { 1687 + /* 1688 + * __lookup_hash is called with the parent dir's i_mutex already 1689 
+ * held, so we are good to go here. 1690 + */ 1691 + dentry = d_inode_lookup(base, dentry, nd); 1692 + if (IS_ERR(dentry)) 1693 + return dentry; 1694 + } 1695 + 1696 + if (dentry && (dentry->d_flags & DCACHE_OP_REVALIDATE)) { 1697 + int status = d_revalidate(dentry, nd); 1698 + if (unlikely(status <= 0)) { 1699 + /* 1700 + * The dentry failed validation. 1701 + * If d_revalidate returned 0 attempt to invalidate 1702 + * the dentry otherwise d_revalidate is asking us 1703 + * to return a fail status. 1704 + */ 1705 + if (status < 0) { 1706 + dput(dentry); 1707 + return ERR_PTR(status); 1708 + } else if (!d_invalidate(dentry)) { 1709 + dput(dentry); 1710 + dentry = NULL; 1711 + } 1712 + } 1713 + } 1615 1714 1616 1715 if (!dentry) 1617 1716 dentry = d_alloc_and_lookup(base, name, nd); ··· 1965 2012 return error; 1966 2013 } 1967 2014 1968 - /* 1969 - * Note that while the flag value (low two bits) for sys_open means: 1970 - * 00 - read-only 1971 - * 01 - write-only 1972 - * 10 - read-write 1973 - * 11 - special 1974 - * it is changed into 1975 - * 00 - no permissions needed 1976 - * 01 - read-permission 1977 - * 10 - write-permission 1978 - * 11 - read-write 1979 - * for the internal routines (ie open_namei()/follow_link() etc) 1980 - * This is more logical, and also allows the 00 "no perm needed" 1981 - * to be used for symlinks (where the permissions are checked 1982 - * later). 1983 - * 1984 - */ 1985 2015 static inline int open_to_namei_flags(int flag) 1986 2016 { 1987 - if ((flag+1) & O_ACCMODE) 1988 - flag++; 2017 + if ((flag & O_ACCMODE) == 3) 2018 + flag--; 1989 2019 return flag; 1990 2020 } 1991 2021 ··· 2263 2327 return file; 2264 2328 } 2265 2329 2266 - /** 2267 - * lookup_create - lookup a dentry, creating it if it doesn't exist 2268 - * @nd: nameidata info 2269 - * @is_dir: directory flag 2270 - * 2271 - * Simple function to lookup and return a dentry and create it 2272 - * if it doesn't exist. Is SMP-safe. 
2273 - * 2274 - * Returns with nd->path.dentry->d_inode->i_mutex locked. 2275 - */ 2276 - struct dentry *lookup_create(struct nameidata *nd, int is_dir) 2330 + struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path, int is_dir) 2277 2331 { 2278 2332 struct dentry *dentry = ERR_PTR(-EEXIST); 2333 + struct nameidata nd; 2334 + int error = do_path_lookup(dfd, pathname, LOOKUP_PARENT, &nd); 2335 + if (error) 2336 + return ERR_PTR(error); 2279 2337 2280 - mutex_lock_nested(&nd->path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); 2281 2338 /* 2282 2339 * Yucky last component or no last component at all? 2283 2340 * (foo/., foo/.., /////) 2284 2341 */ 2285 - if (nd->last_type != LAST_NORM) 2286 - goto fail; 2287 - nd->flags &= ~LOOKUP_PARENT; 2288 - nd->flags |= LOOKUP_CREATE | LOOKUP_EXCL; 2289 - nd->intent.open.flags = O_EXCL; 2342 + if (nd.last_type != LAST_NORM) 2343 + goto out; 2344 + nd.flags &= ~LOOKUP_PARENT; 2345 + nd.flags |= LOOKUP_CREATE | LOOKUP_EXCL; 2346 + nd.intent.open.flags = O_EXCL; 2290 2347 2291 2348 /* 2292 2349 * Do the final lookup. 2293 2350 */ 2294 - dentry = lookup_hash(nd); 2351 + mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); 2352 + dentry = lookup_hash(&nd); 2295 2353 if (IS_ERR(dentry)) 2296 2354 goto fail; 2297 2355 ··· 2297 2367 * all is fine. Let's be bastards - you had / on the end, you've 2298 2368 * been asking for (non-existent) directory. -ENOENT for you. 
2299 2369 */ 2300 - if (unlikely(!is_dir && nd->last.name[nd->last.len])) { 2370 + if (unlikely(!is_dir && nd.last.name[nd.last.len])) { 2301 2371 dput(dentry); 2302 2372 dentry = ERR_PTR(-ENOENT); 2373 + goto fail; 2303 2374 } 2375 + *path = nd.path; 2304 2376 return dentry; 2305 2377 eexist: 2306 2378 dput(dentry); 2307 2379 dentry = ERR_PTR(-EEXIST); 2308 2380 fail: 2381 + mutex_unlock(&nd.path.dentry->d_inode->i_mutex); 2382 + out: 2383 + path_put(&nd.path); 2309 2384 return dentry; 2310 2385 } 2311 - EXPORT_SYMBOL_GPL(lookup_create); 2386 + EXPORT_SYMBOL(kern_path_create); 2387 + 2388 + struct dentry *user_path_create(int dfd, const char __user *pathname, struct path *path, int is_dir) 2389 + { 2390 + char *tmp = getname(pathname); 2391 + struct dentry *res; 2392 + if (IS_ERR(tmp)) 2393 + return ERR_CAST(tmp); 2394 + res = kern_path_create(dfd, tmp, path, is_dir); 2395 + putname(tmp); 2396 + return res; 2397 + } 2398 + EXPORT_SYMBOL(user_path_create); 2312 2399 2313 2400 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) 2314 2401 { ··· 2375 2428 SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode, 2376 2429 unsigned, dev) 2377 2430 { 2378 - int error; 2379 - char *tmp; 2380 2431 struct dentry *dentry; 2381 - struct nameidata nd; 2432 + struct path path; 2433 + int error; 2382 2434 2383 2435 if (S_ISDIR(mode)) 2384 2436 return -EPERM; 2385 2437 2386 - error = user_path_parent(dfd, filename, &nd, &tmp); 2387 - if (error) 2388 - return error; 2438 + dentry = user_path_create(dfd, filename, &path, 0); 2439 + if (IS_ERR(dentry)) 2440 + return PTR_ERR(dentry); 2389 2441 2390 - dentry = lookup_create(&nd, 0); 2391 - if (IS_ERR(dentry)) { 2392 - error = PTR_ERR(dentry); 2393 - goto out_unlock; 2394 - } 2395 - if (!IS_POSIXACL(nd.path.dentry->d_inode)) 2442 + if (!IS_POSIXACL(path.dentry->d_inode)) 2396 2443 mode &= ~current_umask(); 2397 2444 error = may_mknod(mode); 2398 2445 if (error) 2399 2446 goto out_dput; 2400 
- error = mnt_want_write(nd.path.mnt); 2447 + error = mnt_want_write(path.mnt); 2401 2448 if (error) 2402 2449 goto out_dput; 2403 - error = security_path_mknod(&nd.path, dentry, mode, dev); 2450 + error = security_path_mknod(&path, dentry, mode, dev); 2404 2451 if (error) 2405 2452 goto out_drop_write; 2406 2453 switch (mode & S_IFMT) { 2407 2454 case 0: case S_IFREG: 2408 - error = vfs_create(nd.path.dentry->d_inode,dentry,mode,&nd); 2455 + error = vfs_create(path.dentry->d_inode,dentry,mode,NULL); 2409 2456 break; 2410 2457 case S_IFCHR: case S_IFBLK: 2411 - error = vfs_mknod(nd.path.dentry->d_inode,dentry,mode, 2458 + error = vfs_mknod(path.dentry->d_inode,dentry,mode, 2412 2459 new_decode_dev(dev)); 2413 2460 break; 2414 2461 case S_IFIFO: case S_IFSOCK: 2415 - error = vfs_mknod(nd.path.dentry->d_inode,dentry,mode,0); 2462 + error = vfs_mknod(path.dentry->d_inode,dentry,mode,0); 2416 2463 break; 2417 2464 } 2418 2465 out_drop_write: 2419 - mnt_drop_write(nd.path.mnt); 2466 + mnt_drop_write(path.mnt); 2420 2467 out_dput: 2421 2468 dput(dentry); 2422 - out_unlock: 2423 - mutex_unlock(&nd.path.dentry->d_inode->i_mutex); 2424 - path_put(&nd.path); 2425 - putname(tmp); 2469 + mutex_unlock(&path.dentry->d_inode->i_mutex); 2470 + path_put(&path); 2426 2471 2427 2472 return error; 2428 2473 } ··· 2447 2508 2448 2509 SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode) 2449 2510 { 2450 - int error = 0; 2451 - char * tmp; 2452 2511 struct dentry *dentry; 2453 - struct nameidata nd; 2512 + struct path path; 2513 + int error; 2454 2514 2455 - error = user_path_parent(dfd, pathname, &nd, &tmp); 2456 - if (error) 2457 - goto out_err; 2458 - 2459 - dentry = lookup_create(&nd, 1); 2460 - error = PTR_ERR(dentry); 2515 + dentry = user_path_create(dfd, pathname, &path, 1); 2461 2516 if (IS_ERR(dentry)) 2462 - goto out_unlock; 2517 + return PTR_ERR(dentry); 2463 2518 2464 - if (!IS_POSIXACL(nd.path.dentry->d_inode)) 2519 + if 
(!IS_POSIXACL(path.dentry->d_inode)) 2465 2520 mode &= ~current_umask(); 2466 - error = mnt_want_write(nd.path.mnt); 2521 + error = mnt_want_write(path.mnt); 2467 2522 if (error) 2468 2523 goto out_dput; 2469 - error = security_path_mkdir(&nd.path, dentry, mode); 2524 + error = security_path_mkdir(&path, dentry, mode); 2470 2525 if (error) 2471 2526 goto out_drop_write; 2472 - error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode); 2527 + error = vfs_mkdir(path.dentry->d_inode, dentry, mode); 2473 2528 out_drop_write: 2474 - mnt_drop_write(nd.path.mnt); 2529 + mnt_drop_write(path.mnt); 2475 2530 out_dput: 2476 2531 dput(dentry); 2477 - out_unlock: 2478 - mutex_unlock(&nd.path.dentry->d_inode->i_mutex); 2479 - path_put(&nd.path); 2480 - putname(tmp); 2481 - out_err: 2532 + mutex_unlock(&path.dentry->d_inode->i_mutex); 2533 + path_put(&path); 2482 2534 return error; 2483 2535 } 2484 2536 ··· 2729 2799 { 2730 2800 int error; 2731 2801 char *from; 2732 - char *to; 2733 2802 struct dentry *dentry; 2734 - struct nameidata nd; 2803 + struct path path; 2735 2804 2736 2805 from = getname(oldname); 2737 2806 if (IS_ERR(from)) 2738 2807 return PTR_ERR(from); 2739 2808 2740 - error = user_path_parent(newdfd, newname, &nd, &to); 2741 - if (error) 2742 - goto out_putname; 2743 - 2744 - dentry = lookup_create(&nd, 0); 2809 + dentry = user_path_create(newdfd, newname, &path, 0); 2745 2810 error = PTR_ERR(dentry); 2746 2811 if (IS_ERR(dentry)) 2747 - goto out_unlock; 2812 + goto out_putname; 2748 2813 2749 - error = mnt_want_write(nd.path.mnt); 2814 + error = mnt_want_write(path.mnt); 2750 2815 if (error) 2751 2816 goto out_dput; 2752 - error = security_path_symlink(&nd.path, dentry, from); 2817 + error = security_path_symlink(&path, dentry, from); 2753 2818 if (error) 2754 2819 goto out_drop_write; 2755 - error = vfs_symlink(nd.path.dentry->d_inode, dentry, from); 2820 + error = vfs_symlink(path.dentry->d_inode, dentry, from); 2756 2821 out_drop_write: 2757 - 
mnt_drop_write(nd.path.mnt); 2822 + mnt_drop_write(path.mnt); 2758 2823 out_dput: 2759 2824 dput(dentry); 2760 - out_unlock: 2761 - mutex_unlock(&nd.path.dentry->d_inode->i_mutex); 2762 - path_put(&nd.path); 2763 - putname(to); 2825 + mutex_unlock(&path.dentry->d_inode->i_mutex); 2826 + path_put(&path); 2764 2827 out_putname: 2765 2828 putname(from); 2766 2829 return error; ··· 2818 2895 int, newdfd, const char __user *, newname, int, flags) 2819 2896 { 2820 2897 struct dentry *new_dentry; 2821 - struct nameidata nd; 2822 - struct path old_path; 2898 + struct path old_path, new_path; 2823 2899 int how = 0; 2824 2900 int error; 2825 - char *to; 2826 2901 2827 2902 if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0) 2828 2903 return -EINVAL; ··· 2842 2921 if (error) 2843 2922 return error; 2844 2923 2845 - error = user_path_parent(newdfd, newname, &nd, &to); 2846 - if (error) 2847 - goto out; 2848 - error = -EXDEV; 2849 - if (old_path.mnt != nd.path.mnt) 2850 - goto out_release; 2851 - new_dentry = lookup_create(&nd, 0); 2924 + new_dentry = user_path_create(newdfd, newname, &new_path, 0); 2852 2925 error = PTR_ERR(new_dentry); 2853 2926 if (IS_ERR(new_dentry)) 2854 - goto out_unlock; 2855 - error = mnt_want_write(nd.path.mnt); 2927 + goto out; 2928 + 2929 + error = -EXDEV; 2930 + if (old_path.mnt != new_path.mnt) 2931 + goto out_dput; 2932 + error = mnt_want_write(new_path.mnt); 2856 2933 if (error) 2857 2934 goto out_dput; 2858 - error = security_path_link(old_path.dentry, &nd.path, new_dentry); 2935 + error = security_path_link(old_path.dentry, &new_path, new_dentry); 2859 2936 if (error) 2860 2937 goto out_drop_write; 2861 - error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry); 2938 + error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry); 2862 2939 out_drop_write: 2863 - mnt_drop_write(nd.path.mnt); 2940 + mnt_drop_write(new_path.mnt); 2864 2941 out_dput: 2865 2942 dput(new_dentry); 2866 - out_unlock: 2867 - 
mutex_unlock(&nd.path.dentry->d_inode->i_mutex); 2868 - out_release: 2869 - path_put(&nd.path); 2870 - putname(to); 2943 + mutex_unlock(&new_path.dentry->d_inode->i_mutex); 2944 + path_put(&new_path); 2871 2945 out: 2872 2946 path_put(&old_path); 2873 2947 ··· 3268 3352 EXPORT_SYMBOL(__page_symlink); 3269 3353 EXPORT_SYMBOL(page_symlink); 3270 3354 EXPORT_SYMBOL(page_symlink_inode_operations); 3271 - EXPORT_SYMBOL(kern_path_parent); 3272 3355 EXPORT_SYMBOL(kern_path); 3273 3356 EXPORT_SYMBOL(vfs_path_lookup); 3274 3357 EXPORT_SYMBOL(inode_permission); 3275 - EXPORT_SYMBOL(file_permission); 3276 3358 EXPORT_SYMBOL(unlock_rename); 3277 3359 EXPORT_SYMBOL(vfs_create); 3278 3360 EXPORT_SYMBOL(vfs_follow_link);
+2 -2
fs/namespace.c
··· 934 934 int res = 0; 935 935 936 936 br_read_lock(vfsmount_lock); 937 - if (p->event != ns->event) { 938 - p->event = ns->event; 937 + if (p->m.poll_event != ns->event) { 938 + p->m.poll_event = ns->event; 939 939 res = 1; 940 940 } 941 941 br_read_unlock(vfsmount_lock);
+2 -2
fs/ncpfs/file.c
··· 20 20 21 21 #include "ncp_fs.h" 22 22 23 - static int ncp_fsync(struct file *file, int datasync) 23 + static int ncp_fsync(struct file *file, loff_t start, loff_t end, int datasync) 24 24 { 25 - return 0; 25 + return filemap_write_and_wait_range(file->f_mapping, start, end); 26 26 } 27 27 28 28 /*
+4 -5
fs/nfs/cache_lib.c
··· 113 113 114 114 int nfs_cache_register(struct cache_detail *cd) 115 115 { 116 - struct nameidata nd; 117 116 struct vfsmount *mnt; 117 + struct path path; 118 118 int ret; 119 119 120 120 mnt = rpc_get_mount(); 121 121 if (IS_ERR(mnt)) 122 122 return PTR_ERR(mnt); 123 - ret = vfs_path_lookup(mnt->mnt_root, mnt, "/cache", 0, &nd); 123 + ret = vfs_path_lookup(mnt->mnt_root, mnt, "/cache", 0, &path); 124 124 if (ret) 125 125 goto err; 126 - ret = sunrpc_cache_register_pipefs(nd.path.dentry, 127 - cd->name, 0600, cd); 128 - path_put(&nd.path); 126 + ret = sunrpc_cache_register_pipefs(path.dentry, cd->name, 0600, cd); 127 + path_put(&path); 129 128 if (!ret) 130 129 return ret; 131 130 err:
+48 -39
fs/nfs/dir.c
··· 56 56 static int nfs_mknod(struct inode *, struct dentry *, int, dev_t); 57 57 static int nfs_rename(struct inode *, struct dentry *, 58 58 struct inode *, struct dentry *); 59 - static int nfs_fsync_dir(struct file *, int); 59 + static int nfs_fsync_dir(struct file *, loff_t, loff_t, int); 60 60 static loff_t nfs_llseek_dir(struct file *, loff_t, int); 61 61 static void nfs_readdir_clear_array(struct page*); 62 62 ··· 945 945 * All directory operations under NFS are synchronous, so fsync() 946 946 * is a dummy operation. 947 947 */ 948 - static int nfs_fsync_dir(struct file *filp, int datasync) 948 + static int nfs_fsync_dir(struct file *filp, loff_t start, loff_t end, 949 + int datasync) 949 950 { 950 951 struct dentry *dentry = filp->f_path.dentry; 952 + struct inode *inode = dentry->d_inode; 951 953 952 954 dfprintk(FILE, "NFS: fsync dir(%s/%s) datasync %d\n", 953 955 dentry->d_parent->d_name.name, dentry->d_name.name, 954 956 datasync); 955 957 958 + mutex_lock(&inode->i_mutex); 956 959 nfs_inc_stats(dentry->d_inode, NFSIOS_VFSFSYNC); 960 + mutex_unlock(&inode->i_mutex); 957 961 return 0; 958 962 } 959 963 ··· 1001 997 * Return the intent data that applies to this particular path component 1002 998 * 1003 999 * Note that the current set of intents only apply to the very last 1004 - * component of the path. 1005 - * We check for this using LOOKUP_CONTINUE and LOOKUP_PARENT. 1000 + * component of the path and none of them is set before that last 1001 + * component. 1006 1002 */ 1007 1003 static inline unsigned int nfs_lookup_check_intent(struct nameidata *nd, 1008 1004 unsigned int mask) 1009 1005 { 1010 - if (nd->flags & (LOOKUP_CONTINUE|LOOKUP_PARENT)) 1011 - return 0; 1012 1006 return nd->flags & mask; 1013 1007 } 1014 1008 ··· 1340 1338 return 0; 1341 1339 /* Are we trying to write to a read only partition? 
*/ 1342 1340 if (__mnt_is_readonly(nd->path.mnt) && 1343 - (nd->intent.open.flags & (O_CREAT|O_TRUNC|FMODE_WRITE))) 1341 + (nd->intent.open.flags & (O_CREAT|O_TRUNC|O_ACCMODE))) 1344 1342 return 0; 1345 1343 return 1; 1346 1344 } 1347 1345 1348 - static struct nfs_open_context *nameidata_to_nfs_open_context(struct dentry *dentry, struct nameidata *nd) 1346 + static fmode_t flags_to_mode(int flags) 1349 1347 { 1350 - struct path path = { 1351 - .mnt = nd->path.mnt, 1352 - .dentry = dentry, 1353 - }; 1348 + fmode_t res = (__force fmode_t)flags & FMODE_EXEC; 1349 + if ((flags & O_ACCMODE) != O_WRONLY) 1350 + res |= FMODE_READ; 1351 + if ((flags & O_ACCMODE) != O_RDONLY) 1352 + res |= FMODE_WRITE; 1353 + return res; 1354 + } 1355 + 1356 + static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, int open_flags) 1357 + { 1354 1358 struct nfs_open_context *ctx; 1355 1359 struct rpc_cred *cred; 1356 - fmode_t fmode = nd->intent.open.flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC); 1360 + fmode_t fmode = flags_to_mode(open_flags); 1357 1361 1358 1362 cred = rpc_lookup_cred(); 1359 1363 if (IS_ERR(cred)) 1360 1364 return ERR_CAST(cred); 1361 - ctx = alloc_nfs_open_context(&path, cred, fmode); 1365 + ctx = alloc_nfs_open_context(dentry, cred, fmode); 1362 1366 put_rpccred(cred); 1363 1367 if (ctx == NULL) 1364 1368 return ERR_PTR(-ENOMEM); ··· 1384 1376 1385 1377 /* If the open_intent is for execute, we have an extra check to make */ 1386 1378 if (ctx->mode & FMODE_EXEC) { 1387 - ret = nfs_may_open(ctx->path.dentry->d_inode, 1379 + ret = nfs_may_open(ctx->dentry->d_inode, 1388 1380 ctx->cred, 1389 1381 nd->intent.open.flags); 1390 1382 if (ret < 0) 1391 1383 goto out; 1392 1384 } 1393 - filp = lookup_instantiate_filp(nd, ctx->path.dentry, do_open); 1385 + filp = lookup_instantiate_filp(nd, ctx->dentry, do_open); 1394 1386 if (IS_ERR(filp)) 1395 1387 ret = PTR_ERR(filp); 1396 1388 else ··· 1428 1420 goto out; 1429 1421 } 1430 1422 1431 - ctx = 
nameidata_to_nfs_open_context(dentry, nd); 1423 + open_flags = nd->intent.open.flags; 1424 + 1425 + ctx = create_nfs_open_context(dentry, open_flags); 1432 1426 res = ERR_CAST(ctx); 1433 1427 if (IS_ERR(ctx)) 1434 1428 goto out; 1435 1429 1436 - open_flags = nd->intent.open.flags; 1437 1430 if (nd->flags & LOOKUP_CREATE) { 1438 1431 attr.ia_mode = nd->intent.open.create_mode; 1439 1432 attr.ia_valid = ATTR_MODE; ··· 1472 1463 res = d_add_unique(dentry, inode); 1473 1464 nfs_unblock_sillyrename(dentry->d_parent); 1474 1465 if (res != NULL) { 1475 - dput(ctx->path.dentry); 1476 - ctx->path.dentry = dget(res); 1466 + dput(ctx->dentry); 1467 + ctx->dentry = dget(res); 1477 1468 dentry = res; 1478 1469 } 1479 1470 err = nfs_intent_set_file(nd, ctx); ··· 1526 1517 /* We can't create new files, or truncate existing ones here */ 1527 1518 openflags &= ~(O_CREAT|O_EXCL|O_TRUNC); 1528 1519 1529 - ctx = nameidata_to_nfs_open_context(dentry, nd); 1520 + ctx = create_nfs_open_context(dentry, openflags); 1530 1521 ret = PTR_ERR(ctx); 1531 1522 if (IS_ERR(ctx)) 1532 1523 goto out; ··· 1579 1570 struct nfs_open_context *ctx = NULL; 1580 1571 struct iattr attr; 1581 1572 int error; 1582 - int open_flags = 0; 1573 + int open_flags = O_CREAT|O_EXCL; 1583 1574 1584 1575 dfprintk(VFS, "NFS: create(%s/%ld), %s\n", 1585 1576 dir->i_sb->s_id, dir->i_ino, dentry->d_name.name); ··· 1587 1578 attr.ia_mode = mode; 1588 1579 attr.ia_valid = ATTR_MODE; 1589 1580 1590 - if ((nd->flags & LOOKUP_CREATE) != 0) { 1581 + if (nd) 1591 1582 open_flags = nd->intent.open.flags; 1592 1583 1593 - ctx = nameidata_to_nfs_open_context(dentry, nd); 1594 - error = PTR_ERR(ctx); 1595 - if (IS_ERR(ctx)) 1596 - goto out_err_drop; 1597 - } 1584 + ctx = create_nfs_open_context(dentry, open_flags); 1585 + error = PTR_ERR(ctx); 1586 + if (IS_ERR(ctx)) 1587 + goto out_err_drop; 1598 1588 1599 1589 error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags, ctx); 1600 1590 if (error != 0) 1601 1591 goto out_put_ctx; 
1602 - if (ctx != NULL) { 1592 + if (nd) { 1603 1593 error = nfs_intent_set_file(nd, ctx); 1604 1594 if (error < 0) 1605 1595 goto out_err; 1596 + } else { 1597 + put_nfs_open_context(ctx); 1606 1598 } 1607 1599 return 0; 1608 1600 out_put_ctx: 1609 - if (ctx != NULL) 1610 - put_nfs_open_context(ctx); 1601 + put_nfs_open_context(ctx); 1611 1602 out_err_drop: 1612 1603 d_drop(dentry); 1613 1604 out_err: ··· 1669 1660 { 1670 1661 struct iattr attr; 1671 1662 int error; 1672 - int open_flags = 0; 1663 + int open_flags = O_CREAT|O_EXCL; 1673 1664 1674 1665 dfprintk(VFS, "NFS: create(%s/%ld), %s\n", 1675 1666 dir->i_sb->s_id, dir->i_ino, dentry->d_name.name); ··· 1677 1668 attr.ia_mode = mode; 1678 1669 attr.ia_valid = ATTR_MODE; 1679 1670 1680 - if ((nd->flags & LOOKUP_CREATE) != 0) 1671 + if (nd) 1681 1672 open_flags = nd->intent.open.flags; 1682 1673 1683 1674 error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags, NULL); ··· 2268 2259 { 2269 2260 int mask = 0; 2270 2261 2271 - if (openflags & FMODE_READ) 2262 + if ((openflags & O_ACCMODE) != O_WRONLY) 2272 2263 mask |= MAY_READ; 2273 - if (openflags & FMODE_WRITE) 2264 + if ((openflags & O_ACCMODE) != O_RDONLY) 2274 2265 mask |= MAY_WRITE; 2275 - if (openflags & FMODE_EXEC) 2266 + if (openflags & __FMODE_EXEC) 2276 2267 mask |= MAY_EXEC; 2277 2268 return mask; 2278 2269 } ··· 2282 2273 return nfs_do_access(inode, cred, nfs_open_permission_mask(openflags)); 2283 2274 } 2284 2275 2285 - int nfs_permission(struct inode *inode, int mask, unsigned int flags) 2276 + int nfs_permission(struct inode *inode, int mask) 2286 2277 { 2287 2278 struct rpc_cred *cred; 2288 2279 int res = 0; 2289 2280 2290 - if (flags & IPERM_FLAG_RCU) 2281 + if (mask & MAY_NOT_BLOCK) 2291 2282 return -ECHILD; 2292 2283 2293 2284 nfs_inc_stats(inode, NFSIOS_VFSACCESS); ··· 2337 2328 out_notsup: 2338 2329 res = nfs_revalidate_inode(NFS_SERVER(inode), inode); 2339 2330 if (res == 0) 2340 - res = generic_permission(inode, mask, flags, NULL); 
2331 + res = generic_permission(inode, mask); 2341 2332 goto out; 2342 2333 } 2343 2334
+2 -2
fs/nfs/direct.c
··· 284 284 loff_t pos) 285 285 { 286 286 struct nfs_open_context *ctx = dreq->ctx; 287 - struct inode *inode = ctx->path.dentry->d_inode; 287 + struct inode *inode = ctx->dentry->d_inode; 288 288 unsigned long user_addr = (unsigned long)iov->iov_base; 289 289 size_t count = iov->iov_len; 290 290 size_t rsize = NFS_SERVER(inode)->rsize; ··· 715 715 loff_t pos, int sync) 716 716 { 717 717 struct nfs_open_context *ctx = dreq->ctx; 718 - struct inode *inode = ctx->path.dentry->d_inode; 718 + struct inode *inode = ctx->dentry->d_inode; 719 719 unsigned long user_addr = (unsigned long)iov->iov_base; 720 720 size_t count = iov->iov_len; 721 721 struct rpc_task *task;
+13 -5
fs/nfs/file.c
··· 55 55 static ssize_t nfs_file_write(struct kiocb *, const struct iovec *iov, 56 56 unsigned long nr_segs, loff_t pos); 57 57 static int nfs_file_flush(struct file *, fl_owner_t id); 58 - static int nfs_file_fsync(struct file *, int datasync); 58 + static int nfs_file_fsync(struct file *, loff_t, loff_t, int datasync); 59 59 static int nfs_check_flags(int flags); 60 60 static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl); 61 61 static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl); ··· 187 187 filp->f_path.dentry->d_name.name, 188 188 offset, origin); 189 189 190 - /* origin == SEEK_END => we must revalidate the cached file length */ 191 - if (origin == SEEK_END) { 190 + /* 191 + * origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate 192 + * the cached file length 193 + */ 194 + if (origin != SEEK_SET || origin != SEEK_CUR) { 192 195 struct inode *inode = filp->f_mapping->host; 193 196 194 197 int retval = nfs_revalidate_file_size(inode, filp); ··· 308 305 * fall back to doing a synchronous write. 
309 306 */ 310 307 static int 311 - nfs_file_fsync(struct file *file, int datasync) 308 + nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync) 312 309 { 313 310 struct dentry *dentry = file->f_path.dentry; 314 311 struct nfs_open_context *ctx = nfs_file_open_context(file); ··· 316 313 int have_error, status; 317 314 int ret = 0; 318 315 319 - 320 316 dprintk("NFS: fsync file(%s/%s) datasync %d\n", 321 317 dentry->d_parent->d_name.name, dentry->d_name.name, 322 318 datasync); 319 + 320 + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); 321 + if (ret) 322 + return ret; 323 + mutex_lock(&inode->i_mutex); 323 324 324 325 nfs_inc_stats(inode, NFSIOS_VFSFSYNC); 325 326 have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags); ··· 336 329 if (!ret && !datasync) 337 330 /* application has asked for meta-data sync */ 338 331 ret = pnfs_layoutcommit_inode(inode, true); 332 + mutex_unlock(&inode->i_mutex); 339 333 return ret; 340 334 } 341 335
+11 -9
fs/nfs/inode.c
··· 567 567 struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx) 568 568 { 569 569 struct nfs_lock_context *res, *new = NULL; 570 - struct inode *inode = ctx->path.dentry->d_inode; 570 + struct inode *inode = ctx->dentry->d_inode; 571 571 572 572 spin_lock(&inode->i_lock); 573 573 res = __nfs_find_lock_context(ctx); ··· 594 594 void nfs_put_lock_context(struct nfs_lock_context *l_ctx) 595 595 { 596 596 struct nfs_open_context *ctx = l_ctx->open_context; 597 - struct inode *inode = ctx->path.dentry->d_inode; 597 + struct inode *inode = ctx->dentry->d_inode; 598 598 599 599 if (!atomic_dec_and_lock(&l_ctx->count, &inode->i_lock)) 600 600 return; ··· 620 620 return; 621 621 if (!is_sync) 622 622 return; 623 - inode = ctx->path.dentry->d_inode; 623 + inode = ctx->dentry->d_inode; 624 624 if (!list_empty(&NFS_I(inode)->open_files)) 625 625 return; 626 626 server = NFS_SERVER(inode); ··· 629 629 nfs_revalidate_inode(server, inode); 630 630 } 631 631 632 - struct nfs_open_context *alloc_nfs_open_context(struct path *path, struct rpc_cred *cred, fmode_t f_mode) 632 + struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, struct rpc_cred *cred, fmode_t f_mode) 633 633 { 634 634 struct nfs_open_context *ctx; 635 635 636 636 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); 637 637 if (ctx != NULL) { 638 - ctx->path = *path; 639 - path_get(&ctx->path); 638 + nfs_sb_active(dentry->d_sb); 639 + ctx->dentry = dget(dentry); 640 640 ctx->cred = get_rpccred(cred); 641 641 ctx->state = NULL; 642 642 ctx->mode = f_mode; ··· 658 658 659 659 static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync) 660 660 { 661 - struct inode *inode = ctx->path.dentry->d_inode; 661 + struct inode *inode = ctx->dentry->d_inode; 662 + struct super_block *sb = ctx->dentry->d_sb; 662 663 663 664 if (!list_empty(&ctx->list)) { 664 665 if (!atomic_dec_and_lock(&ctx->lock_context.count, &inode->i_lock)) ··· 672 671 NFS_PROTO(inode)->close_context(ctx, 
is_sync); 673 672 if (ctx->cred != NULL) 674 673 put_rpccred(ctx->cred); 675 - path_put(&ctx->path); 674 + dput(ctx->dentry); 675 + nfs_sb_deactive(sb); 676 676 kfree(ctx); 677 677 } 678 678 ··· 743 741 cred = rpc_lookup_cred(); 744 742 if (IS_ERR(cred)) 745 743 return PTR_ERR(cred); 746 - ctx = alloc_nfs_open_context(&filp->f_path, cred, filp->f_mode); 744 + ctx = alloc_nfs_open_context(filp->f_path.dentry, cred, filp->f_mode); 747 745 put_rpccred(cred); 748 746 if (ctx == NULL) 749 747 return -ENOMEM;
+5 -5
fs/nfs/nfs4_fs.h
··· 238 238 extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *); 239 239 extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *); 240 240 extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *); 241 - extern int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc); 241 + extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc); 242 242 extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle); 243 243 extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name, 244 244 struct nfs4_fs_locations *fs_locations, struct page *page); ··· 341 341 extern void nfs4_put_state_owner(struct nfs4_state_owner *); 342 342 extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *); 343 343 extern void nfs4_put_open_state(struct nfs4_state *); 344 - extern void nfs4_close_state(struct path *, struct nfs4_state *, fmode_t); 345 - extern void nfs4_close_sync(struct path *, struct nfs4_state *, fmode_t); 344 + extern void nfs4_close_state(struct nfs4_state *, fmode_t); 345 + extern void nfs4_close_sync(struct nfs4_state *, fmode_t); 346 346 extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t); 347 347 extern void nfs4_schedule_lease_recovery(struct nfs_client *); 348 348 extern void nfs4_schedule_state_manager(struct nfs_client *); ··· 373 373 374 374 #else 375 375 376 - #define nfs4_close_state(a, b, c) do { } while (0) 377 - #define nfs4_close_sync(a, b, c) do { } while (0) 376 + #define nfs4_close_state(a, b) do { } while (0) 377 + #define nfs4_close_sync(a, b) do { } while (0) 378 378 379 379 #endif /* CONFIG_NFS_V4 */ 380 380 #endif /* __LINUX_FS_NFS_NFS4_FS.H */
+34 -36
fs/nfs/nfs4proc.c
··· 763 763 struct nfs_open_confirmres c_res; 764 764 struct nfs_fattr f_attr; 765 765 struct nfs_fattr dir_attr; 766 - struct path path; 767 766 struct dentry *dir; 767 + struct dentry *dentry; 768 768 struct nfs4_state_owner *owner; 769 769 struct nfs4_state *state; 770 770 struct iattr attrs; ··· 786 786 nfs_fattr_init(&p->dir_attr); 787 787 } 788 788 789 - static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path, 789 + static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, 790 790 struct nfs4_state_owner *sp, fmode_t fmode, int flags, 791 791 const struct iattr *attrs, 792 792 gfp_t gfp_mask) 793 793 { 794 - struct dentry *parent = dget_parent(path->dentry); 794 + struct dentry *parent = dget_parent(dentry); 795 795 struct inode *dir = parent->d_inode; 796 796 struct nfs_server *server = NFS_SERVER(dir); 797 797 struct nfs4_opendata *p; ··· 802 802 p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask); 803 803 if (p->o_arg.seqid == NULL) 804 804 goto err_free; 805 - path_get(path); 806 - p->path = *path; 805 + nfs_sb_active(dentry->d_sb); 806 + p->dentry = dget(dentry); 807 807 p->dir = parent; 808 808 p->owner = sp; 809 809 atomic_inc(&sp->so_count); ··· 812 812 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE); 813 813 p->o_arg.clientid = server->nfs_client->cl_clientid; 814 814 p->o_arg.id = sp->so_owner_id.id; 815 - p->o_arg.name = &p->path.dentry->d_name; 815 + p->o_arg.name = &dentry->d_name; 816 816 p->o_arg.server = server; 817 817 p->o_arg.bitmask = server->attr_bitmask; 818 818 p->o_arg.claim = NFS4_OPEN_CLAIM_NULL; ··· 842 842 { 843 843 struct nfs4_opendata *p = container_of(kref, 844 844 struct nfs4_opendata, kref); 845 + struct super_block *sb = p->dentry->d_sb; 845 846 846 847 nfs_free_seqid(p->o_arg.seqid); 847 848 if (p->state != NULL) 848 849 nfs4_put_open_state(p->state); 849 850 nfs4_put_state_owner(p->owner); 850 851 dput(p->dir); 851 - path_put(&p->path); 852 + dput(p->dentry); 853 + nfs_sb_deactive(sb); 852 
854 kfree(p); 853 855 } 854 856 ··· 1132 1130 { 1133 1131 struct nfs4_opendata *opendata; 1134 1132 1135 - opendata = nfs4_opendata_alloc(&ctx->path, state->owner, 0, 0, NULL, GFP_NOFS); 1133 + opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS); 1136 1134 if (opendata == NULL) 1137 1135 return ERR_PTR(-ENOMEM); 1138 1136 opendata->state = state; ··· 1156 1154 newstate = nfs4_opendata_to_nfs4_state(opendata); 1157 1155 if (IS_ERR(newstate)) 1158 1156 return PTR_ERR(newstate); 1159 - nfs4_close_state(&opendata->path, newstate, fmode); 1157 + nfs4_close_state(newstate, fmode); 1160 1158 *res = newstate; 1161 1159 return 0; 1162 1160 } ··· 1354 1352 goto out_free; 1355 1353 state = nfs4_opendata_to_nfs4_state(data); 1356 1354 if (!IS_ERR(state)) 1357 - nfs4_close_state(&data->path, state, data->o_arg.fmode); 1355 + nfs4_close_state(state, data->o_arg.fmode); 1358 1356 out_free: 1359 1357 nfs4_opendata_put(data); 1360 1358 } ··· 1499 1497 goto out_free; 1500 1498 state = nfs4_opendata_to_nfs4_state(data); 1501 1499 if (!IS_ERR(state)) 1502 - nfs4_close_state(&data->path, state, data->o_arg.fmode); 1500 + nfs4_close_state(state, data->o_arg.fmode); 1503 1501 out_free: 1504 1502 nfs4_opendata_put(data); 1505 1503 } ··· 1650 1648 return PTR_ERR(opendata); 1651 1649 ret = nfs4_open_recover(opendata, state); 1652 1650 if (ret == -ESTALE) 1653 - d_drop(ctx->path.dentry); 1651 + d_drop(ctx->dentry); 1654 1652 nfs4_opendata_put(opendata); 1655 1653 return ret; 1656 1654 } ··· 1708 1706 /* 1709 1707 * Returns a referenced nfs4_state 1710 1708 */ 1711 - static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res) 1709 + static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res) 1712 1710 { 1713 1711 struct nfs4_state_owner *sp; 1714 1712 struct nfs4_state 
*state = NULL; ··· 1725 1723 status = nfs4_recover_expired_lease(server); 1726 1724 if (status != 0) 1727 1725 goto err_put_state_owner; 1728 - if (path->dentry->d_inode != NULL) 1729 - nfs4_return_incompatible_delegation(path->dentry->d_inode, fmode); 1726 + if (dentry->d_inode != NULL) 1727 + nfs4_return_incompatible_delegation(dentry->d_inode, fmode); 1730 1728 status = -ENOMEM; 1731 - opendata = nfs4_opendata_alloc(path, sp, fmode, flags, sattr, GFP_KERNEL); 1729 + opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL); 1732 1730 if (opendata == NULL) 1733 1731 goto err_put_state_owner; 1734 1732 1735 - if (path->dentry->d_inode != NULL) 1736 - opendata->state = nfs4_get_open_state(path->dentry->d_inode, sp); 1733 + if (dentry->d_inode != NULL) 1734 + opendata->state = nfs4_get_open_state(dentry->d_inode, sp); 1737 1735 1738 1736 status = _nfs4_proc_open(opendata); 1739 1737 if (status != 0) ··· 1771 1769 } 1772 1770 1773 1771 1774 - static struct nfs4_state *nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred) 1772 + static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred) 1775 1773 { 1776 1774 struct nfs4_exception exception = { }; 1777 1775 struct nfs4_state *res; 1778 1776 int status; 1779 1777 1780 1778 do { 1781 - status = _nfs4_do_open(dir, path, fmode, flags, sattr, cred, &res); 1779 + status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, &res); 1782 1780 if (status == 0) 1783 1781 break; 1784 1782 /* NOTE: BAD_SEQID means the server and client disagree about the ··· 1875 1873 } 1876 1874 1877 1875 struct nfs4_closedata { 1878 - struct path path; 1879 1876 struct inode *inode; 1880 1877 struct nfs4_state *state; 1881 1878 struct nfs_closeargs arg; ··· 1889 1888 { 1890 1889 struct nfs4_closedata *calldata = data; 1891 1890 struct nfs4_state_owner *sp = 
calldata->state->owner; 1891 + struct super_block *sb = calldata->state->inode->i_sb; 1892 1892 1893 1893 if (calldata->roc) 1894 1894 pnfs_roc_release(calldata->state->inode); 1895 1895 nfs4_put_open_state(calldata->state); 1896 1896 nfs_free_seqid(calldata->arg.seqid); 1897 1897 nfs4_put_state_owner(sp); 1898 - path_put(&calldata->path); 1898 + nfs_sb_deactive(sb); 1899 1899 kfree(calldata); 1900 1900 } 1901 1901 ··· 2016 2014 * 2017 2015 * NOTE: Caller must be holding the sp->so_owner semaphore! 2018 2016 */ 2019 - int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc) 2017 + int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc) 2020 2018 { 2021 2019 struct nfs_server *server = NFS_SERVER(state->inode); 2022 2020 struct nfs4_closedata *calldata; ··· 2052 2050 calldata->res.seqid = calldata->arg.seqid; 2053 2051 calldata->res.server = server; 2054 2052 calldata->roc = roc; 2055 - path_get(path); 2056 - calldata->path = *path; 2053 + nfs_sb_active(calldata->inode->i_sb); 2057 2054 2058 2055 msg.rpc_argp = &calldata->arg; 2059 2056 msg.rpc_resp = &calldata->res; ··· 2081 2080 struct nfs4_state *state; 2082 2081 2083 2082 /* Protect against concurrent sillydeletes */ 2084 - state = nfs4_do_open(dir, &ctx->path, ctx->mode, open_flags, attr, ctx->cred); 2083 + state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, ctx->cred); 2085 2084 if (IS_ERR(state)) 2086 2085 return ERR_CAST(state); 2087 2086 ctx->state = state; ··· 2093 2092 if (ctx->state == NULL) 2094 2093 return; 2095 2094 if (is_sync) 2096 - nfs4_close_sync(&ctx->path, ctx->state, ctx->mode); 2095 + nfs4_close_sync(ctx->state, ctx->mode); 2097 2096 else 2098 - nfs4_close_state(&ctx->path, ctx->state, ctx->mode); 2097 + nfs4_close_state(ctx->state, ctx->mode); 2099 2098 } 2100 2099 2101 2100 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) ··· 2617 2616 nfs4_proc_create(struct inode *dir, 
struct dentry *dentry, struct iattr *sattr, 2618 2617 int flags, struct nfs_open_context *ctx) 2619 2618 { 2620 - struct path my_path = { 2621 - .dentry = dentry, 2622 - }; 2623 - struct path *path = &my_path; 2619 + struct dentry *de = dentry; 2624 2620 struct nfs4_state *state; 2625 2621 struct rpc_cred *cred = NULL; 2626 2622 fmode_t fmode = 0; ··· 2625 2627 2626 2628 if (ctx != NULL) { 2627 2629 cred = ctx->cred; 2628 - path = &ctx->path; 2630 + de = ctx->dentry; 2629 2631 fmode = ctx->mode; 2630 2632 } 2631 2633 sattr->ia_mode &= ~current_umask(); 2632 - state = nfs4_do_open(dir, path, fmode, flags, sattr, cred); 2634 + state = nfs4_do_open(dir, de, fmode, flags, sattr, cred); 2633 2635 d_drop(dentry); 2634 2636 if (IS_ERR(state)) { 2635 2637 status = PTR_ERR(state); ··· 2640 2642 if (ctx != NULL) 2641 2643 ctx->state = state; 2642 2644 else 2643 - nfs4_close_sync(path, state, fmode); 2645 + nfs4_close_sync(state, fmode); 2644 2646 out: 2645 2647 return status; 2646 2648 } ··· 4292 4294 memcpy(data->lsp->ls_stateid.data, data->res.stateid.data, 4293 4295 sizeof(data->lsp->ls_stateid.data)); 4294 4296 data->lsp->ls_flags |= NFS_LOCK_INITIALIZED; 4295 - renew_lease(NFS_SERVER(data->ctx->path.dentry->d_inode), data->timestamp); 4297 + renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp); 4296 4298 } 4297 4299 out: 4298 4300 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
+6 -6
fs/nfs/nfs4state.c
··· 641 641 /* 642 642 * Close the current file. 643 643 */ 644 - static void __nfs4_close(struct path *path, struct nfs4_state *state, 644 + static void __nfs4_close(struct nfs4_state *state, 645 645 fmode_t fmode, gfp_t gfp_mask, int wait) 646 646 { 647 647 struct nfs4_state_owner *owner = state->owner; ··· 685 685 } else { 686 686 bool roc = pnfs_roc(state->inode); 687 687 688 - nfs4_do_close(path, state, gfp_mask, wait, roc); 688 + nfs4_do_close(state, gfp_mask, wait, roc); 689 689 } 690 690 } 691 691 692 - void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode) 692 + void nfs4_close_state(struct nfs4_state *state, fmode_t fmode) 693 693 { 694 - __nfs4_close(path, state, fmode, GFP_NOFS, 0); 694 + __nfs4_close(state, fmode, GFP_NOFS, 0); 695 695 } 696 696 697 - void nfs4_close_sync(struct path *path, struct nfs4_state *state, fmode_t fmode) 697 + void nfs4_close_sync(struct nfs4_state *state, fmode_t fmode) 698 698 { 699 - __nfs4_close(path, state, fmode, GFP_KERNEL, 1); 699 + __nfs4_close(state, fmode, GFP_KERNEL, 1); 700 700 } 701 701 702 702 /*
+2 -2
fs/nfs/pagelist.c
··· 114 114 if (!nfs_lock_request_dontget(req)) 115 115 return 0; 116 116 if (test_bit(PG_MAPPED, &req->wb_flags)) 117 - radix_tree_tag_set(&NFS_I(req->wb_context->path.dentry->d_inode)->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED); 117 + radix_tree_tag_set(&NFS_I(req->wb_context->dentry->d_inode)->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED); 118 118 return 1; 119 119 } 120 120 ··· 124 124 void nfs_clear_page_tag_locked(struct nfs_page *req) 125 125 { 126 126 if (test_bit(PG_MAPPED, &req->wb_flags)) { 127 - struct inode *inode = req->wb_context->path.dentry->d_inode; 127 + struct inode *inode = req->wb_context->dentry->d_inode; 128 128 struct nfs_inode *nfsi = NFS_I(inode); 129 129 130 130 spin_lock(&inode->i_lock);
+4 -4
fs/nfs/read.c
··· 144 144 145 145 static void nfs_readpage_release(struct nfs_page *req) 146 146 { 147 - struct inode *d_inode = req->wb_context->path.dentry->d_inode; 147 + struct inode *d_inode = req->wb_context->dentry->d_inode; 148 148 149 149 if (PageUptodate(req->wb_page)) 150 150 nfs_readpage_to_fscache(d_inode, req->wb_page, 0); ··· 152 152 unlock_page(req->wb_page); 153 153 154 154 dprintk("NFS: read done (%s/%Ld %d@%Ld)\n", 155 - req->wb_context->path.dentry->d_inode->i_sb->s_id, 156 - (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode), 155 + req->wb_context->dentry->d_inode->i_sb->s_id, 156 + (long long)NFS_FILEID(req->wb_context->dentry->d_inode), 157 157 req->wb_bytes, 158 158 (long long)req_offset(req)); 159 159 nfs_release_request(req); ··· 207 207 unsigned int count, unsigned int offset, 208 208 struct pnfs_layout_segment *lseg) 209 209 { 210 - struct inode *inode = req->wb_context->path.dentry->d_inode; 210 + struct inode *inode = req->wb_context->dentry->d_inode; 211 211 212 212 data->req = req; 213 213 data->inode = inode;
+5 -11
fs/nfs/super.c
··· 2773 2773 static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt, 2774 2774 const char *export_path) 2775 2775 { 2776 - struct nameidata *nd = NULL; 2777 2776 struct mnt_namespace *ns_private; 2778 2777 struct super_block *s; 2779 2778 struct dentry *dentry; 2779 + struct path path; 2780 2780 int ret; 2781 - 2782 - nd = kmalloc(sizeof(*nd), GFP_KERNEL); 2783 - if (nd == NULL) 2784 - return ERR_PTR(-ENOMEM); 2785 2781 2786 2782 ns_private = create_mnt_ns(root_mnt); 2787 2783 ret = PTR_ERR(ns_private); ··· 2789 2793 goto out_put_mnt_ns; 2790 2794 2791 2795 ret = vfs_path_lookup(root_mnt->mnt_root, root_mnt, 2792 - export_path, LOOKUP_FOLLOW, nd); 2796 + export_path, LOOKUP_FOLLOW, &path); 2793 2797 2794 2798 nfs_referral_loop_unprotect(); 2795 2799 put_mnt_ns(ns_private); ··· 2797 2801 if (ret != 0) 2798 2802 goto out_err; 2799 2803 2800 - s = nd->path.mnt->mnt_sb; 2804 + s = path.mnt->mnt_sb; 2801 2805 atomic_inc(&s->s_active); 2802 - dentry = dget(nd->path.dentry); 2806 + dentry = dget(path.dentry); 2803 2807 2804 - path_put(&nd->path); 2805 - kfree(nd); 2808 + path_put(&path); 2806 2809 down_write(&s->s_umount); 2807 2810 return dentry; 2808 2811 out_put_mnt_ns: ··· 2809 2814 out_mntput: 2810 2815 mntput(root_mnt); 2811 2816 out_err: 2812 - kfree(nd); 2813 2817 return ERR_PTR(ret); 2814 2818 } 2815 2819
+11 -11
fs/nfs/write.c
··· 409 409 */ 410 410 static void nfs_inode_remove_request(struct nfs_page *req) 411 411 { 412 - struct inode *inode = req->wb_context->path.dentry->d_inode; 412 + struct inode *inode = req->wb_context->dentry->d_inode; 413 413 struct nfs_inode *nfsi = NFS_I(inode); 414 414 415 415 BUG_ON (!NFS_WBACK_BUSY(req)); ··· 438 438 static void 439 439 nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg) 440 440 { 441 - struct inode *inode = req->wb_context->path.dentry->d_inode; 441 + struct inode *inode = req->wb_context->dentry->d_inode; 442 442 struct nfs_inode *nfsi = NFS_I(inode); 443 443 444 444 spin_lock(&inode->i_lock); ··· 852 852 struct pnfs_layout_segment *lseg, 853 853 int how) 854 854 { 855 - struct inode *inode = req->wb_context->path.dentry->d_inode; 855 + struct inode *inode = req->wb_context->dentry->d_inode; 856 856 857 857 /* Set up the RPC argument and reply structs 858 858 * NB: take care not to mess about with data->commit et al. */ 859 859 860 860 data->req = req; 861 - data->inode = inode = req->wb_context->path.dentry->d_inode; 861 + data->inode = inode = req->wb_context->dentry->d_inode; 862 862 data->cred = req->wb_context->cred; 863 863 data->lseg = get_lseg(lseg); 864 864 ··· 1053 1053 1054 1054 dprintk("NFS: %5u write(%s/%lld %d@%lld)", 1055 1055 task->tk_pid, 1056 - data->req->wb_context->path.dentry->d_inode->i_sb->s_id, 1056 + data->req->wb_context->dentry->d_inode->i_sb->s_id, 1057 1057 (long long) 1058 - NFS_FILEID(data->req->wb_context->path.dentry->d_inode), 1058 + NFS_FILEID(data->req->wb_context->dentry->d_inode), 1059 1059 data->req->wb_bytes, (long long)req_offset(data->req)); 1060 1060 1061 1061 nfs_writeback_done(task, data); ··· 1148 1148 1149 1149 dprintk("NFS: %5u write (%s/%lld %d@%lld)", 1150 1150 data->task.tk_pid, 1151 - req->wb_context->path.dentry->d_inode->i_sb->s_id, 1152 - (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode), 1151 + req->wb_context->dentry->d_inode->i_sb->s_id, 1152 
+ (long long)NFS_FILEID(req->wb_context->dentry->d_inode), 1153 1153 req->wb_bytes, 1154 1154 (long long)req_offset(req)); 1155 1155 ··· 1347 1347 struct pnfs_layout_segment *lseg) 1348 1348 { 1349 1349 struct nfs_page *first = nfs_list_entry(head->next); 1350 - struct inode *inode = first->wb_context->path.dentry->d_inode; 1350 + struct inode *inode = first->wb_context->dentry->d_inode; 1351 1351 1352 1352 /* Set up the RPC argument and reply structs 1353 1353 * NB: take care not to mess about with data->commit et al. */ ··· 1435 1435 nfs_clear_request_commit(req); 1436 1436 1437 1437 dprintk("NFS: commit (%s/%lld %d@%lld)", 1438 - req->wb_context->path.dentry->d_inode->i_sb->s_id, 1439 - (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode), 1438 + req->wb_context->dentry->d_sb->s_id, 1439 + (long long)NFS_FILEID(req->wb_context->dentry->d_inode), 1440 1440 req->wb_bytes, 1441 1441 (long long)req_offset(req)); 1442 1442 if (status < 0) {
+21 -31
fs/nfsd/nfs4recover.c
··· 191 191 } 192 192 193 193 static int 194 - nfsd4_list_rec_dir(struct dentry *dir, recdir_func *f) 194 + nfsd4_list_rec_dir(recdir_func *f) 195 195 { 196 196 const struct cred *original_cred; 197 - struct file *filp; 197 + struct dentry *dir = rec_file->f_path.dentry; 198 198 LIST_HEAD(names); 199 - struct name_list *entry; 200 - struct dentry *dentry; 201 199 int status; 202 - 203 - if (!rec_file) 204 - return 0; 205 200 206 201 status = nfs4_save_creds(&original_cred); 207 202 if (status < 0) 208 203 return status; 209 204 210 - filp = dentry_open(dget(dir), mntget(rec_file->f_path.mnt), O_RDONLY, 211 - current_cred()); 212 - status = PTR_ERR(filp); 213 - if (IS_ERR(filp)) 214 - goto out; 215 - status = vfs_readdir(filp, nfsd4_build_namelist, &names); 216 - fput(filp); 205 + status = vfs_llseek(rec_file, 0, SEEK_SET); 206 + if (status < 0) { 207 + nfs4_reset_creds(original_cred); 208 + return status; 209 + } 210 + 211 + status = vfs_readdir(rec_file, nfsd4_build_namelist, &names); 217 212 mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT); 218 213 while (!list_empty(&names)) { 214 + struct name_list *entry; 219 215 entry = list_entry(names.next, struct name_list, list); 220 - 221 - dentry = lookup_one_len(entry->name, dir, HEXDIR_LEN-1); 222 - if (IS_ERR(dentry)) { 223 - status = PTR_ERR(dentry); 224 - break; 216 + if (!status) { 217 + struct dentry *dentry; 218 + dentry = lookup_one_len(entry->name, dir, HEXDIR_LEN-1); 219 + if (IS_ERR(dentry)) { 220 + status = PTR_ERR(dentry); 221 + break; 222 + } 223 + status = f(dir, dentry); 224 + dput(dentry); 225 225 } 226 - status = f(dir, dentry); 227 - dput(dentry); 228 - if (status) 229 - break; 230 226 list_del(&entry->list); 231 227 kfree(entry); 232 228 } 233 229 mutex_unlock(&dir->d_inode->i_mutex); 234 - out: 235 - while (!list_empty(&names)) { 236 - entry = list_entry(names.next, struct name_list, list); 237 - list_del(&entry->list); 238 - kfree(entry); 239 - } 240 230 nfs4_reset_creds(original_cred); 
241 231 return status; 242 232 } ··· 312 322 status = mnt_want_write(rec_file->f_path.mnt); 313 323 if (status) 314 324 goto out; 315 - status = nfsd4_list_rec_dir(rec_file->f_path.dentry, purge_old); 325 + status = nfsd4_list_rec_dir(purge_old); 316 326 if (status == 0) 317 327 vfs_fsync(rec_file, 0); 318 328 mnt_drop_write(rec_file->f_path.mnt); ··· 342 352 if (!rec_file) 343 353 return 0; 344 354 345 - status = nfsd4_list_rec_dir(rec_file->f_path.dentry, load_recdir); 355 + status = nfsd4_list_rec_dir(load_recdir); 346 356 if (status) 347 357 printk("nfsd4: failed loading clients from recovery" 348 358 " directory %s\n", rec_file->f_path.dentry->d_name.name);
+10 -2
fs/nilfs2/file.c
··· 27 27 #include "nilfs.h" 28 28 #include "segment.h" 29 29 30 - int nilfs_sync_file(struct file *file, int datasync) 30 + int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) 31 31 { 32 32 /* 33 33 * Called from fsync() system call ··· 40 40 struct inode *inode = file->f_mapping->host; 41 41 int err; 42 42 43 - if (!nilfs_inode_dirty(inode)) 43 + err = filemap_write_and_wait_range(inode->i_mapping, start, end); 44 + if (err) 45 + return err; 46 + mutex_lock(&inode->i_mutex); 47 + 48 + if (!nilfs_inode_dirty(inode)) { 49 + mutex_unlock(&inode->i_mutex); 44 50 return 0; 51 + } 45 52 46 53 if (datasync) 47 54 err = nilfs_construct_dsync_segment(inode->i_sb, inode, 0, ··· 56 49 else 57 50 err = nilfs_construct_segment(inode->i_sb); 58 51 52 + mutex_unlock(&inode->i_mutex); 59 53 return err; 60 54 } 61 55
+6 -4
fs/nilfs2/inode.c
··· 259 259 return 0; 260 260 261 261 /* Needs synchronization with the cleaner */ 262 - size = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 263 - offset, nr_segs, nilfs_get_block, NULL); 262 + size = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs, 263 + nilfs_get_block); 264 264 265 265 /* 266 266 * In case of error extending write may have instantiated a few ··· 778 778 779 779 if ((iattr->ia_valid & ATTR_SIZE) && 780 780 iattr->ia_size != i_size_read(inode)) { 781 + inode_dio_wait(inode); 782 + 781 783 err = vmtruncate(inode, iattr->ia_size); 782 784 if (unlikely(err)) 783 785 goto out_err; ··· 801 799 return err; 802 800 } 803 801 804 - int nilfs_permission(struct inode *inode, int mask, unsigned int flags) 802 + int nilfs_permission(struct inode *inode, int mask) 805 803 { 806 804 struct nilfs_root *root = NILFS_I(inode)->i_root; 807 805 if ((mask & MAY_WRITE) && root && 808 806 root->cno != NILFS_CPTREE_CURRENT_CNO) 809 807 return -EROFS; /* snapshot is not writable */ 810 808 811 - return generic_permission(inode, mask, flags, NULL); 809 + return generic_permission(inode, mask); 812 810 } 813 811 814 812 int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
+1 -6
fs/nilfs2/namei.c
··· 72 72 return ERR_PTR(-ENAMETOOLONG); 73 73 74 74 ino = nilfs_inode_by_name(dir, &dentry->d_name); 75 - inode = NULL; 76 - if (ino) { 77 - inode = nilfs_iget(dir->i_sb, NILFS_I(dir)->i_root, ino); 78 - if (IS_ERR(inode)) 79 - return ERR_CAST(inode); 80 - } 75 + inode = ino ? nilfs_iget(dir->i_sb, NILFS_I(dir)->i_root, ino) : NULL; 81 76 return d_splice_alias(inode, dentry); 82 77 } 83 78
+2 -2
fs/nilfs2/nilfs.h
··· 235 235 struct page *, struct inode *); 236 236 237 237 /* file.c */ 238 - extern int nilfs_sync_file(struct file *, int); 238 + extern int nilfs_sync_file(struct file *, loff_t, loff_t, int); 239 239 240 240 /* ioctl.c */ 241 241 long nilfs_ioctl(struct file *, unsigned int, unsigned long); ··· 264 264 extern void nilfs_truncate(struct inode *); 265 265 extern void nilfs_evict_inode(struct inode *); 266 266 extern int nilfs_setattr(struct dentry *, struct iattr *); 267 - int nilfs_permission(struct inode *inode, int mask, unsigned int flags); 267 + int nilfs_permission(struct inode *inode, int mask); 268 268 int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh); 269 269 extern int nilfs_inode_dirty(struct inode *); 270 270 int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty);
+9 -1
fs/ntfs/dir.c
··· 1527 1527 * this problem for now. We do write the $BITMAP attribute if it is present 1528 1528 * which is the important one for a directory so things are not too bad. 1529 1529 */ 1530 - static int ntfs_dir_fsync(struct file *filp, int datasync) 1530 + static int ntfs_dir_fsync(struct file *filp, loff_t start, loff_t end, 1531 + int datasync) 1531 1532 { 1532 1533 struct inode *bmp_vi, *vi = filp->f_mapping->host; 1533 1534 int err, ret; 1534 1535 ntfs_attr na; 1535 1536 1536 1537 ntfs_debug("Entering for inode 0x%lx.", vi->i_ino); 1538 + 1539 + err = filemap_write_and_wait_range(vi->i_mapping, start, end); 1540 + if (err) 1541 + return err; 1542 + mutex_lock(&vi->i_mutex); 1543 + 1537 1544 BUG_ON(!S_ISDIR(vi->i_mode)); 1538 1545 /* If the bitmap attribute inode is in memory sync it, too. */ 1539 1546 na.mft_no = vi->i_ino; ··· 1562 1555 else 1563 1556 ntfs_warning(vi->i_sb, "Failed to f%ssync inode 0x%lx. Error " 1564 1557 "%u.", datasync ? "data" : "", vi->i_ino, -ret); 1558 + mutex_unlock(&vi->i_mutex); 1565 1559 return ret; 1566 1560 } 1567 1561
+10 -3
fs/ntfs/file.c
··· 1832 1832 * fails again. 1833 1833 */ 1834 1834 if (unlikely(NInoTruncateFailed(ni))) { 1835 - down_write(&vi->i_alloc_sem); 1835 + inode_dio_wait(vi); 1836 1836 err = ntfs_truncate(vi); 1837 - up_write(&vi->i_alloc_sem); 1838 1837 if (err || NInoTruncateFailed(ni)) { 1839 1838 if (!err) 1840 1839 err = -EIO; ··· 2152 2153 * with this inode but since we have no simple way of getting to them we ignore 2153 2154 * this problem for now. 2154 2155 */ 2155 - static int ntfs_file_fsync(struct file *filp, int datasync) 2156 + static int ntfs_file_fsync(struct file *filp, loff_t start, loff_t end, 2157 + int datasync) 2156 2158 { 2157 2159 struct inode *vi = filp->f_mapping->host; 2158 2160 int err, ret = 0; 2159 2161 2160 2162 ntfs_debug("Entering for inode 0x%lx.", vi->i_ino); 2163 + 2164 + err = filemap_write_and_wait_range(vi->i_mapping, start, end); 2165 + if (err) 2166 + return err; 2167 + mutex_lock(&vi->i_mutex); 2168 + 2161 2169 BUG_ON(S_ISDIR(vi->i_mode)); 2162 2170 if (!datasync || !NInoNonResident(NTFS_I(vi))) 2163 2171 ret = __ntfs_write_inode(vi, 1); ··· 2182 2176 else 2183 2177 ntfs_warning(vi->i_sb, "Failed to f%ssync inode 0x%lx. Error " 2184 2178 "%u.", datasync ? "data" : "", vi->i_ino, -ret); 2179 + mutex_unlock(&vi->i_mutex); 2185 2180 return ret; 2186 2181 } 2187 2182
+2 -8
fs/ntfs/inode.c
··· 2357 2357 * 2358 2358 * Returns 0 on success or -errno on error. 2359 2359 * 2360 - * Called with ->i_mutex held. In all but one case ->i_alloc_sem is held for 2361 - * writing. The only case in the kernel where ->i_alloc_sem is not held is 2362 - * mm/filemap.c::generic_file_buffered_write() where vmtruncate() is called 2363 - * with the current i_size as the offset. The analogous place in NTFS is in 2364 - * fs/ntfs/file.c::ntfs_file_buffered_write() where we call vmtruncate() again 2365 - * without holding ->i_alloc_sem. 2360 + * Called with ->i_mutex held. 2366 2361 */ 2367 2362 int ntfs_truncate(struct inode *vi) 2368 2363 { ··· 2882 2887 * We also abort all changes of user, group, and mode as we do not implement 2883 2888 * the NTFS ACLs yet. 2884 2889 * 2885 - * Called with ->i_mutex held. For the ATTR_SIZE (i.e. ->truncate) case, also 2886 - * called with ->i_alloc_sem held for writing. 2890 + * Called with ->i_mutex held. 2887 2891 */ 2888 2892 int ntfs_setattr(struct dentry *dentry, struct iattr *attr) 2889 2893 {
+2 -2
fs/ocfs2/acl.c
··· 290 290 return ret; 291 291 } 292 292 293 - int ocfs2_check_acl(struct inode *inode, int mask, unsigned int flags) 293 + int ocfs2_check_acl(struct inode *inode, int mask) 294 294 { 295 295 struct ocfs2_super *osb; 296 296 struct buffer_head *di_bh = NULL; 297 297 struct posix_acl *acl; 298 298 int ret = -EAGAIN; 299 299 300 - if (flags & IPERM_FLAG_RCU) 300 + if (mask & MAY_NOT_BLOCK) 301 301 return -ECHILD; 302 302 303 303 osb = OCFS2_SB(inode->i_sb);
+1 -1
fs/ocfs2/acl.h
··· 26 26 __le32 e_id; 27 27 }; 28 28 29 - extern int ocfs2_check_acl(struct inode *, int, unsigned int); 29 + extern int ocfs2_check_acl(struct inode *, int); 30 30 extern int ocfs2_acl_chmod(struct inode *); 31 31 extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *, 32 32 struct buffer_head *, struct buffer_head *,
+4 -6
fs/ocfs2/aops.c
··· 551 551 552 552 /* 553 553 * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're 554 - * particularly interested in the aio/dio case. Like the core uses 555 - * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from 556 - * truncation on another. 554 + * particularly interested in the aio/dio case. We use the rw_lock DLM lock 555 + * to protect io on one node from truncation on another. 557 556 */ 558 557 static void ocfs2_dio_end_io(struct kiocb *iocb, 559 558 loff_t offset, ··· 567 568 /* this io's submitter should not have unlocked this before we could */ 568 569 BUG_ON(!ocfs2_iocb_is_rw_locked(iocb)); 569 570 570 - if (ocfs2_iocb_is_sem_locked(iocb)) { 571 - up_read(&inode->i_alloc_sem); 571 + if (ocfs2_iocb_is_sem_locked(iocb)) 572 572 ocfs2_iocb_clear_sem_locked(iocb); 573 - } 574 573 575 574 ocfs2_iocb_clear_rw_locked(iocb); 576 575 ··· 577 580 578 581 if (is_async) 579 582 aio_complete(iocb, ret, 0); 583 + inode_dio_done(inode); 580 584 } 581 585 582 586 /*
+25 -16
fs/ocfs2/file.c
··· 171 171 return 0; 172 172 } 173 173 174 - static int ocfs2_sync_file(struct file *file, int datasync) 174 + static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end, 175 + int datasync) 175 176 { 176 177 int err = 0; 177 178 journal_t *journal; ··· 185 184 file->f_path.dentry->d_name.name, 186 185 (unsigned long long)datasync); 187 186 187 + err = filemap_write_and_wait_range(inode->i_mapping, start, end); 188 + if (err) 189 + return err; 190 + 191 + /* 192 + * Probably don't need the i_mutex at all in here, just putting it here 193 + * to be consistent with how fsync used to be called, someone more 194 + * familiar with the fs could possibly remove it. 195 + */ 196 + mutex_lock(&inode->i_mutex); 188 197 if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) { 189 198 /* 190 199 * We still have to flush drive's caches to get data to the ··· 211 200 bail: 212 201 if (err) 213 202 mlog_errno(err); 203 + mutex_unlock(&inode->i_mutex); 214 204 215 205 return (err < 0) ? -EIO : 0; 216 206 } ··· 1154 1142 if (status) 1155 1143 goto bail_unlock; 1156 1144 1145 + inode_dio_wait(inode); 1146 + 1157 1147 if (i_size_read(inode) > attr->ia_size) { 1158 1148 if (ocfs2_should_order_data(inode)) { 1159 1149 status = ocfs2_begin_ordered_truncate(inode, ··· 1293 1279 return err; 1294 1280 } 1295 1281 1296 - int ocfs2_permission(struct inode *inode, int mask, unsigned int flags) 1282 + int ocfs2_permission(struct inode *inode, int mask) 1297 1283 { 1298 1284 int ret; 1299 1285 1300 - if (flags & IPERM_FLAG_RCU) 1286 + if (mask & MAY_NOT_BLOCK) 1301 1287 return -ECHILD; 1302 1288 1303 1289 ret = ocfs2_inode_lock(inode, NULL, 0); ··· 1307 1293 goto out; 1308 1294 } 1309 1295 1310 - ret = generic_permission(inode, mask, flags, ocfs2_check_acl); 1296 + ret = generic_permission(inode, mask); 1311 1297 1312 1298 ocfs2_inode_unlock(inode, 0); 1313 1299 out: ··· 2250 2236 ocfs2_iocb_clear_sem_locked(iocb); 2251 2237 2252 2238 relock: 2253 - /* to match setattr's i_mutex -> 
i_alloc_sem -> rw_lock ordering */ 2239 + /* to match setattr's i_mutex -> rw_lock ordering */ 2254 2240 if (direct_io) { 2255 - down_read(&inode->i_alloc_sem); 2256 2241 have_alloc_sem = 1; 2257 2242 /* communicate with ocfs2_dio_end_io */ 2258 2243 ocfs2_iocb_set_sem_locked(iocb); ··· 2303 2290 */ 2304 2291 if (direct_io && !can_do_direct) { 2305 2292 ocfs2_rw_unlock(inode, rw_level); 2306 - up_read(&inode->i_alloc_sem); 2307 2293 2308 2294 have_alloc_sem = 0; 2309 2295 rw_level = -1; ··· 2373 2361 /* 2374 2362 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io 2375 2363 * function pointer which is called when o_direct io completes so that 2376 - * it can unlock our rw lock. (it's the clustered equivalent of 2377 - * i_alloc_sem; protects truncate from racing with pending ios). 2364 + * it can unlock our rw lock. 2378 2365 * Unfortunately there are error cases which call end_io and others 2379 2366 * that don't. so we don't have to unlock the rw_lock if either an 2380 2367 * async dio is going to do it in the future or an end_io after an ··· 2389 2378 ocfs2_rw_unlock(inode, rw_level); 2390 2379 2391 2380 out_sems: 2392 - if (have_alloc_sem) { 2393 - up_read(&inode->i_alloc_sem); 2381 + if (have_alloc_sem) 2394 2382 ocfs2_iocb_clear_sem_locked(iocb); 2395 - } 2396 2383 2397 2384 mutex_unlock(&inode->i_mutex); 2398 2385 ··· 2540 2531 * need locks to protect pending reads from racing with truncate. 
2541 2532 */ 2542 2533 if (filp->f_flags & O_DIRECT) { 2543 - down_read(&inode->i_alloc_sem); 2544 2534 have_alloc_sem = 1; 2545 2535 ocfs2_iocb_set_sem_locked(iocb); 2546 2536 ··· 2582 2574 } 2583 2575 2584 2576 bail: 2585 - if (have_alloc_sem) { 2586 - up_read(&inode->i_alloc_sem); 2577 + if (have_alloc_sem) 2587 2578 ocfs2_iocb_clear_sem_locked(iocb); 2588 - } 2579 + 2589 2580 if (rw_level != -1) 2590 2581 ocfs2_rw_unlock(inode, rw_level); 2591 2582 ··· 2600 2593 .listxattr = ocfs2_listxattr, 2601 2594 .removexattr = generic_removexattr, 2602 2595 .fiemap = ocfs2_fiemap, 2596 + .check_acl = ocfs2_check_acl, 2603 2597 }; 2604 2598 2605 2599 const struct inode_operations ocfs2_special_file_iops = { 2606 2600 .setattr = ocfs2_setattr, 2607 2601 .getattr = ocfs2_getattr, 2608 2602 .permission = ocfs2_permission, 2603 + .check_acl = ocfs2_check_acl, 2609 2604 }; 2610 2605 2611 2606 /*
+1 -1
fs/ocfs2/file.h
··· 61 61 int ocfs2_setattr(struct dentry *dentry, struct iattr *attr); 62 62 int ocfs2_getattr(struct vfsmount *mnt, struct dentry *dentry, 63 63 struct kstat *stat); 64 - int ocfs2_permission(struct inode *inode, int mask, unsigned int flags); 64 + int ocfs2_permission(struct inode *inode, int mask); 65 65 66 66 int ocfs2_should_update_atime(struct inode *inode, 67 67 struct vfsmount *vfsmnt);
+1
fs/ocfs2/namei.c
··· 2498 2498 .listxattr = ocfs2_listxattr, 2499 2499 .removexattr = generic_removexattr, 2500 2500 .fiemap = ocfs2_fiemap, 2501 + .check_acl = ocfs2_check_acl, 2501 2502 };
+11 -38
fs/ocfs2/refcounttree.c
··· 4368 4368 return inode_permission(dir, MAY_WRITE | MAY_EXEC); 4369 4369 } 4370 4370 4371 - /* copied from user_path_parent. */ 4372 - static int ocfs2_user_path_parent(const char __user *path, 4373 - struct nameidata *nd, char **name) 4374 - { 4375 - char *s = getname(path); 4376 - int error; 4377 - 4378 - if (IS_ERR(s)) 4379 - return PTR_ERR(s); 4380 - 4381 - error = kern_path_parent(s, nd); 4382 - if (error) 4383 - putname(s); 4384 - else 4385 - *name = s; 4386 - 4387 - return error; 4388 - } 4389 - 4390 4371 /** 4391 4372 * ocfs2_vfs_reflink - Create a reference-counted link 4392 4373 * ··· 4441 4460 bool preserve) 4442 4461 { 4443 4462 struct dentry *new_dentry; 4444 - struct nameidata nd; 4445 - struct path old_path; 4463 + struct path old_path, new_path; 4446 4464 int error; 4447 - char *to = NULL; 4448 4465 4449 4466 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) 4450 4467 return -EOPNOTSUPP; ··· 4453 4474 return error; 4454 4475 } 4455 4476 4456 - error = ocfs2_user_path_parent(newname, &nd, &to); 4457 - if (error) { 4477 + new_dentry = user_path_create(AT_FDCWD, newname, &new_path, 0); 4478 + error = PTR_ERR(new_dentry); 4479 + if (IS_ERR(new_dentry)) { 4458 4480 mlog_errno(error); 4459 4481 goto out; 4460 4482 } 4461 4483 4462 4484 error = -EXDEV; 4463 - if (old_path.mnt != nd.path.mnt) 4464 - goto out_release; 4465 - new_dentry = lookup_create(&nd, 0); 4466 - error = PTR_ERR(new_dentry); 4467 - if (IS_ERR(new_dentry)) { 4485 + if (old_path.mnt != new_path.mnt) { 4468 4486 mlog_errno(error); 4469 - goto out_unlock; 4487 + goto out_dput; 4470 4488 } 4471 4489 4472 - error = mnt_want_write(nd.path.mnt); 4490 + error = mnt_want_write(new_path.mnt); 4473 4491 if (error) { 4474 4492 mlog_errno(error); 4475 4493 goto out_dput; 4476 4494 } 4477 4495 4478 4496 error = ocfs2_vfs_reflink(old_path.dentry, 4479 - nd.path.dentry->d_inode, 4497 + new_path.dentry->d_inode, 4480 4498 new_dentry, preserve); 4481 - mnt_drop_write(nd.path.mnt); 4499 + 
mnt_drop_write(new_path.mnt); 4482 4500 out_dput: 4483 4501 dput(new_dentry); 4484 - out_unlock: 4485 - mutex_unlock(&nd.path.dentry->d_inode->i_mutex); 4486 - out_release: 4487 - path_put(&nd.path); 4488 - putname(to); 4502 + mutex_unlock(&new_path.dentry->d_inode->i_mutex); 4503 + path_put(&new_path); 4489 4504 out: 4490 4505 path_put(&old_path); 4491 4506
+1 -1
fs/open.c
··· 793 793 return nd->intent.open.file; 794 794 out_err: 795 795 release_open_intent(nd); 796 - nd->intent.open.file = (struct file *)dentry; 796 + nd->intent.open.file = ERR_CAST(dentry); 797 797 goto out; 798 798 } 799 799 EXPORT_SYMBOL_GPL(lookup_instantiate_filp);
+3 -3
fs/proc/base.c
··· 673 673 p->m.private = p; 674 674 p->ns = ns; 675 675 p->root = root; 676 - p->event = ns->event; 676 + p->m.poll_event = ns->event; 677 677 678 678 return 0; 679 679 ··· 2167 2167 * /proc/pid/fd needs a special permission handler so that a process can still 2168 2168 * access /proc/self/fd after it has executed a setuid(). 2169 2169 */ 2170 - static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags) 2170 + static int proc_fd_permission(struct inode *inode, int mask) 2171 2171 { 2172 - int rv = generic_permission(inode, mask, flags, NULL); 2172 + int rv = generic_permission(inode, mask); 2173 2173 if (rv == 0) 2174 2174 return 0; 2175 2175 if (task_pid(current) == proc_pid(inode))
+2 -2
fs/proc/proc_sysctl.c
··· 294 294 return ret; 295 295 } 296 296 297 - static int proc_sys_permission(struct inode *inode, int mask,unsigned int flags) 297 + static int proc_sys_permission(struct inode *inode, int mask) 298 298 { 299 299 /* 300 300 * sysctl entries that are not writeable, ··· 316 316 if (!table) /* global root - r-xr-xr-x */ 317 317 error = mask & MAY_WRITE ? -EACCES : 0; 318 318 else /* Use the permissions on the sysctl table entry */ 319 - error = sysctl_perm(head->root, table, mask); 319 + error = sysctl_perm(head->root, table, mask & ~MAY_NOT_BLOCK); 320 320 321 321 sysctl_head_finish(head); 322 322 return error;
+41 -3
fs/read_write.c
··· 64 64 return file->f_pos; 65 65 offset += file->f_pos; 66 66 break; 67 + case SEEK_DATA: 68 + /* 69 + * In the generic case the entire file is data, so as long as 70 + * offset isn't at the end of the file then the offset is data. 71 + */ 72 + if (offset >= inode->i_size) 73 + return -ENXIO; 74 + break; 75 + case SEEK_HOLE: 76 + /* 77 + * There is a virtual hole at the end of the file, so as long as 78 + * offset isn't i_size or larger, return i_size. 79 + */ 80 + if (offset >= inode->i_size) 81 + return -ENXIO; 82 + offset = inode->i_size; 83 + break; 67 84 } 68 85 69 86 if (offset < 0 && !unsigned_offsets(file)) ··· 145 128 146 129 loff_t default_llseek(struct file *file, loff_t offset, int origin) 147 130 { 131 + struct inode *inode = file->f_path.dentry->d_inode; 148 132 loff_t retval; 149 133 150 - mutex_lock(&file->f_dentry->d_inode->i_mutex); 134 + mutex_lock(&inode->i_mutex); 151 135 switch (origin) { 152 136 case SEEK_END: 153 - offset += i_size_read(file->f_path.dentry->d_inode); 137 + offset += i_size_read(inode); 154 138 break; 155 139 case SEEK_CUR: 156 140 if (offset == 0) { ··· 159 141 goto out; 160 142 } 161 143 offset += file->f_pos; 144 + break; 145 + case SEEK_DATA: 146 + /* 147 + * In the generic case the entire file is data, so as 148 + * long as offset isn't at the end of the file then the 149 + * offset is data. 150 + */ 151 + if (offset >= inode->i_size) 152 + return -ENXIO; 153 + break; 154 + case SEEK_HOLE: 155 + /* 156 + * There is a virtual hole at the end of the file, so 157 + * as long as offset isn't i_size or larger, return 158 + * i_size. 
159 + */ 160 + if (offset >= inode->i_size) 161 + return -ENXIO; 162 + offset = inode->i_size; 163 + break; 162 164 } 163 165 retval = -EINVAL; 164 166 if (offset >= 0 || unsigned_offsets(file)) { ··· 189 151 retval = offset; 190 152 } 191 153 out: 192 - mutex_unlock(&file->f_dentry->d_inode->i_mutex); 154 + mutex_unlock(&inode->i_mutex); 193 155 return retval; 194 156 } 195 157 EXPORT_SYMBOL(default_llseek);
+11 -2
fs/reiserfs/dir.c
··· 14 14 extern const struct reiserfs_key MIN_KEY; 15 15 16 16 static int reiserfs_readdir(struct file *, void *, filldir_t); 17 - static int reiserfs_dir_fsync(struct file *filp, int datasync); 17 + static int reiserfs_dir_fsync(struct file *filp, loff_t start, loff_t end, 18 + int datasync); 18 19 19 20 const struct file_operations reiserfs_dir_operations = { 20 21 .llseek = generic_file_llseek, ··· 28 27 #endif 29 28 }; 30 29 31 - static int reiserfs_dir_fsync(struct file *filp, int datasync) 30 + static int reiserfs_dir_fsync(struct file *filp, loff_t start, loff_t end, 31 + int datasync) 32 32 { 33 33 struct inode *inode = filp->f_mapping->host; 34 34 int err; 35 + 36 + err = filemap_write_and_wait_range(inode->i_mapping, start, end); 37 + if (err) 38 + return err; 39 + 40 + mutex_lock(&inode->i_mutex); 35 41 reiserfs_write_lock(inode->i_sb); 36 42 err = reiserfs_commit_for_inode(inode); 37 43 reiserfs_write_unlock(inode->i_sb); 44 + mutex_unlock(&inode->i_mutex); 38 45 if (err < 0) 39 46 return err; 40 47 return 0;
+9 -1
fs/reiserfs/file.c
··· 140 140 * be removed... 141 141 */ 142 142 143 - static int reiserfs_sync_file(struct file *filp, int datasync) 143 + static int reiserfs_sync_file(struct file *filp, loff_t start, loff_t end, 144 + int datasync) 144 145 { 145 146 struct inode *inode = filp->f_mapping->host; 146 147 int err; 147 148 int barrier_done; 148 149 150 + err = filemap_write_and_wait_range(inode->i_mapping, start, end); 151 + if (err) 152 + return err; 153 + 154 + mutex_lock(&inode->i_mutex); 149 155 BUG_ON(!S_ISREG(inode->i_mode)); 150 156 err = sync_mapping_buffers(inode->i_mapping); 151 157 reiserfs_write_lock(inode->i_sb); ··· 159 153 reiserfs_write_unlock(inode->i_sb); 160 154 if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb)) 161 155 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); 156 + mutex_unlock(&inode->i_mutex); 162 157 if (barrier_done < 0) 163 158 return barrier_done; 164 159 return (err < 0) ? -EIO : 0; ··· 319 312 .listxattr = reiserfs_listxattr, 320 313 .removexattr = reiserfs_removexattr, 321 314 .permission = reiserfs_permission, 315 + .check_acl = reiserfs_check_acl, 322 316 };
+5 -3
fs/reiserfs/inode.c
··· 3068 3068 struct inode *inode = file->f_mapping->host; 3069 3069 ssize_t ret; 3070 3070 3071 - ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 3072 - offset, nr_segs, 3073 - reiserfs_get_blocks_direct_io, NULL); 3071 + ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs, 3072 + reiserfs_get_blocks_direct_io); 3074 3073 3075 3074 /* 3076 3075 * In case of error extending write may have instantiated a few ··· 3113 3114 error = -EFBIG; 3114 3115 goto out; 3115 3116 } 3117 + 3118 + inode_dio_wait(inode); 3119 + 3116 3120 /* fill in hole pointers in the expanding truncate case. */ 3117 3121 if (attr->ia_size > inode->i_size) { 3118 3122 error = generic_cont_expand_simple(inode, attr->ia_size);
+3 -1
fs/reiserfs/namei.c
··· 1529 1529 .listxattr = reiserfs_listxattr, 1530 1530 .removexattr = reiserfs_removexattr, 1531 1531 .permission = reiserfs_permission, 1532 + .check_acl = reiserfs_check_acl, 1532 1533 }; 1533 1534 1534 1535 /* ··· 1546 1545 .listxattr = reiserfs_listxattr, 1547 1546 .removexattr = reiserfs_removexattr, 1548 1547 .permission = reiserfs_permission, 1548 + .check_acl = reiserfs_check_acl, 1549 1549 1550 1550 }; 1551 1551 ··· 1560 1558 .listxattr = reiserfs_listxattr, 1561 1559 .removexattr = reiserfs_removexattr, 1562 1560 .permission = reiserfs_permission, 1563 - 1561 + .check_acl = reiserfs_check_acl, 1564 1562 };
+1
fs/reiserfs/super.c
··· 1643 1643 /* Set default values for options: non-aggressive tails, RO on errors */ 1644 1644 REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL); 1645 1645 REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_ERROR_RO); 1646 + REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH); 1646 1647 /* no preallocation minimum, be smart in 1647 1648 reiserfs_file_write instead */ 1648 1649 REISERFS_SB(s)->s_alloc_options.preallocmin = 0;
+11 -14
fs/reiserfs/xattr.c
··· 555 555 556 556 reiserfs_write_unlock(inode->i_sb); 557 557 mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_XATTR); 558 - down_write(&dentry->d_inode->i_alloc_sem); 558 + inode_dio_wait(dentry->d_inode); 559 559 reiserfs_write_lock(inode->i_sb); 560 560 561 561 err = reiserfs_setattr(dentry, &newattrs); 562 - up_write(&dentry->d_inode->i_alloc_sem); 563 562 mutex_unlock(&dentry->d_inode->i_mutex); 564 563 } else 565 564 update_ctime(inode); ··· 867 868 return err; 868 869 } 869 870 870 - static int reiserfs_check_acl(struct inode *inode, int mask, unsigned int flags) 871 + int reiserfs_check_acl(struct inode *inode, int mask) 871 872 { 872 873 struct posix_acl *acl; 873 874 int error = -EAGAIN; /* do regular unix permission checks by default */ 874 875 875 - if (flags & IPERM_FLAG_RCU) 876 + /* 877 + * Stat data v1 doesn't support ACLs. 878 + */ 879 + if (get_inode_sd_version(inode) == STAT_DATA_V1) 880 + return -EAGAIN; 881 + 882 + if (mask & MAY_NOT_BLOCK) 876 883 return -ECHILD; 877 884 878 885 acl = reiserfs_get_acl(inode, ACL_TYPE_ACCESS); ··· 957 952 return 0; 958 953 } 959 954 960 - int reiserfs_permission(struct inode *inode, int mask, unsigned int flags) 955 + int reiserfs_permission(struct inode *inode, int mask) 961 956 { 962 957 /* 963 958 * We don't do permission checks on the internal objects. ··· 966 961 if (IS_PRIVATE(inode)) 967 962 return 0; 968 963 969 - #ifdef CONFIG_REISERFS_FS_XATTR 970 - /* 971 - * Stat data v1 doesn't support ACLs. 972 - */ 973 - if (get_inode_sd_version(inode) != STAT_DATA_V1) 974 - return generic_permission(inode, mask, flags, 975 - reiserfs_check_acl); 976 - #endif 977 - return generic_permission(inode, mask, flags, NULL); 964 + return generic_permission(inode, mask); 978 965 } 979 966 980 967 static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd)
+1 -9
fs/squashfs/namei.c
··· 220 220 blk, off, ino_num); 221 221 222 222 inode = squashfs_iget(dir->i_sb, ino, ino_num); 223 - if (IS_ERR(inode)) { 224 - err = PTR_ERR(inode); 225 - goto failed; 226 - } 227 - 228 223 goto exit_lookup; 229 224 } 230 225 } ··· 227 232 228 233 exit_lookup: 229 234 kfree(dire); 230 - if (inode) 231 - return d_splice_alias(inode, dentry); 232 - d_add(dentry, inode); 233 - return ERR_PTR(0); 235 + return d_splice_alias(inode, dentry); 234 236 235 237 data_error: 236 238 err = -EIO;
+167 -11
fs/super.c
··· 38 38 LIST_HEAD(super_blocks); 39 39 DEFINE_SPINLOCK(sb_lock); 40 40 41 + /* 42 + * One thing we have to be careful of with a per-sb shrinker is that we don't 43 + * drop the last active reference to the superblock from within the shrinker. 44 + * If that happens we could trigger unregistering the shrinker from within the 45 + * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we 46 + * take a passive reference to the superblock to avoid this from occurring. 47 + */ 48 + static int prune_super(struct shrinker *shrink, struct shrink_control *sc) 49 + { 50 + struct super_block *sb; 51 + int fs_objects = 0; 52 + int total_objects; 53 + 54 + sb = container_of(shrink, struct super_block, s_shrink); 55 + 56 + /* 57 + * Deadlock avoidance. We may hold various FS locks, and we don't want 58 + * to recurse into the FS that called us in clear_inode() and friends.. 59 + */ 60 + if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS)) 61 + return -1; 62 + 63 + if (!grab_super_passive(sb)) 64 + return -1; 65 + 66 + if (sb->s_op && sb->s_op->nr_cached_objects) 67 + fs_objects = sb->s_op->nr_cached_objects(sb); 68 + 69 + total_objects = sb->s_nr_dentry_unused + 70 + sb->s_nr_inodes_unused + fs_objects + 1; 71 + 72 + if (sc->nr_to_scan) { 73 + int dentries; 74 + int inodes; 75 + 76 + /* proportion the scan between the caches */ 77 + dentries = (sc->nr_to_scan * sb->s_nr_dentry_unused) / 78 + total_objects; 79 + inodes = (sc->nr_to_scan * sb->s_nr_inodes_unused) / 80 + total_objects; 81 + if (fs_objects) 82 + fs_objects = (sc->nr_to_scan * fs_objects) / 83 + total_objects; 84 + /* 85 + * prune the dcache first as the icache is pinned by it, then 86 + * prune the icache, followed by the filesystem specific caches 87 + */ 88 + prune_dcache_sb(sb, dentries); 89 + prune_icache_sb(sb, inodes); 90 + 91 + if (fs_objects && sb->s_op->free_cached_objects) { 92 + sb->s_op->free_cached_objects(sb, fs_objects); 93 + fs_objects = sb->s_op->nr_cached_objects(sb); 94 + } 95 + 
total_objects = sb->s_nr_dentry_unused + 96 + sb->s_nr_inodes_unused + fs_objects; 97 + } 98 + 99 + total_objects = (total_objects / 100) * sysctl_vfs_cache_pressure; 100 + drop_super(sb); 101 + return total_objects; 102 + } 103 + 41 104 /** 42 105 * alloc_super - create new superblock 43 106 * @type: filesystem type superblock should belong to ··· 140 77 INIT_HLIST_BL_HEAD(&s->s_anon); 141 78 INIT_LIST_HEAD(&s->s_inodes); 142 79 INIT_LIST_HEAD(&s->s_dentry_lru); 80 + INIT_LIST_HEAD(&s->s_inode_lru); 81 + spin_lock_init(&s->s_inode_lru_lock); 143 82 init_rwsem(&s->s_umount); 144 83 mutex_init(&s->s_lock); 145 84 lockdep_set_class(&s->s_umount, &type->s_umount_key); ··· 179 114 s->s_op = &default_op; 180 115 s->s_time_gran = 1000000000; 181 116 s->cleancache_poolid = -1; 117 + 118 + s->s_shrink.seeks = DEFAULT_SEEKS; 119 + s->s_shrink.shrink = prune_super; 120 + s->s_shrink.batch = 1024; 182 121 } 183 122 out: 184 123 return s; ··· 250 181 if (atomic_dec_and_test(&s->s_active)) { 251 182 cleancache_flush_fs(s); 252 183 fs->kill_sb(s); 184 + 185 + /* caches are now gone, we can safely kill the shrinker now */ 186 + unregister_shrinker(&s->s_shrink); 187 + 253 188 /* 254 189 * We need to call rcu_barrier so all the delayed rcu free 255 190 * inodes are flushed before we release the fs module. ··· 314 241 } 315 242 316 243 /* 244 + * grab_super_passive - acquire a passive reference 245 + * @s: reference we are trying to grab 246 + * 247 + * Tries to acquire a passive reference. This is used in places where we 248 + * cannot take an active reference but we need to ensure that the 249 + * superblock does not go away while we are working on it. It returns 250 + * false if a reference was not gained, and returns true with the s_umount 251 + * lock held in read mode if a reference is gained. On successful return, 252 + * the caller must drop the s_umount lock and the passive reference when 253 + * done. 
254 + */ 255 + bool grab_super_passive(struct super_block *sb) 256 + { 257 + spin_lock(&sb_lock); 258 + if (list_empty(&sb->s_instances)) { 259 + spin_unlock(&sb_lock); 260 + return false; 261 + } 262 + 263 + sb->s_count++; 264 + spin_unlock(&sb_lock); 265 + 266 + if (down_read_trylock(&sb->s_umount)) { 267 + if (sb->s_root) 268 + return true; 269 + up_read(&sb->s_umount); 270 + } 271 + 272 + put_super(sb); 273 + return false; 274 + } 275 + 276 + /* 317 277 * Superblock locking. We really ought to get rid of these two. 318 278 */ 319 279 void lock_super(struct super_block * sb) ··· 381 275 void generic_shutdown_super(struct super_block *sb) 382 276 { 383 277 const struct super_operations *sop = sb->s_op; 384 - 385 278 386 279 if (sb->s_root) { 387 280 shrink_dcache_for_umount(sb); ··· 469 364 list_add(&s->s_instances, &type->fs_supers); 470 365 spin_unlock(&sb_lock); 471 366 get_filesystem(type); 367 + register_shrinker(&s->s_shrink); 472 368 return s; 473 369 } 474 370 ··· 556 450 __put_super(p); 557 451 spin_unlock(&sb_lock); 558 452 } 453 + 454 + /** 455 + * iterate_supers_type - call function for superblocks of given type 456 + * @type: fs type 457 + * @f: function to call 458 + * @arg: argument to pass to it 459 + * 460 + * Scans the superblock list and calls given function, passing it 461 + * locked superblock and given argument. 
462 + */ 463 + void iterate_supers_type(struct file_system_type *type, 464 + void (*f)(struct super_block *, void *), void *arg) 465 + { 466 + struct super_block *sb, *p = NULL; 467 + 468 + spin_lock(&sb_lock); 469 + list_for_each_entry(sb, &type->fs_supers, s_instances) { 470 + sb->s_count++; 471 + spin_unlock(&sb_lock); 472 + 473 + down_read(&sb->s_umount); 474 + if (sb->s_root) 475 + f(sb, arg); 476 + up_read(&sb->s_umount); 477 + 478 + spin_lock(&sb_lock); 479 + if (p) 480 + __put_super(p); 481 + p = sb; 482 + } 483 + if (p) 484 + __put_super(p); 485 + spin_unlock(&sb_lock); 486 + } 487 + 488 + EXPORT_SYMBOL(iterate_supers_type); 559 489 560 490 /** 561 491 * get_super - get the superblock of a device ··· 799 657 static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */ 800 658 static int unnamed_dev_start = 0; /* don't bother trying below it */ 801 659 802 - int set_anon_super(struct super_block *s, void *data) 660 + int get_anon_bdev(dev_t *p) 803 661 { 804 662 int dev; 805 663 int error; ··· 826 684 spin_unlock(&unnamed_dev_lock); 827 685 return -EMFILE; 828 686 } 829 - s->s_dev = MKDEV(0, dev & MINORMASK); 830 - s->s_bdi = &noop_backing_dev_info; 687 + *p = MKDEV(0, dev & MINORMASK); 831 688 return 0; 689 + } 690 + EXPORT_SYMBOL(get_anon_bdev); 691 + 692 + void free_anon_bdev(dev_t dev) 693 + { 694 + int slot = MINOR(dev); 695 + spin_lock(&unnamed_dev_lock); 696 + ida_remove(&unnamed_dev_ida, slot); 697 + if (slot < unnamed_dev_start) 698 + unnamed_dev_start = slot; 699 + spin_unlock(&unnamed_dev_lock); 700 + } 701 + EXPORT_SYMBOL(free_anon_bdev); 702 + 703 + int set_anon_super(struct super_block *s, void *data) 704 + { 705 + int error = get_anon_bdev(&s->s_dev); 706 + if (!error) 707 + s->s_bdi = &noop_backing_dev_info; 708 + return error; 832 709 } 833 710 834 711 EXPORT_SYMBOL(set_anon_super); 835 712 836 713 void kill_anon_super(struct super_block *sb) 837 714 { 838 - int slot = MINOR(sb->s_dev); 839 - 715 + dev_t dev = sb->s_dev; 840 716 
generic_shutdown_super(sb); 841 - spin_lock(&unnamed_dev_lock); 842 - ida_remove(&unnamed_dev_ida, slot); 843 - if (slot < unnamed_dev_start) 844 - unnamed_dev_start = slot; 845 - spin_unlock(&unnamed_dev_lock); 717 + free_anon_bdev(dev); 846 718 } 847 719 848 720 EXPORT_SYMBOL(kill_anon_super);
+3 -22
fs/sync.c
··· 165 165 */ 166 166 int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync) 167 167 { 168 - struct address_space *mapping = file->f_mapping; 169 - int err, ret; 170 - 171 - if (!file->f_op || !file->f_op->fsync) { 172 - ret = -EINVAL; 173 - goto out; 174 - } 175 - 176 - ret = filemap_write_and_wait_range(mapping, start, end); 177 - 178 - /* 179 - * We need to protect against concurrent writers, which could cause 180 - * livelocks in fsync_buffers_list(). 181 - */ 182 - mutex_lock(&mapping->host->i_mutex); 183 - err = file->f_op->fsync(file, datasync); 184 - if (!ret) 185 - ret = err; 186 - mutex_unlock(&mapping->host->i_mutex); 187 - 188 - out: 189 - return ret; 168 + if (!file->f_op || !file->f_op->fsync) 169 + return -EINVAL; 170 + return file->f_op->fsync(file, start, end, datasync); 190 171 } 191 172 EXPORT_SYMBOL(vfs_fsync_range); 192 173
+3 -3
fs/sysfs/inode.c
··· 349 349 return -ENOENT; 350 350 } 351 351 352 - int sysfs_permission(struct inode *inode, int mask, unsigned int flags) 352 + int sysfs_permission(struct inode *inode, int mask) 353 353 { 354 354 struct sysfs_dirent *sd; 355 355 356 - if (flags & IPERM_FLAG_RCU) 356 + if (mask & MAY_NOT_BLOCK) 357 357 return -ECHILD; 358 358 359 359 sd = inode->i_private; ··· 362 362 sysfs_refresh_inode(sd, inode); 363 363 mutex_unlock(&sysfs_mutex); 364 364 365 - return generic_permission(inode, mask, flags, NULL); 365 + return generic_permission(inode, mask); 366 366 }
+1 -1
fs/sysfs/sysfs.h
··· 201 201 struct inode *sysfs_get_inode(struct super_block *sb, struct sysfs_dirent *sd); 202 202 void sysfs_evict_inode(struct inode *inode); 203 203 int sysfs_sd_setattr(struct sysfs_dirent *sd, struct iattr *iattr); 204 - int sysfs_permission(struct inode *inode, int mask, unsigned int flags); 204 + int sysfs_permission(struct inode *inode, int mask); 205 205 int sysfs_setattr(struct dentry *dentry, struct iattr *iattr); 206 206 int sysfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat); 207 207 int sysfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+11 -10
fs/ubifs/file.c
··· 1304 1304 return NULL; 1305 1305 } 1306 1306 1307 - int ubifs_fsync(struct file *file, int datasync) 1307 + int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync) 1308 1308 { 1309 1309 struct inode *inode = file->f_mapping->host; 1310 1310 struct ubifs_info *c = inode->i_sb->s_fs_info; ··· 1319 1319 */ 1320 1320 return 0; 1321 1321 1322 - /* 1323 - * VFS has already synchronized dirty pages for this inode. Synchronize 1324 - * the inode unless this is a 'datasync()' call. 1325 - */ 1322 + err = filemap_write_and_wait_range(inode->i_mapping, start, end); 1323 + if (err) 1324 + return err; 1325 + mutex_lock(&inode->i_mutex); 1326 + 1327 + /* Synchronize the inode unless this is a 'datasync()' call. */ 1326 1328 if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) { 1327 1329 err = inode->i_sb->s_op->write_inode(inode, NULL); 1328 1330 if (err) 1329 - return err; 1331 + goto out; 1330 1332 } 1331 1333 1332 1334 /* ··· 1336 1334 * them. 1337 1335 */ 1338 1336 err = ubifs_sync_wbufs_by_inode(c, inode); 1339 - if (err) 1340 - return err; 1341 - 1342 - return 0; 1337 + out: 1338 + mutex_unlock(&inode->i_mutex); 1339 + return err; 1343 1340 } 1344 1341 1345 1342 /**
+1 -1
fs/ubifs/ubifs.h
··· 1729 1729 int ubifs_calc_dark(const struct ubifs_info *c, int spc); 1730 1730 1731 1731 /* file.c */ 1732 - int ubifs_fsync(struct file *file, int datasync); 1732 + int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync); 1733 1733 int ubifs_setattr(struct dentry *dentry, struct iattr *attr); 1734 1734 1735 1735 /* dir.c */
+1 -1
fs/udf/file.c
··· 150 150 long old_block, new_block; 151 151 int result = -EINVAL; 152 152 153 - if (file_permission(filp, MAY_READ) != 0) { 153 + if (inode_permission(inode, MAY_READ) != 0) { 154 154 udf_debug("no permission to access inode %lu\n", inode->i_ino); 155 155 result = -EPERM; 156 156 goto out;
-2
fs/ufs/namei.c
··· 59 59 if (ino) 60 60 inode = ufs_iget(dir->i_sb, ino); 61 61 unlock_ufs(dir->i_sb); 62 - if (IS_ERR(inode)) 63 - return ERR_CAST(inode); 64 62 return d_splice_alias(inode, dentry); 65 63 } 66 64
+2 -2
fs/xfs/linux-2.6/xfs_acl.c
··· 219 219 } 220 220 221 221 int 222 - xfs_check_acl(struct inode *inode, int mask, unsigned int flags) 222 + xfs_check_acl(struct inode *inode, int mask) 223 223 { 224 224 struct xfs_inode *ip; 225 225 struct posix_acl *acl; ··· 235 235 if (!XFS_IFORK_Q(ip)) 236 236 return -EAGAIN; 237 237 238 - if (flags & IPERM_FLAG_RCU) { 238 + if (mask & MAY_NOT_BLOCK) { 239 239 if (!negative_cached_acl(inode, ACL_TYPE_ACCESS)) 240 240 return -ECHILD; 241 241 return -EAGAIN;
+3
fs/xfs/linux-2.6/xfs_aops.c
··· 1329 1329 } else { 1330 1330 xfs_finish_ioend_sync(ioend); 1331 1331 } 1332 + 1333 + /* XXX: probably should move into the real I/O completion handler */ 1334 + inode_dio_done(ioend->io_inode); 1332 1335 } 1333 1336 1334 1337 STATIC ssize_t
+8 -9
fs/xfs/linux-2.6/xfs_file.c
··· 127 127 STATIC int 128 128 xfs_file_fsync( 129 129 struct file *file, 130 + loff_t start, 131 + loff_t end, 130 132 int datasync) 131 133 { 132 134 struct inode *inode = file->f_mapping->host; ··· 139 137 int log_flushed = 0; 140 138 141 139 trace_xfs_file_fsync(ip); 140 + 141 + error = filemap_write_and_wait_range(inode->i_mapping, start, end); 142 + if (error) 143 + return error; 142 144 143 145 if (XFS_FORCED_SHUTDOWN(mp)) 144 146 return -XFS_ERROR(EIO); ··· 881 875 /* Handle various SYNC-type writes */ 882 876 if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) { 883 877 loff_t end = pos + ret - 1; 884 - int error, error2; 885 878 886 879 xfs_rw_iunlock(ip, iolock); 887 - error = filemap_write_and_wait_range(mapping, pos, end); 880 + ret = -xfs_file_fsync(file, pos, end, 881 + (file->f_flags & __O_SYNC) ? 0 : 1); 888 882 xfs_rw_ilock(ip, iolock); 889 - 890 - error2 = -xfs_file_fsync(file, 891 - (file->f_flags & __O_SYNC) ? 0 : 1); 892 - if (error) 893 - ret = error; 894 - else if (error2) 895 - ret = error2; 896 883 } 897 884 898 885 out_unlock:
+17 -10
fs/xfs/linux-2.6/xfs_super.c
··· 1024 1024 { 1025 1025 struct xfs_mount *mp = XFS_M(sb); 1026 1026 1027 - /* 1028 - * Unregister the memory shrinker before we tear down the mount 1029 - * structure so we don't have memory reclaim racing with us here. 1030 - */ 1031 - xfs_inode_shrinker_unregister(mp); 1032 1027 xfs_syncd_stop(mp); 1033 1028 1034 1029 /* ··· 1406 1411 sb->s_time_gran = 1; 1407 1412 set_posix_acl_flag(sb); 1408 1413 1409 - xfs_inode_shrinker_register(mp); 1410 - 1411 1414 error = xfs_mountfs(mp); 1412 1415 if (error) 1413 1416 goto out_filestream_unmount; ··· 1432 1439 return 0; 1433 1440 1434 1441 out_filestream_unmount: 1435 - xfs_inode_shrinker_unregister(mp); 1436 1442 xfs_filestream_unmount(mp); 1437 1443 out_free_sb: 1438 1444 xfs_freesb(mp); ··· 1450 1458 out_syncd_stop: 1451 1459 xfs_syncd_stop(mp); 1452 1460 out_unmount: 1453 - xfs_inode_shrinker_unregister(mp); 1454 - 1455 1461 /* 1456 1462 * Blow away any referenced inode in the filestreams cache. 1457 1463 * This can and will cause log traffic as inodes go inactive ··· 1473 1483 return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super); 1474 1484 } 1475 1485 1486 + static int 1487 + xfs_fs_nr_cached_objects( 1488 + struct super_block *sb) 1489 + { 1490 + return xfs_reclaim_inodes_count(XFS_M(sb)); 1491 + } 1492 + 1493 + static void 1494 + xfs_fs_free_cached_objects( 1495 + struct super_block *sb, 1496 + int nr_to_scan) 1497 + { 1498 + xfs_reclaim_inodes_nr(XFS_M(sb), nr_to_scan); 1499 + } 1500 + 1476 1501 static const struct super_operations xfs_super_operations = { 1477 1502 .alloc_inode = xfs_fs_alloc_inode, 1478 1503 .destroy_inode = xfs_fs_destroy_inode, ··· 1501 1496 .statfs = xfs_fs_statfs, 1502 1497 .remount_fs = xfs_fs_remount, 1503 1498 .show_options = xfs_fs_show_options, 1499 + .nr_cached_objects = xfs_fs_nr_cached_objects, 1500 + .free_cached_objects = xfs_fs_free_cached_objects, 1504 1501 }; 1505 1502 1506 1503 static struct file_system_type xfs_fs_type = {
+27 -44
fs/xfs/linux-2.6/xfs_sync.c
··· 179 179 if (error == EFSCORRUPTED) 180 180 break; 181 181 182 + cond_resched(); 183 + 182 184 } while (nr_found && !done); 183 185 184 186 if (skipped) { ··· 986 984 987 985 *nr_to_scan -= XFS_LOOKUP_BATCH; 988 986 987 + cond_resched(); 988 + 989 989 } while (nr_found && !done && *nr_to_scan > 0); 990 990 991 991 if (trylock && !done) ··· 1005 1001 * ensure that when we get more reclaimers than AGs we block rather 1006 1002 * than spin trying to execute reclaim. 1007 1003 */ 1008 - if (trylock && skipped && *nr_to_scan > 0) { 1004 + if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) { 1009 1005 trylock = 0; 1010 1006 goto restart; 1011 1007 } ··· 1023 1019 } 1024 1020 1025 1021 /* 1026 - * Inode cache shrinker. 1022 + * Scan a certain number of inodes for reclaim. 1027 1023 * 1028 1024 * When called we make sure that there is a background (fast) inode reclaim in 1029 - * progress, while we will throttle the speed of reclaim via doiing synchronous 1025 + * progress, while we will throttle the speed of reclaim via doing synchronous 1030 1026 * reclaim of inodes. That means if we come across dirty inodes, we wait for 1031 1027 * them to be cleaned, which we hope will not be very long due to the 1032 1028 * background walker having already kicked the IO off on those dirty inodes. 
1033 1029 */ 1034 - static int 1035 - xfs_reclaim_inode_shrink( 1036 - struct shrinker *shrink, 1037 - struct shrink_control *sc) 1030 + void 1031 + xfs_reclaim_inodes_nr( 1032 + struct xfs_mount *mp, 1033 + int nr_to_scan) 1038 1034 { 1039 - struct xfs_mount *mp; 1040 - struct xfs_perag *pag; 1041 - xfs_agnumber_t ag; 1042 - int reclaimable; 1043 - int nr_to_scan = sc->nr_to_scan; 1044 - gfp_t gfp_mask = sc->gfp_mask; 1035 + /* kick background reclaimer and push the AIL */ 1036 + xfs_syncd_queue_reclaim(mp); 1037 + xfs_ail_push_all(mp->m_ail); 1045 1038 1046 - mp = container_of(shrink, struct xfs_mount, m_inode_shrink); 1047 - if (nr_to_scan) { 1048 - /* kick background reclaimer and push the AIL */ 1049 - xfs_syncd_queue_reclaim(mp); 1050 - xfs_ail_push_all(mp->m_ail); 1039 + xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan); 1040 + } 1051 1041 1052 - if (!(gfp_mask & __GFP_FS)) 1053 - return -1; 1042 + /* 1043 + * Return the number of reclaimable inodes in the filesystem for 1044 + * the shrinker to determine how much to reclaim. 
1045 + */ 1046 + int 1047 + xfs_reclaim_inodes_count( 1048 + struct xfs_mount *mp) 1049 + { 1050 + struct xfs_perag *pag; 1051 + xfs_agnumber_t ag = 0; 1052 + int reclaimable = 0; 1054 1053 1055 - xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, 1056 - &nr_to_scan); 1057 - /* terminate if we don't exhaust the scan */ 1058 - if (nr_to_scan > 0) 1059 - return -1; 1060 - } 1061 - 1062 - reclaimable = 0; 1063 - ag = 0; 1064 1054 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) { 1065 1055 ag = pag->pag_agno + 1; 1066 1056 reclaimable += pag->pag_ici_reclaimable; ··· 1063 1065 return reclaimable; 1064 1066 } 1065 1067 1066 - void 1067 - xfs_inode_shrinker_register( 1068 - struct xfs_mount *mp) 1069 - { 1070 - mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink; 1071 - mp->m_inode_shrink.seeks = DEFAULT_SEEKS; 1072 - register_shrinker(&mp->m_inode_shrink); 1073 - } 1074 - 1075 - void 1076 - xfs_inode_shrinker_unregister( 1077 - struct xfs_mount *mp) 1078 - { 1079 - unregister_shrinker(&mp->m_inode_shrink); 1080 - }
+2 -3
fs/xfs/linux-2.6/xfs_sync.h
··· 35 35 void xfs_flush_inodes(struct xfs_inode *ip); 36 36 37 37 int xfs_reclaim_inodes(struct xfs_mount *mp, int mode); 38 + int xfs_reclaim_inodes_count(struct xfs_mount *mp); 39 + void xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan); 38 40 39 41 void xfs_inode_set_reclaim_tag(struct xfs_inode *ip); 40 42 void __xfs_inode_set_reclaim_tag(struct xfs_perag *pag, struct xfs_inode *ip); ··· 47 45 int xfs_inode_ag_iterator(struct xfs_mount *mp, 48 46 int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags), 49 47 int flags); 50 - 51 - void xfs_inode_shrinker_register(struct xfs_mount *mp); 52 - void xfs_inode_shrinker_unregister(struct xfs_mount *mp); 53 48 54 49 #endif
+1 -1
fs/xfs/xfs_acl.h
··· 42 42 #define SGI_ACL_DEFAULT_SIZE (sizeof(SGI_ACL_DEFAULT)-1) 43 43 44 44 #ifdef CONFIG_XFS_POSIX_ACL 45 - extern int xfs_check_acl(struct inode *inode, int mask, unsigned int flags); 45 + extern int xfs_check_acl(struct inode *inode, int mask); 46 46 extern struct posix_acl *xfs_get_acl(struct inode *inode, int type); 47 47 extern int xfs_inherit_acl(struct inode *inode, struct posix_acl *default_acl); 48 48 extern int xfs_acl_chmod(struct inode *inode);
+2
include/linux/anon_inodes.h
··· 8 8 #ifndef _LINUX_ANON_INODES_H 9 9 #define _LINUX_ANON_INODES_H 10 10 11 + struct file_operations; 12 + 11 13 struct file *anon_inode_getfile(const char *name, 12 14 const struct file_operations *fops, 13 15 void *priv, int flags);
+26
include/linux/atomic.h
··· 34 34 } 35 35 #endif 36 36 37 + #ifndef atomic_inc_unless_negative 38 + static inline int atomic_inc_unless_negative(atomic_t *p) 39 + { 40 + int v, v1; 41 + for (v = 0; v >= 0; v = v1) { 42 + v1 = atomic_cmpxchg(p, v, v + 1); 43 + if (likely(v1 == v)) 44 + return 1; 45 + } 46 + return 0; 47 + } 48 + #endif 49 + 50 + #ifndef atomic_dec_unless_positive 51 + static inline int atomic_dec_unless_positive(atomic_t *p) 52 + { 53 + int v, v1; 54 + for (v = 0; v <= 0; v = v1) { 55 + v1 = atomic_cmpxchg(p, v, v - 1); 56 + if (likely(v1 == v)) 57 + return 1; 58 + } 59 + return 0; 60 + } 61 + #endif 62 + 37 63 #ifndef CONFIG_ARCH_HAS_ATOMIC_OR 38 64 static inline void atomic_or(int i, atomic_t *v) 39 65 {
+1
include/linux/binfmts.h
··· 111 111 extern int search_binary_handler(struct linux_binprm *, struct pt_regs *); 112 112 extern int flush_old_exec(struct linux_binprm * bprm); 113 113 extern void setup_new_exec(struct linux_binprm * bprm); 114 + extern void would_dump(struct linux_binprm *, struct file *); 114 115 115 116 extern int suid_dumpable; 116 117 #define SUID_DUMP_DISABLE 0 /* No setuid dumping */
+7 -1
include/linux/dcache.h
··· 216 216 #define DCACHE_MOUNTED 0x10000 /* is a mountpoint */ 217 217 #define DCACHE_NEED_AUTOMOUNT 0x20000 /* handle automount on this dir */ 218 218 #define DCACHE_MANAGE_TRANSIT 0x40000 /* manage transit from this dirent */ 219 + #define DCACHE_NEED_LOOKUP 0x80000 /* dentry requires i_op->lookup */ 219 220 #define DCACHE_MANAGED_DENTRY \ 220 221 (DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT) 221 222 ··· 417 416 return dentry->d_flags & DCACHE_MOUNTED; 418 417 } 419 418 420 - extern struct dentry *lookup_create(struct nameidata *nd, int is_dir); 419 + static inline bool d_need_lookup(struct dentry *dentry) 420 + { 421 + return dentry->d_flags & DCACHE_NEED_LOOKUP; 422 + } 423 + 424 + extern void d_clear_need_lookup(struct dentry *dentry); 421 425 422 426 extern int sysctl_vfs_cache_pressure; 423 427
+1 -1
include/linux/ext3_fs.h
··· 877 877 extern void ext3_htree_free_dir_info(struct dir_private_info *p); 878 878 879 879 /* fsync.c */ 880 - extern int ext3_sync_file(struct file *, int); 880 + extern int ext3_sync_file(struct file *, loff_t, loff_t, int); 881 881 882 882 /* hash.c */ 883 883 extern int ext3fs_dirhash(const char *name, int len, struct
+2 -1
include/linux/fb.h
··· 1043 1043 struct inode *inode, 1044 1044 struct file *file); 1045 1045 extern void fb_deferred_io_cleanup(struct fb_info *info); 1046 - extern int fb_deferred_io_fsync(struct file *file, int datasync); 1046 + extern int fb_deferred_io_fsync(struct file *file, loff_t start, 1047 + loff_t end, int datasync); 1047 1048 1048 1049 static inline bool fb_be_math(struct fb_info *info) 1049 1050 {
+73 -27
include/linux/fs.h
··· 32 32 #define SEEK_SET 0 /* seek relative to beginning of file */ 33 33 #define SEEK_CUR 1 /* seek relative to current file position */ 34 34 #define SEEK_END 2 /* seek relative to end of file */ 35 - #define SEEK_MAX SEEK_END 35 + #define SEEK_DATA 3 /* seek to the next data */ 36 + #define SEEK_HOLE 4 /* seek to the next hole */ 37 + #define SEEK_MAX SEEK_HOLE 36 38 37 39 struct fstrim_range { 38 40 __u64 start; ··· 65 63 #define MAY_ACCESS 16 66 64 #define MAY_OPEN 32 67 65 #define MAY_CHDIR 64 66 + #define MAY_NOT_BLOCK 128 /* called from RCU mode, don't block */ 68 67 69 68 /* 70 69 * flags in file.f_mode. Note that FMODE_READ and FMODE_WRITE must correspond ··· 395 392 #include <linux/semaphore.h> 396 393 #include <linux/fiemap.h> 397 394 #include <linux/rculist_bl.h> 395 + #include <linux/shrinker.h> 396 + #include <linux/atomic.h> 398 397 399 - #include <asm/atomic.h> 400 398 #include <asm/byteorder.h> 401 399 402 400 struct export_operations; ··· 781 777 struct timespec i_ctime; 782 778 blkcnt_t i_blocks; 783 779 unsigned short i_bytes; 784 - struct rw_semaphore i_alloc_sem; 780 + atomic_t i_dio_count; 785 781 const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ 786 782 struct file_lock *i_flock; 787 783 struct address_space *i_mapping; ··· 1400 1396 struct list_head s_dentry_lru; /* unused dentry lru */ 1401 1397 int s_nr_dentry_unused; /* # of dentry on lru */ 1402 1398 1399 + /* s_inode_lru_lock protects s_inode_lru and s_nr_inodes_unused */ 1400 + spinlock_t s_inode_lru_lock ____cacheline_aligned_in_smp; 1401 + struct list_head s_inode_lru; /* unused inode lru */ 1402 + int s_nr_inodes_unused; /* # of inodes on lru */ 1403 + 1403 1404 struct block_device *s_bdev; 1404 1405 struct backing_dev_info *s_bdi; 1405 1406 struct mtd_info *s_mtd; ··· 1447 1438 * Saved pool identifier for cleancache (-1 means none) 1448 1439 */ 1449 1440 int cleancache_poolid; 1441 + 1442 + struct shrinker s_shrink; /* per-sb shrinker handle */ 1450 
1443 }; 1444 + 1445 + /* superblock cache pruning functions */ 1446 + extern void prune_icache_sb(struct super_block *sb, int nr_to_scan); 1447 + extern void prune_dcache_sb(struct super_block *sb, int nr_to_scan); 1451 1448 1452 1449 extern struct timespec current_fs_time(struct super_block *sb); 1453 1450 ··· 1505 1490 /* 1506 1491 * VFS file helper functions. 1507 1492 */ 1508 - extern int file_permission(struct file *, int); 1509 1493 extern void inode_init_owner(struct inode *inode, const struct inode *dir, 1510 1494 mode_t mode); 1511 1495 /* ··· 1552 1538 #define HAVE_COMPAT_IOCTL 1 1553 1539 #define HAVE_UNLOCKED_IOCTL 1 1554 1540 1555 - /* 1556 - * NOTE: 1557 - * all file operations except setlease can be called without 1558 - * the big kernel lock held in all filesystems. 1559 - */ 1560 1541 struct file_operations { 1561 1542 struct module *owner; 1562 1543 loff_t (*llseek) (struct file *, loff_t, int); ··· 1567 1558 int (*open) (struct inode *, struct file *); 1568 1559 int (*flush) (struct file *, fl_owner_t id); 1569 1560 int (*release) (struct inode *, struct file *); 1570 - int (*fsync) (struct file *, int datasync); 1561 + int (*fsync) (struct file *, loff_t, loff_t, int datasync); 1571 1562 int (*aio_fsync) (struct kiocb *, int datasync); 1572 1563 int (*fasync) (int, struct file *, int); 1573 1564 int (*lock) (struct file *, int, struct file_lock *); ··· 1582 1573 loff_t len); 1583 1574 }; 1584 1575 1585 - #define IPERM_FLAG_RCU 0x0001 1586 - 1587 1576 struct inode_operations { 1588 1577 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *); 1589 1578 void * (*follow_link) (struct dentry *, struct nameidata *); 1590 - int (*permission) (struct inode *, int, unsigned int); 1591 - int (*check_acl)(struct inode *, int, unsigned int); 1579 + int (*permission) (struct inode *, int); 1580 + int (*check_acl)(struct inode *, int); 1592 1581 1593 1582 int (*readlink) (struct dentry *, char __user *,int); 1594 1583 void (*put_link) 
(struct dentry *, struct nameidata *, void *); ··· 1652 1645 ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); 1653 1646 #endif 1654 1647 int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); 1648 + int (*nr_cached_objects)(struct super_block *); 1649 + void (*free_cached_objects)(struct super_block *, int); 1655 1650 }; 1656 1651 1657 1652 /* ··· 1702 1693 * set during data writeback, and cleared with a wakeup 1703 1694 * on the bit address once it is done. 1704 1695 * 1696 + * I_REFERENCED Marks the inode as recently references on the LRU list. 1697 + * 1698 + * I_DIO_WAKEUP Never set. Only used as a key for wait_on_bit(). 1699 + * 1705 1700 * Q: What is the difference between I_WILL_FREE and I_FREEING? 1706 1701 */ 1707 1702 #define I_DIRTY_SYNC (1 << 0) ··· 1719 1706 #define __I_SYNC 7 1720 1707 #define I_SYNC (1 << __I_SYNC) 1721 1708 #define I_REFERENCED (1 << 8) 1709 + #define __I_DIO_WAKEUP 9 1710 + #define I_DIO_WAKEUP (1 << I_DIO_WAKEUP) 1722 1711 1723 1712 #define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES) 1724 1713 ··· 1831 1816 struct lock_class_key i_lock_key; 1832 1817 struct lock_class_key i_mutex_key; 1833 1818 struct lock_class_key i_mutex_dir_key; 1834 - struct lock_class_key i_alloc_sem_key; 1835 1819 }; 1836 1820 1837 1821 extern struct dentry *mount_ns(struct file_system_type *fs_type, int flags, ··· 1851 1837 void deactivate_super(struct super_block *sb); 1852 1838 void deactivate_locked_super(struct super_block *sb); 1853 1839 int set_anon_super(struct super_block *s, void *data); 1840 + int get_anon_bdev(dev_t *); 1841 + void free_anon_bdev(dev_t); 1854 1842 struct super_block *sget(struct file_system_type *type, 1855 1843 int (*test)(struct super_block *,void *), 1856 1844 int (*set)(struct super_block *,void *), ··· 2204 2188 #endif 2205 2189 extern int notify_change(struct dentry *, struct iattr *); 2206 2190 extern int inode_permission(struct inode *, int); 2207 - extern 
int generic_permission(struct inode *, int, unsigned int, 2208 - int (*check_acl)(struct inode *, int, unsigned int)); 2191 + extern int generic_permission(struct inode *, int); 2209 2192 2210 2193 static inline bool execute_ok(struct inode *inode) 2211 2194 { 2212 2195 return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode); 2213 2196 } 2214 2197 2215 - extern int get_write_access(struct inode *); 2216 - extern int deny_write_access(struct file *); 2198 + /* 2199 + * get_write_access() gets write permission for a file. 2200 + * put_write_access() releases this write permission. 2201 + * This is used for regular files. 2202 + * We cannot support write (and maybe mmap read-write shared) accesses and 2203 + * MAP_DENYWRITE mmappings simultaneously. The i_writecount field of an inode 2204 + * can have the following values: 2205 + * 0: no writers, no VM_DENYWRITE mappings 2206 + * < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist 2207 + * > 0: (i_writecount) users are writing to the file. 2208 + * 2209 + * Normally we operate on that counter with atomic_{inc,dec} and it's safe 2210 + * except for the cases where we don't hold i_writecount yet. Then we need to 2211 + * use {get,deny}_write_access() - these functions check the sign and refuse 2212 + * to do the change if sign is wrong. 2213 + */ 2214 + static inline int get_write_access(struct inode *inode) 2215 + { 2216 + return atomic_inc_unless_negative(&inode->i_writecount) ? 0 : -ETXTBSY; 2217 + } 2218 + static inline int deny_write_access(struct file *file) 2219 + { 2220 + struct inode *inode = file->f_path.dentry->d_inode; 2221 + return atomic_dec_unless_positive(&inode->i_writecount) ? 
0 : -ETXTBSY; 2222 + } 2217 2223 static inline void put_write_access(struct inode * inode) 2218 2224 { 2219 2225 atomic_dec(&inode->i_writecount); ··· 2355 2317 /* fs/block_dev.c */ 2356 2318 extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov, 2357 2319 unsigned long nr_segs, loff_t pos); 2358 - extern int blkdev_fsync(struct file *filp, int datasync); 2320 + extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end, 2321 + int datasync); 2359 2322 2360 2323 /* fs/splice.c */ 2361 2324 extern ssize_t generic_file_splice_read(struct file *, loff_t *, ··· 2407 2368 }; 2408 2369 2409 2370 void dio_end_io(struct bio *bio, int error); 2371 + void inode_dio_wait(struct inode *inode); 2372 + void inode_dio_done(struct inode *inode); 2410 2373 2411 2374 ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, 2412 2375 struct block_device *bdev, const struct iovec *iov, loff_t offset, ··· 2416 2375 dio_submit_t submit_io, int flags); 2417 2376 2418 2377 static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, 2419 - struct inode *inode, struct block_device *bdev, const struct iovec *iov, 2420 - loff_t offset, unsigned long nr_segs, get_block_t get_block, 2421 - dio_iodone_t end_io) 2378 + struct inode *inode, const struct iovec *iov, loff_t offset, 2379 + unsigned long nr_segs, get_block_t get_block) 2422 2380 { 2423 - return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, 2424 - nr_segs, get_block, end_io, NULL, 2381 + return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 2382 + offset, nr_segs, get_block, NULL, NULL, 2425 2383 DIO_LOCKING | DIO_SKIP_HOLES); 2384 + } 2385 + #else 2386 + static inline void inode_dio_wait(struct inode *inode) 2387 + { 2426 2388 } 2427 2389 #endif 2428 2390 ··· 2476 2432 extern struct super_block *user_get_super(dev_t); 2477 2433 extern void drop_super(struct super_block *sb); 2478 2434 extern void iterate_supers(void (*)(struct super_block *, void 
*), void *); 2435 + extern void iterate_supers_type(struct file_system_type *, 2436 + void (*)(struct super_block *, void *), void *); 2479 2437 2480 2438 extern int dcache_dir_open(struct inode *, struct file *); 2481 2439 extern int dcache_dir_close(struct inode *, struct file *); ··· 2490 2444 extern int simple_unlink(struct inode *, struct dentry *); 2491 2445 extern int simple_rmdir(struct inode *, struct dentry *); 2492 2446 extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); 2493 - extern int noop_fsync(struct file *, int); 2447 + extern int noop_fsync(struct file *, loff_t, loff_t, int); 2494 2448 extern int simple_empty(struct dentry *); 2495 2449 extern int simple_readpage(struct file *file, struct page *page); 2496 2450 extern int simple_write_begin(struct file *file, struct address_space *mapping, ··· 2515 2469 extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos, 2516 2470 const void __user *from, size_t count); 2517 2471 2518 - extern int generic_file_fsync(struct file *, int); 2472 + extern int generic_file_fsync(struct file *, loff_t, loff_t, int); 2519 2473 2520 2474 extern int generic_check_addressable(unsigned, u64); 2521 2475
+1 -1
include/linux/generic_acl.h
··· 10 10 11 11 int generic_acl_init(struct inode *, struct inode *); 12 12 int generic_acl_chmod(struct inode *); 13 - int generic_check_acl(struct inode *inode, int mask, unsigned int flags); 13 + int generic_check_acl(struct inode *inode, int mask); 14 14 15 15 #endif /* LINUX_GENERIC_ACL_H */
+1 -38
include/linux/mm.h
··· 15 15 #include <linux/range.h> 16 16 #include <linux/pfn.h> 17 17 #include <linux/bit_spinlock.h> 18 + #include <linux/shrinker.h> 18 19 19 20 struct mempolicy; 20 21 struct anon_vma; ··· 1121 1120 { 1122 1121 } 1123 1122 #endif 1124 - 1125 - /* 1126 - * This struct is used to pass information from page reclaim to the shrinkers. 1127 - * We consolidate the values for easier extention later. 1128 - */ 1129 - struct shrink_control { 1130 - gfp_t gfp_mask; 1131 - 1132 - /* How many slab objects shrinker() should scan and try to reclaim */ 1133 - unsigned long nr_to_scan; 1134 - }; 1135 - 1136 - /* 1137 - * A callback you can register to apply pressure to ageable caches. 1138 - * 1139 - * 'sc' is passed shrink_control which includes a count 'nr_to_scan' 1140 - * and a 'gfpmask'. It should look through the least-recently-used 1141 - * 'nr_to_scan' entries and attempt to free them up. It should return 1142 - * the number of objects which remain in the cache. If it returns -1, it means 1143 - * it cannot do any scanning at this time (eg. there is a risk of deadlock). 1144 - * 1145 - * The 'gfpmask' refers to the allocation we are currently trying to 1146 - * fulfil. 1147 - * 1148 - * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is 1149 - * querying the cache size, so a fastpath for that case is appropriate. 1150 - */ 1151 - struct shrinker { 1152 - int (*shrink)(struct shrinker *, struct shrink_control *sc); 1153 - int seeks; /* seeks to recreate an obj */ 1154 - 1155 - /* These are for internal use */ 1156 - struct list_head list; 1157 - long nr; /* objs pending delete */ 1158 - }; 1159 - #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */ 1160 - extern void register_shrinker(struct shrinker *); 1161 - extern void unregister_shrinker(struct shrinker *); 1162 1123 1163 1124 int vma_wants_writenotify(struct vm_area_struct *vma); 1164 1125
-1
include/linux/mnt_namespace.h
··· 18 18 struct seq_file m; /* must be the first element */ 19 19 struct mnt_namespace *ns; 20 20 struct path root; 21 - int event; 22 21 }; 23 22 24 23 struct fs_struct;
+3 -2
include/linux/namei.h
··· 48 48 */ 49 49 #define LOOKUP_FOLLOW 0x0001 50 50 #define LOOKUP_DIRECTORY 0x0002 51 - #define LOOKUP_CONTINUE 0x0004 52 51 53 52 #define LOOKUP_PARENT 0x0010 54 53 #define LOOKUP_REVAL 0x0020 ··· 74 75 75 76 extern int kern_path(const char *, unsigned, struct path *); 76 77 78 + extern struct dentry *kern_path_create(int, const char *, struct path *, int); 79 + extern struct dentry *user_path_create(int, const char __user *, struct path *, int); 77 80 extern int kern_path_parent(const char *, struct nameidata *); 78 81 extern int vfs_path_lookup(struct dentry *, struct vfsmount *, 79 - const char *, unsigned int, struct nameidata *); 82 + const char *, unsigned int, struct path *); 80 83 81 84 extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry, 82 85 int (*open)(struct inode *, struct file *));
+3 -3
include/linux/nfs_fs.h
··· 85 85 struct nfs4_state; 86 86 struct nfs_open_context { 87 87 struct nfs_lock_context lock_context; 88 - struct path path; 88 + struct dentry *dentry; 89 89 struct rpc_cred *cred; 90 90 struct nfs4_state *state; 91 91 fmode_t mode; ··· 360 360 extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr); 361 361 extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr); 362 362 extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); 363 - extern int nfs_permission(struct inode *, int, unsigned int); 363 + extern int nfs_permission(struct inode *, int); 364 364 extern int nfs_open(struct inode *, struct file *); 365 365 extern int nfs_release(struct inode *, struct file *); 366 366 extern int nfs_attribute_timeout(struct inode *inode); ··· 372 372 extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); 373 373 extern void put_nfs_open_context(struct nfs_open_context *ctx); 374 374 extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode); 375 - extern struct nfs_open_context *alloc_nfs_open_context(struct path *path, struct rpc_cred *cred, fmode_t f_mode); 375 + extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, struct rpc_cred *cred, fmode_t f_mode); 376 376 extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx); 377 377 extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx); 378 378 extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx);
+1
include/linux/nsproxy.h
··· 68 68 void free_nsproxy(struct nsproxy *ns); 69 69 int unshare_nsproxy_namespaces(unsigned long, struct nsproxy **, 70 70 struct fs_struct *); 71 + int __init nsproxy_cache_init(void); 71 72 72 73 static inline void put_nsproxy(struct nsproxy *ns) 73 74 {
+3 -1
include/linux/reiserfs_xattr.h
··· 41 41 int reiserfs_lookup_privroot(struct super_block *sb); 42 42 int reiserfs_delete_xattrs(struct inode *inode); 43 43 int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs); 44 - int reiserfs_permission(struct inode *inode, int mask, unsigned int flags); 44 + int reiserfs_permission(struct inode *inode, int mask); 45 45 46 46 #ifdef CONFIG_REISERFS_FS_XATTR 47 47 #define has_xattr_dir(inode) (REISERFS_I(inode)->i_flags & i_has_xattr_dir) 48 + int reiserfs_check_acl(struct inode *inode, int mask); 48 49 ssize_t reiserfs_getxattr(struct dentry *dentry, const char *name, 49 50 void *buffer, size_t size); 50 51 int reiserfs_setxattr(struct dentry *dentry, const char *name, ··· 123 122 #define reiserfs_setxattr NULL 124 123 #define reiserfs_listxattr NULL 125 124 #define reiserfs_removexattr NULL 125 + #define reiserfs_check_acl NULL 126 126 127 127 static inline void reiserfs_init_xattr_rwsem(struct inode *inode) 128 128 {
-10
include/linux/rwsem.h
··· 124 124 */ 125 125 extern void down_read_nested(struct rw_semaphore *sem, int subclass); 126 126 extern void down_write_nested(struct rw_semaphore *sem, int subclass); 127 - /* 128 - * Take/release a lock when not the owner will release it. 129 - * 130 - * [ This API should be avoided as much as possible - the 131 - * proper abstraction for this case is completions. ] 132 - */ 133 - extern void down_read_non_owner(struct rw_semaphore *sem); 134 - extern void up_read_non_owner(struct rw_semaphore *sem); 135 127 #else 136 128 # define down_read_nested(sem, subclass) down_read(sem) 137 129 # define down_write_nested(sem, subclass) down_write(sem) 138 - # define down_read_non_owner(sem) down_read(sem) 139 - # define up_read_non_owner(sem) up_read(sem) 140 130 #endif 141 131 142 132 #endif /* _LINUX_RWSEM_H */
+1 -8
include/linux/security.h
··· 1456 1456 struct inode *new_dir, struct dentry *new_dentry); 1457 1457 int (*inode_readlink) (struct dentry *dentry); 1458 1458 int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd); 1459 - int (*inode_permission) (struct inode *inode, int mask, unsigned flags); 1459 + int (*inode_permission) (struct inode *inode, int mask); 1460 1460 int (*inode_setattr) (struct dentry *dentry, struct iattr *attr); 1461 1461 int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry); 1462 1462 int (*inode_setxattr) (struct dentry *dentry, const char *name, ··· 1720 1720 int security_inode_readlink(struct dentry *dentry); 1721 1721 int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd); 1722 1722 int security_inode_permission(struct inode *inode, int mask); 1723 - int security_inode_exec_permission(struct inode *inode, unsigned int flags); 1724 1723 int security_inode_setattr(struct dentry *dentry, struct iattr *attr); 1725 1724 int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry); 1726 1725 int security_inode_setxattr(struct dentry *dentry, const char *name, ··· 2108 2109 } 2109 2110 2110 2111 static inline int security_inode_permission(struct inode *inode, int mask) 2111 - { 2112 - return 0; 2113 - } 2114 - 2115 - static inline int security_inode_exec_permission(struct inode *inode, 2116 - unsigned int flags) 2117 2112 { 2118 2113 return 0; 2119 2114 }
+1
include/linux/seq_file.h
··· 23 23 u64 version; 24 24 struct mutex lock; 25 25 const struct seq_operations *op; 26 + int poll_event; 26 27 void *private; 27 28 }; 28 29
+42
include/linux/shrinker.h
··· 1 + #ifndef _LINUX_SHRINKER_H 2 + #define _LINUX_SHRINKER_H 3 + 4 + /* 5 + * This struct is used to pass information from page reclaim to the shrinkers. 6 + * We consolidate the values for easier extention later. 7 + */ 8 + struct shrink_control { 9 + gfp_t gfp_mask; 10 + 11 + /* How many slab objects shrinker() should scan and try to reclaim */ 12 + unsigned long nr_to_scan; 13 + }; 14 + 15 + /* 16 + * A callback you can register to apply pressure to ageable caches. 17 + * 18 + * 'sc' is passed shrink_control which includes a count 'nr_to_scan' 19 + * and a 'gfpmask'. It should look through the least-recently-used 20 + * 'nr_to_scan' entries and attempt to free them up. It should return 21 + * the number of objects which remain in the cache. If it returns -1, it means 22 + * it cannot do any scanning at this time (eg. there is a risk of deadlock). 23 + * 24 + * The 'gfpmask' refers to the allocation we are currently trying to 25 + * fulfil. 26 + * 27 + * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is 28 + * querying the cache size, so a fastpath for that case is appropriate. 29 + */ 30 + struct shrinker { 31 + int (*shrink)(struct shrinker *, struct shrink_control *sc); 32 + int seeks; /* seeks to recreate an obj */ 33 + long batch; /* reclaim batch size, 0 = default */ 34 + 35 + /* These are for internal use */ 36 + struct list_head list; 37 + long nr; /* objs pending delete */ 38 + }; 39 + #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */ 40 + extern void register_shrinker(struct shrinker *); 41 + extern void unregister_shrinker(struct shrinker *); 42 + #endif
+77
include/trace/events/vmscan.h
··· 179 179 TP_ARGS(nr_reclaimed) 180 180 ); 181 181 182 + TRACE_EVENT(mm_shrink_slab_start, 183 + TP_PROTO(struct shrinker *shr, struct shrink_control *sc, 184 + long nr_objects_to_shrink, unsigned long pgs_scanned, 185 + unsigned long lru_pgs, unsigned long cache_items, 186 + unsigned long long delta, unsigned long total_scan), 187 + 188 + TP_ARGS(shr, sc, nr_objects_to_shrink, pgs_scanned, lru_pgs, 189 + cache_items, delta, total_scan), 190 + 191 + TP_STRUCT__entry( 192 + __field(struct shrinker *, shr) 193 + __field(void *, shrink) 194 + __field(long, nr_objects_to_shrink) 195 + __field(gfp_t, gfp_flags) 196 + __field(unsigned long, pgs_scanned) 197 + __field(unsigned long, lru_pgs) 198 + __field(unsigned long, cache_items) 199 + __field(unsigned long long, delta) 200 + __field(unsigned long, total_scan) 201 + ), 202 + 203 + TP_fast_assign( 204 + __entry->shr = shr; 205 + __entry->shrink = shr->shrink; 206 + __entry->nr_objects_to_shrink = nr_objects_to_shrink; 207 + __entry->gfp_flags = sc->gfp_mask; 208 + __entry->pgs_scanned = pgs_scanned; 209 + __entry->lru_pgs = lru_pgs; 210 + __entry->cache_items = cache_items; 211 + __entry->delta = delta; 212 + __entry->total_scan = total_scan; 213 + ), 214 + 215 + TP_printk("%pF %p: objects to shrink %ld gfp_flags %s pgs_scanned %ld lru_pgs %ld cache items %ld delta %lld total_scan %ld", 216 + __entry->shrink, 217 + __entry->shr, 218 + __entry->nr_objects_to_shrink, 219 + show_gfp_flags(__entry->gfp_flags), 220 + __entry->pgs_scanned, 221 + __entry->lru_pgs, 222 + __entry->cache_items, 223 + __entry->delta, 224 + __entry->total_scan) 225 + ); 226 + 227 + TRACE_EVENT(mm_shrink_slab_end, 228 + TP_PROTO(struct shrinker *shr, int shrinker_retval, 229 + long unused_scan_cnt, long new_scan_cnt), 230 + 231 + TP_ARGS(shr, shrinker_retval, unused_scan_cnt, new_scan_cnt), 232 + 233 + TP_STRUCT__entry( 234 + __field(struct shrinker *, shr) 235 + __field(void *, shrink) 236 + __field(long, unused_scan) 237 + __field(long, 
new_scan) 238 + __field(int, retval) 239 + __field(long, total_scan) 240 + ), 241 + 242 + TP_fast_assign( 243 + __entry->shr = shr; 244 + __entry->shrink = shr->shrink; 245 + __entry->unused_scan = unused_scan_cnt; 246 + __entry->new_scan = new_scan_cnt; 247 + __entry->retval = shrinker_retval; 248 + __entry->total_scan = new_scan_cnt - unused_scan_cnt; 249 + ), 250 + 251 + TP_printk("%pF %p: unused scan count %ld new scan count %ld total_scan %ld last shrinker return val %d", 252 + __entry->shrink, 253 + __entry->shr, 254 + __entry->unused_scan, 255 + __entry->new_scan, 256 + __entry->total_scan, 257 + __entry->retval) 258 + ); 182 259 183 260 DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template, 184 261
+2 -2
ipc/shm.c
··· 277 277 return 0; 278 278 } 279 279 280 - static int shm_fsync(struct file *file, int datasync) 280 + static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync) 281 281 { 282 282 struct shm_file_data *sfd = shm_file_data(file); 283 283 284 284 if (!sfd->file->f_op->fsync) 285 285 return -EINVAL; 286 - return sfd->file->f_op->fsync(sfd->file, datasync); 286 + return sfd->file->f_op->fsync(sfd->file, start, end, datasync); 287 287 } 288 288 289 289 static unsigned long shm_get_unmapped_area(struct file *file,
+2 -1
kernel/cgroup.c
··· 3542 3542 } 3543 3543 3544 3544 /* the process need read permission on control file */ 3545 - ret = file_permission(cfile, MAY_READ); 3545 + /* AV: shouldn't we check that it's been opened for read instead? */ 3546 + ret = inode_permission(cfile->f_path.dentry->d_inode, MAY_READ); 3546 3547 if (ret < 0) 3547 3548 goto fail; 3548 3549
+1
kernel/fork.c
··· 1585 1585 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); 1586 1586 vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC); 1587 1587 mmap_init(); 1588 + nsproxy_cache_init(); 1588 1589 } 1589 1590 1590 1591 /*
+1 -3
kernel/nsproxy.c
··· 271 271 return err; 272 272 } 273 273 274 - static int __init nsproxy_cache_init(void) 274 + int __init nsproxy_cache_init(void) 275 275 { 276 276 nsproxy_cachep = KMEM_CACHE(nsproxy, SLAB_PANIC); 277 277 return 0; 278 278 } 279 - 280 - module_init(nsproxy_cache_init);
-16
kernel/rwsem.c
··· 117 117 118 118 EXPORT_SYMBOL(down_read_nested); 119 119 120 - void down_read_non_owner(struct rw_semaphore *sem) 121 - { 122 - might_sleep(); 123 - 124 - __down_read(sem); 125 - } 126 - 127 - EXPORT_SYMBOL(down_read_non_owner); 128 - 129 120 void down_write_nested(struct rw_semaphore *sem, int subclass) 130 121 { 131 122 might_sleep(); ··· 126 135 } 127 136 128 137 EXPORT_SYMBOL(down_write_nested); 129 - 130 - void up_read_non_owner(struct rw_semaphore *sem) 131 - { 132 - __up_read(sem); 133 - } 134 - 135 - EXPORT_SYMBOL(up_read_non_owner); 136 138 137 139 #endif 138 140
-3
mm/filemap.c
··· 78 78 * ->i_mutex (generic_file_buffered_write) 79 79 * ->mmap_sem (fault_in_pages_readable->do_page_fault) 80 80 * 81 - * ->i_mutex 82 - * ->i_alloc_sem (various) 83 - * 84 81 * inode_wb_list_lock 85 82 * sb_lock (fs/fs-writeback.c) 86 83 * ->mapping->tree_lock (__sync_single_inode)
+1 -1
mm/madvise.c
··· 218 218 endoff = (loff_t)(end - vma->vm_start - 1) 219 219 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); 220 220 221 - /* vmtruncate_range needs to take i_mutex and i_alloc_sem */ 221 + /* vmtruncate_range needs to take i_mutex */ 222 222 up_read(&current->mm->mmap_sem); 223 223 error = vmtruncate_range(mapping->host, offset, endoff); 224 224 down_read(&current->mm->mmap_sem);
-1
mm/rmap.c
··· 21 21 * Lock ordering in mm: 22 22 * 23 23 * inode->i_mutex (while writing or truncating, not reading or faulting) 24 - * inode->i_alloc_sem (vmtruncate_range) 25 24 * mm->mmap_sem 26 25 * page->flags PG_locked (lock_page) 27 26 * mapping->i_mmap_mutex
+8 -21
mm/swapfile.c
··· 1681 1681 } 1682 1682 1683 1683 #ifdef CONFIG_PROC_FS 1684 - struct proc_swaps { 1685 - struct seq_file seq; 1686 - int event; 1687 - }; 1688 - 1689 1684 static unsigned swaps_poll(struct file *file, poll_table *wait) 1690 1685 { 1691 - struct proc_swaps *s = file->private_data; 1686 + struct seq_file *seq = file->private_data; 1692 1687 1693 1688 poll_wait(file, &proc_poll_wait, wait); 1694 1689 1695 - if (s->event != atomic_read(&proc_poll_event)) { 1696 - s->event = atomic_read(&proc_poll_event); 1690 + if (seq->poll_event != atomic_read(&proc_poll_event)) { 1691 + seq->poll_event = atomic_read(&proc_poll_event); 1697 1692 return POLLIN | POLLRDNORM | POLLERR | POLLPRI; 1698 1693 } 1699 1694 ··· 1778 1783 1779 1784 static int swaps_open(struct inode *inode, struct file *file) 1780 1785 { 1781 - struct proc_swaps *s; 1786 + struct seq_file *seq; 1782 1787 int ret; 1783 1788 1784 - s = kmalloc(sizeof(struct proc_swaps), GFP_KERNEL); 1785 - if (!s) 1786 - return -ENOMEM; 1787 - 1788 - file->private_data = s; 1789 - 1790 1789 ret = seq_open(file, &swaps_op); 1791 - if (ret) { 1792 - kfree(s); 1790 + if (ret) 1793 1791 return ret; 1794 - } 1795 1792 1796 - s->seq.private = s; 1797 - s->event = atomic_read(&proc_poll_event); 1798 - return ret; 1793 + seq = file->private_data; 1794 + seq->poll_event = atomic_read(&proc_poll_event); 1795 + return 0; 1799 1796 } 1800 1797 1801 1798 static const struct file_operations proc_swaps_operations = {
+1 -2
mm/truncate.c
··· 622 622 return -ENOSYS; 623 623 624 624 mutex_lock(&inode->i_mutex); 625 - down_write(&inode->i_alloc_sem); 625 + inode_dio_wait(inode); 626 626 unmap_mapping_range(mapping, offset, (end - offset), 1); 627 627 inode->i_op->truncate_range(inode, offset, end); 628 628 /* unmap again to remove racily COWed private pages */ 629 629 unmap_mapping_range(mapping, offset, (end - offset), 1); 630 - up_write(&inode->i_alloc_sem); 631 630 mutex_unlock(&inode->i_mutex); 632 631 633 632 return 0;
+56 -15
mm/vmscan.c
··· 250 250 unsigned long long delta; 251 251 unsigned long total_scan; 252 252 unsigned long max_pass; 253 + int shrink_ret = 0; 254 + long nr; 255 + long new_nr; 256 + long batch_size = shrinker->batch ? shrinker->batch 257 + : SHRINK_BATCH; 253 258 259 + /* 260 + * copy the current shrinker scan count into a local variable 261 + * and zero it so that other concurrent shrinker invocations 262 + * don't also do this scanning work. 263 + */ 264 + do { 265 + nr = shrinker->nr; 266 + } while (cmpxchg(&shrinker->nr, nr, 0) != nr); 267 + 268 + total_scan = nr; 254 269 max_pass = do_shrinker_shrink(shrinker, shrink, 0); 255 270 delta = (4 * nr_pages_scanned) / shrinker->seeks; 256 271 delta *= max_pass; 257 272 do_div(delta, lru_pages + 1); 258 - shrinker->nr += delta; 259 - if (shrinker->nr < 0) { 273 + total_scan += delta; 274 + if (total_scan < 0) { 260 275 printk(KERN_ERR "shrink_slab: %pF negative objects to " 261 276 "delete nr=%ld\n", 262 - shrinker->shrink, shrinker->nr); 263 - shrinker->nr = max_pass; 277 + shrinker->shrink, total_scan); 278 + total_scan = max_pass; 264 279 } 280 + 281 + /* 282 + * We need to avoid excessive windup on filesystem shrinkers 283 + * due to large numbers of GFP_NOFS allocations causing the 284 + * shrinkers to return -1 all the time. This results in a large 285 + * nr being built up so when a shrink that can do some work 286 + * comes along it empties the entire cache due to nr >>> 287 + * max_pass. This is bad for sustaining a working set in 288 + * memory. 289 + * 290 + * Hence only allow the shrinker to scan the entire cache when 291 + * a large delta change is calculated directly. 292 + */ 293 + if (delta < max_pass / 4) 294 + total_scan = min(total_scan, max_pass / 2); 265 295 266 296 /* 267 297 * Avoid risking looping forever due to too large nr value: 268 298 * never try to free more than twice the estimate number of 269 299 * freeable entries. 
270 300 */ 271 - if (shrinker->nr > max_pass * 2) 272 - shrinker->nr = max_pass * 2; 301 + if (total_scan > max_pass * 2) 302 + total_scan = max_pass * 2; 273 303 274 - total_scan = shrinker->nr; 275 - shrinker->nr = 0; 304 + trace_mm_shrink_slab_start(shrinker, shrink, nr, 305 + nr_pages_scanned, lru_pages, 306 + max_pass, delta, total_scan); 276 307 277 - while (total_scan >= SHRINK_BATCH) { 278 - long this_scan = SHRINK_BATCH; 279 - int shrink_ret; 308 + while (total_scan >= batch_size) { 280 309 int nr_before; 281 310 282 311 nr_before = do_shrinker_shrink(shrinker, shrink, 0); 283 312 shrink_ret = do_shrinker_shrink(shrinker, shrink, 284 - this_scan); 313 + batch_size); 285 314 if (shrink_ret == -1) 286 315 break; 287 316 if (shrink_ret < nr_before) 288 317 ret += nr_before - shrink_ret; 289 - count_vm_events(SLABS_SCANNED, this_scan); 290 - total_scan -= this_scan; 318 + count_vm_events(SLABS_SCANNED, batch_size); 319 + total_scan -= batch_size; 291 320 292 321 cond_resched(); 293 322 } 294 323 295 - shrinker->nr += total_scan; 324 + /* 325 + * move the unused scan count back into the shrinker in a 326 + * manner that handles concurrent updates. If we exhausted the 327 + * scan, there is no need to do an update. 328 + */ 329 + do { 330 + nr = shrinker->nr; 331 + new_nr = total_scan + nr; 332 + if (total_scan <= 0) 333 + break; 334 + } while (cmpxchg(&shrinker->nr, nr, new_nr) != nr); 335 + 336 + trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr); 296 337 } 297 338 up_read(&shrinker_rwsem); 298 339 out:
+5 -6
net/sunrpc/clnt.c
··· 97 97 rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name) 98 98 { 99 99 static uint32_t clntid; 100 - struct nameidata nd; 101 - struct path path; 100 + struct path path, dir; 102 101 char name[15]; 103 102 struct qstr q = { 104 103 .name = name, ··· 112 113 path.mnt = rpc_get_mount(); 113 114 if (IS_ERR(path.mnt)) 114 115 return PTR_ERR(path.mnt); 115 - error = vfs_path_lookup(path.mnt->mnt_root, path.mnt, dir_name, 0, &nd); 116 + error = vfs_path_lookup(path.mnt->mnt_root, path.mnt, dir_name, 0, &dir); 116 117 if (error) 117 118 goto err; 118 119 ··· 120 121 q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++); 121 122 name[sizeof(name) - 1] = '\0'; 122 123 q.hash = full_name_hash(q.name, q.len); 123 - path.dentry = rpc_create_client_dir(nd.path.dentry, &q, clnt); 124 + path.dentry = rpc_create_client_dir(dir.dentry, &q, clnt); 124 125 if (!IS_ERR(path.dentry)) 125 126 break; 126 127 error = PTR_ERR(path.dentry); ··· 131 132 goto err_path_put; 132 133 } 133 134 } 134 - path_put(&nd.path); 135 + path_put(&dir); 135 136 clnt->cl_path = path; 136 137 return 0; 137 138 err_path_put: 138 - path_put(&nd.path); 139 + path_put(&dir); 139 140 err: 140 141 rpc_put_mount(); 141 142 return error;
+17 -21
net/unix/af_unix.c
··· 808 808 struct net *net = sock_net(sk); 809 809 struct unix_sock *u = unix_sk(sk); 810 810 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; 811 + char *sun_path = sunaddr->sun_path; 811 812 struct dentry *dentry = NULL; 812 - struct nameidata nd; 813 + struct path path; 813 814 int err; 814 815 unsigned hash; 815 816 struct unix_address *addr; ··· 846 845 addr->hash = hash ^ sk->sk_type; 847 846 atomic_set(&addr->refcnt, 1); 848 847 849 - if (sunaddr->sun_path[0]) { 848 + if (sun_path[0]) { 850 849 unsigned int mode; 851 850 err = 0; 852 851 /* 853 852 * Get the parent directory, calculate the hash for last 854 853 * component. 855 854 */ 856 - err = kern_path_parent(sunaddr->sun_path, &nd); 857 - if (err) 858 - goto out_mknod_parent; 859 - 860 - dentry = lookup_create(&nd, 0); 855 + dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0); 861 856 err = PTR_ERR(dentry); 862 857 if (IS_ERR(dentry)) 863 - goto out_mknod_unlock; 858 + goto out_mknod_parent; 864 859 865 860 /* 866 861 * All right, let's create it. 
867 862 */ 868 863 mode = S_IFSOCK | 869 864 (SOCK_INODE(sock)->i_mode & ~current_umask()); 870 - err = mnt_want_write(nd.path.mnt); 865 + err = mnt_want_write(path.mnt); 871 866 if (err) 872 867 goto out_mknod_dput; 873 - err = security_path_mknod(&nd.path, dentry, mode, 0); 868 + err = security_path_mknod(&path, dentry, mode, 0); 874 869 if (err) 875 870 goto out_mknod_drop_write; 876 - err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0); 871 + err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0); 877 872 out_mknod_drop_write: 878 - mnt_drop_write(nd.path.mnt); 873 + mnt_drop_write(path.mnt); 879 874 if (err) 880 875 goto out_mknod_dput; 881 - mutex_unlock(&nd.path.dentry->d_inode->i_mutex); 882 - dput(nd.path.dentry); 883 - nd.path.dentry = dentry; 876 + mutex_unlock(&path.dentry->d_inode->i_mutex); 877 + dput(path.dentry); 878 + path.dentry = dentry; 884 879 885 880 addr->hash = UNIX_HASH_SIZE; 886 881 } 887 882 888 883 spin_lock(&unix_table_lock); 889 884 890 - if (!sunaddr->sun_path[0]) { 885 + if (!sun_path[0]) { 891 886 err = -EADDRINUSE; 892 887 if (__unix_find_socket_byname(net, sunaddr, addr_len, 893 888 sk->sk_type, hash)) { ··· 894 897 list = &unix_socket_table[addr->hash]; 895 898 } else { 896 899 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)]; 897 - u->dentry = nd.path.dentry; 898 - u->mnt = nd.path.mnt; 900 + u->dentry = path.dentry; 901 + u->mnt = path.mnt; 899 902 } 900 903 901 904 err = 0; ··· 912 915 913 916 out_mknod_dput: 914 917 dput(dentry); 915 - out_mknod_unlock: 916 - mutex_unlock(&nd.path.dentry->d_inode->i_mutex); 917 - path_put(&nd.path); 918 + mutex_unlock(&path.dentry->d_inode->i_mutex); 919 + path_put(&path); 918 920 out_mknod_parent: 919 921 if (err == -EEXIST) 920 922 err = -EADDRINUSE;
+1 -1
security/capability.c
··· 181 181 return 0; 182 182 } 183 183 184 - static int cap_inode_permission(struct inode *inode, int mask, unsigned flags) 184 + static int cap_inode_permission(struct inode *inode, int mask) 185 185 { 186 186 return 0; 187 187 }
+1 -8
security/security.c
··· 518 518 { 519 519 if (unlikely(IS_PRIVATE(inode))) 520 520 return 0; 521 - return security_ops->inode_permission(inode, mask, 0); 522 - } 523 - 524 - int security_inode_exec_permission(struct inode *inode, unsigned int flags) 525 - { 526 - if (unlikely(IS_PRIVATE(inode))) 527 - return 0; 528 - return security_ops->inode_permission(inode, MAY_EXEC, flags); 521 + return security_ops->inode_permission(inode, mask); 529 522 } 530 523 531 524 int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
+1 -1
security/selinux/avc.c
··· 527 527 * happened a little later. 528 528 */ 529 529 if ((a->type == LSM_AUDIT_DATA_INODE) && 530 - (flags & IPERM_FLAG_RCU)) 530 + (flags & MAY_NOT_BLOCK)) 531 531 return -ECHILD; 532 532 533 533 a->selinux_audit_data.tclass = tclass;
+2 -1
security/selinux/hooks.c
··· 2659 2659 return dentry_has_perm(cred, dentry, FILE__READ); 2660 2660 } 2661 2661 2662 - static int selinux_inode_permission(struct inode *inode, int mask, unsigned flags) 2662 + static int selinux_inode_permission(struct inode *inode, int mask) 2663 2663 { 2664 2664 const struct cred *cred = current_cred(); 2665 2665 struct common_audit_data ad; 2666 2666 u32 perms; 2667 2667 bool from_access; 2668 + unsigned flags = mask & MAY_NOT_BLOCK; 2668 2669 2669 2670 from_access = mask & MAY_ACCESS; 2670 2671 mask &= (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND);
+3 -2
security/smack/smack_lsm.c
··· 689 689 * 690 690 * Returns 0 if access is permitted, -EACCES otherwise 691 691 */ 692 - static int smack_inode_permission(struct inode *inode, int mask, unsigned flags) 692 + static int smack_inode_permission(struct inode *inode, int mask) 693 693 { 694 694 struct smk_audit_info ad; 695 + int no_block = mask & MAY_NOT_BLOCK; 695 696 696 697 mask &= (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND); 697 698 /* ··· 702 701 return 0; 703 702 704 703 /* May be droppable after audit */ 705 - if (flags & IPERM_FLAG_RCU) 704 + if (no_block) 706 705 return -ECHILD; 707 706 smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_INODE); 708 707 smk_ad_setfield_u_fs_inode(&ad, inode);
+1 -1
security/tomoyo/realpath.c
··· 103 103 if (!buf) 104 104 break; 105 105 /* Get better name for socket. */ 106 - if (dentry->d_sb && dentry->d_sb->s_magic == SOCKFS_MAGIC) { 106 + if (dentry->d_sb->s_magic == SOCKFS_MAGIC) { 107 107 struct inode *inode = dentry->d_inode; 108 108 struct socket *sock = inode ? SOCKET_I(inode) : NULL; 109 109 struct sock *sk = sock ? sock->sk : NULL;