        The text below describes the locking rules for VFS-related methods.
It is (believed to be) up-to-date. *Please*, if you change anything in
prototypes or locking protocols - update this file. And update the relevant
instances in the tree, don't leave that to maintainers of filesystems/devices/
etc. At the very least, put the list of dubious cases in the end of this file.
Don't turn it into a log - maintainers of out-of-the-tree code are supposed to
be able to use diff(1).
        Thing currently missing here: socket operations. Alexey?

--------------------------- dentry_operations --------------------------
prototypes:
        int (*d_revalidate)(struct dentry *, struct nameidata *);
        int (*d_hash) (struct dentry *, struct qstr *);
        int (*d_compare) (struct dentry *, struct qstr *, struct qstr *);
        int (*d_delete)(struct dentry *);
        void (*d_release)(struct dentry *);
        void (*d_iput)(struct dentry *, struct inode *);
        char *(*d_dname)(struct dentry *dentry, char *buffer, int buflen);

locking rules:
                dcache_lock     rename_lock     ->d_lock        may block
d_revalidate:   no              no              no              yes
d_hash:         no              no              no              yes
d_compare:      no              yes             no              no
d_delete:       yes             no              yes             no
d_release:      no              no              no              yes
d_iput:         no              no              no              yes
d_dname:        no              no              no              no

--------------------------- inode_operations ---------------------------
prototypes:
        int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
        struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
        int (*link) (struct dentry *,struct inode *,struct dentry *);
        int (*unlink) (struct inode *,struct dentry *);
        int (*symlink) (struct inode *,struct dentry *,const char *);
        int (*mkdir) (struct inode *,struct dentry *,int);
        int (*rmdir) (struct inode *,struct dentry *);
        int (*mknod) (struct inode *,struct dentry *,int,dev_t);
        int (*rename) (struct inode *, struct dentry *,
                        struct inode *, struct dentry *);
        int (*readlink) (struct dentry *, char __user *,int);
        void * (*follow_link) (struct dentry *, struct nameidata *);
        void (*put_link) (struct dentry *, struct nameidata *, void *);
        void (*truncate) (struct inode *);
        int (*permission) (struct inode *, int, struct nameidata *);
        int (*check_acl)(struct inode *, int);
        int (*setattr) (struct dentry *, struct iattr *);
        int (*getattr) (struct vfsmount *, struct dentry *, struct kstat *);
        int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
        ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
        ssize_t (*listxattr) (struct dentry *, char *, size_t);
        int (*removexattr) (struct dentry *, const char *);
        void (*truncate_range)(struct inode *, loff_t, loff_t);
        long (*fallocate)(struct inode *inode, int mode, loff_t offset, loff_t len);
        int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);

locking rules:
        all may block
                i_mutex(inode)
lookup:         yes
create:         yes
link:           yes (both)
mknod:          yes
symlink:        yes
mkdir:          yes
unlink:         yes (both)
rmdir:          yes (both)      (see below)
rename:         yes (all)       (see below)
readlink:       no
follow_link:    no
put_link:       no
truncate:       yes             (see below)
setattr:        yes
permission:     no
check_acl:      no
getattr:        no
setxattr:       yes
getxattr:       no
listxattr:      no
removexattr:    yes
truncate_range: yes
fallocate:      no
fiemap:         no

        Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
the victim.
        cross-directory ->rename() has (per-superblock) ->s_vfs_rename_mutex.
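
As an illustration of the table and of the note about the victim's ->i_mutex
above, here is a minimal, hedged sketch of an ->unlink() instance for a
hypothetical "examplefs" (the examplefs_ names are invented, not taken from
any in-tree filesystem).  The method takes no VFS locks itself - both mutexes
are already held by the caller - and it may block freely:

#include <linux/fs.h>

/* hedged sketch - not a complete or real implementation */
static int examplefs_unlink(struct inode *dir, struct dentry *dentry)
{
        struct inode *inode = dentry->d_inode;

        /* both locks are guaranteed by the caller (see table above) */
        WARN_ON(!mutex_is_locked(&dir->i_mutex));
        WARN_ON(!mutex_is_locked(&inode->i_mutex));

        /* ... remove the directory entry on disk here ... */

        drop_nlink(inode);
        mark_inode_dirty(inode);
        return 0;
}
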
        ->truncate() is never called directly - it's a callback, not a
method. It is called by vmtruncate() - a deprecated library function used by
->setattr(). The locking information above applies to that call (i.e. it is
inherited from ->setattr() - vmtruncate() is used when ATTR_SIZE had been
passed).

See Documentation/filesystems/directory-locking for more detailed discussion
of the locking scheme for directory operations.

--------------------------- super_operations ---------------------------
prototypes:
        struct inode *(*alloc_inode)(struct super_block *sb);
        void (*destroy_inode)(struct inode *);
        void (*dirty_inode) (struct inode *);
        int (*write_inode) (struct inode *, struct writeback_control *wbc);
        int (*drop_inode) (struct inode *);
        void (*evict_inode) (struct inode *);
        void (*put_super) (struct super_block *);
        void (*write_super) (struct super_block *);
        int (*sync_fs)(struct super_block *sb, int wait);
        int (*freeze_fs) (struct super_block *);
        int (*unfreeze_fs) (struct super_block *);
        int (*statfs) (struct dentry *, struct kstatfs *);
        int (*remount_fs) (struct super_block *, int *, char *);
        void (*umount_begin) (struct super_block *);
        int (*show_options)(struct seq_file *, struct vfsmount *);
        ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
        ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
        int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t);

locking rules:
        All may block [not true, see below]
                        s_umount
alloc_inode:
destroy_inode:
dirty_inode:                            (must not sleep)
write_inode:
drop_inode:                             !!!inode_lock!!!
evict_inode:
put_super:              write
write_super:            read
sync_fs:                read
freeze_fs:              read
unfreeze_fs:            read
statfs:                 maybe(read)     (see below)
remount_fs:             write
umount_begin:           no
show_options:           no              (namespace_sem)
quota_read:             no              (see below)
quota_write:            no              (see below)
bdev_try_to_free_page:  no              (see below)

->statfs() has s_umount (shared) when called by ustat(2) (native or
compat), but that's an accident of bad API; s_umount is used to pin
the superblock down when we only have a dev_t given to us by userland to
identify the superblock.  Everything else (statfs(), fstatfs(), etc.)
doesn't hold it when calling ->statfs() - the superblock is pinned down
by resolving the pathname passed to the syscall.
->quota_read() and ->quota_write() functions are both guaranteed to
be the only ones operating on the quota file by the quota code (via
dqio_mutex) (unless an admin really wants to screw up something and
writes to quota files with quotas on). For other details about locking
see also the dquot_operations section.
->bdev_try_to_free_page is called from the ->releasepage handler of
the block device inode. See there for more details.

--------------------------- file_system_type ---------------------------
prototypes:
        int (*get_sb) (struct file_system_type *, int,
                       const char *, void *, struct vfsmount *);
        struct dentry *(*mount) (struct file_system_type *, int,
                       const char *, void *);
        void (*kill_sb) (struct super_block *);
locking rules:
                may block
get_sb          yes
mount           yes
kill_sb         yes

->get_sb() returns error or 0 with a locked superblock attached to the
vfsmount (exclusive on ->s_umount).
->mount() returns ERR_PTR or the root dentry.
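
To illustrate the ->mount()/->kill_sb() contract, here is a minimal, hedged
sketch for a hypothetical block-device-backed "examplefs" (all examplefs_
names are invented; this is only a skeleton built on the mount_bdev() and
kill_block_super() helpers, not a complete filesystem).  mount_bdev() returns
either the root dentry or ERR_PTR(), which is exactly what the VFS expects
from ->mount():

#include <linux/fs.h>
#include <linux/module.h>

static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
        /* ... read the on-disk superblock, set sb->s_op and sb->s_root ... */
        return 0;
}

static struct dentry *examplefs_mount(struct file_system_type *fs_type,
                int flags, const char *dev_name, void *data)
{
        return mount_bdev(fs_type, flags, dev_name, data,
                          examplefs_fill_super);
}

static struct file_system_type examplefs_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "examplefs",
        .mount          = examplefs_mount,
        .kill_sb        = kill_block_super,
        .fs_flags       = FS_REQUIRES_DEV,
};
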
->kill_sb() takes a write-locked superblock, does all shutdown work on it,
unlocks and drops the reference.

--------------------------- address_space_operations --------------------------
prototypes:
        int (*writepage)(struct page *page, struct writeback_control *wbc);
        int (*readpage)(struct file *, struct page *);
        int (*sync_page)(struct page *);
        int (*writepages)(struct address_space *, struct writeback_control *);
        int (*set_page_dirty)(struct page *page);
        int (*readpages)(struct file *filp, struct address_space *mapping,
                        struct list_head *pages, unsigned nr_pages);
        int (*write_begin)(struct file *, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata);
        int (*write_end)(struct file *, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata);
        sector_t (*bmap)(struct address_space *, sector_t);
        void (*invalidatepage) (struct page *, unsigned long);
        int (*releasepage) (struct page *, gfp_t);
        void (*freepage)(struct page *);
        ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
                        loff_t offset, unsigned long nr_segs);
        int (*get_xip_mem)(struct address_space *, pgoff_t, int, void **,
                        unsigned long *);
        int (*migratepage)(struct address_space *, struct page *, struct page *);
        int (*launder_page)(struct page *);
        int (*is_partially_uptodate)(struct page *, read_descriptor_t *, unsigned long);
        int (*error_remove_page)(struct address_space *, struct page *);

locking rules:
        All except set_page_dirty and freepage may block

                        PageLocked(page)        i_mutex
writepage:              yes, unlocks (see below)
readpage:               yes, unlocks
sync_page:              maybe
writepages:
set_page_dirty:         no
readpages:
write_begin:            locks the page          yes
write_end:              yes, unlocks            yes
bmap:
invalidatepage:         yes
releasepage:            yes
freepage:               yes
direct_IO:
get_xip_mem:            maybe
migratepage:            yes (both)
launder_page:           yes
is_partially_uptodate:  yes
error_remove_page:      yes

        ->write_begin(), ->write_end(), ->sync_page() and ->readpage()
may be called from the request handler (/dev/loop).

        ->readpage() unlocks the page, either synchronously or via I/O
completion.

        ->readpages() populates the pagecache with the passed pages and starts
I/O against them.  They come unlocked upon I/O completion.

        ->writepage() is used for two purposes: for "memory cleansing" and for
"sync".  These are quite different operations and the behaviour may differ
depending upon the mode.

If writepage is called for sync (wbc->sync_mode != WB_SYNC_NONE) then
it *must* start I/O against the page, even if that would involve
blocking on in-progress I/O.

If writepage is called for memory cleansing (sync_mode ==
WB_SYNC_NONE) then its role is to get as much writeout underway as
possible.  So writepage should try to avoid blocking against
currently-in-progress I/O.

If the filesystem is not called for "sync" and it determines that it
would need to block against in-progress I/O to be able to start new I/O
against the page, the filesystem should redirty the page with
redirty_page_for_writepage(), then unlock the page and return zero.
This may also be done to avoid internal deadlocks, but rarely.

If the filesystem is called for sync then it must wait on any
in-progress I/O and then start new I/O.
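
The memory-cleansing rule above can be illustrated with a hedged sketch of a
->writepage() instance for a hypothetical examplefs.  examplefs_would_block()
and examplefs_submit_write() are assumed helpers, not kernel API; the I/O
completion handler behind examplefs_submit_write() is assumed to call
end_page_writeback():

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

/* assumed helpers, not real kernel API: */
static bool examplefs_would_block(struct inode *inode);
static int examplefs_submit_write(struct inode *inode, struct page *page);

static int examplefs_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;

        if (wbc->sync_mode == WB_SYNC_NONE && examplefs_would_block(inode)) {
                /* called for cleansing and I/O would block: back off */
                redirty_page_for_writepage(wbc, page);
                unlock_page(page);
                return 0;
        }

        set_page_writeback(page);
        unlock_page(page);
        /* completion handler must call end_page_writeback() */
        return examplefs_submit_write(inode, page);
}
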
The filesystem should unlock the page synchronously, before returning to the
caller, unless ->writepage() returns the special WRITEPAGE_ACTIVATE
value. WRITEPAGE_ACTIVATE means that the page cannot really be written out
currently, and the VM should stop calling ->writepage() on this page for some
time. The VM does this by moving the page to the head of the active list,
hence the name.

Unless the filesystem is going to redirty_page_for_writepage(), unlock the page
and return zero, writepage *must* run set_page_writeback() against the page,
followed by unlocking it. Once set_page_writeback() has been run against the
page, write I/O can be submitted and the write I/O completion handler must run
end_page_writeback() once the I/O is complete. If no I/O is submitted, the
filesystem must run end_page_writeback() against the page before returning from
writepage.

That is: after 2.5.12, pages which are under writeout are *not* locked.  Note,
if the filesystem needs the page to be locked during writeout, that is ok, too,
the page is allowed to be unlocked at any point in time between the calls to
set_page_writeback() and end_page_writeback().

Note, failure to run either redirty_page_for_writepage() or the combination of
set_page_writeback()/end_page_writeback() on a page submitted to writepage
will leave the page itself marked clean but it will be tagged as dirty in the
radix tree.  This incoherency can lead to all sorts of hard-to-debug problems
in the filesystem like having dirty inodes at umount and losing written data.

        ->sync_page() locking rules are not well-defined - usually it is called
with the page locked, but that is not guaranteed. Considering the currently
existing instances of this method, ->sync_page() itself doesn't look
well-defined...

        ->writepages() is used for periodic writeback and for syscall-initiated
sync operations. The address_space should start I/O against at least
*nr_to_write pages. *nr_to_write must be decremented for each page which is
written. The address_space implementation may write more (or fewer) pages
than *nr_to_write asks for, but it should try to be reasonably close. If
nr_to_write is NULL, all dirty pages must be written.

writepages should _only_ write pages which are present on
mapping->io_pages.

        ->set_page_dirty() is called from various places in the kernel
when the target page is marked as needing writeback. It may be called
under spinlock (it cannot block) and is sometimes called with the page
not locked.

        ->bmap() is currently used by the legacy ioctl() (FIBMAP) provided by
some filesystems and by the swapper. The latter will eventually go away.
Please, keep it that way and don't breed new callers.

        ->invalidatepage() is called when the filesystem must attempt to drop
some or all of the buffers from the page when it is being truncated. If
->invalidatepage is NULL, the kernel uses block_invalidatepage() instead.

        ->releasepage() is called when the kernel is about to try to drop the
buffers from the page in preparation for freeing it. It returns non-zero to
indicate that the buffers have been (or may safely be) freed, and zero if
they could not be released. If ->releasepage is NULL, the kernel assumes that
the fs has no private interest in the buffers.

        ->freepage() is called when the kernel is done dropping the page
from the page cache.
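
As an illustration of the ->releasepage() contract above, here is a minimal,
hedged sketch for a buffer_head-based filesystem.  In practice such
filesystems often just use try_to_free_buffers() (directly or via a
journalling wrapper); the examplefs_ name is invented:

#include <linux/buffer_head.h>
#include <linux/mm.h>

static int examplefs_releasepage(struct page *page, gfp_t gfp_mask)
{
        /*
         * The page is locked by the caller (see the locking table above).
         * A real instance may also consult gfp_mask, e.g. to avoid
         * blocking when called from reclaim.
         */
        if (!page_has_buffers(page))
                return 1;       /* nothing to release - page is freeable */

        /* returns non-zero only if all buffers could be dropped */
        return try_to_free_buffers(page);
}
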
        ->launder_page() may be called prior to releasing a page if
it is still found to be dirty. It returns zero if the page was successfully
cleaned, or an error value if not. Note that in order to prevent the page
getting mapped back in and redirtied, it needs to be kept locked
across the entire operation.

----------------------- file_lock_operations ------------------------------
prototypes:
        void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
        void (*fl_release_private)(struct file_lock *);

locking rules:
                        file_lock_lock  may block
fl_copy_lock:           yes             no
fl_release_private:     maybe           no

----------------------- lock_manager_operations ---------------------------
prototypes:
        int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
        void (*fl_notify)(struct file_lock *);  /* unblock callback */
        int (*fl_grant)(struct file_lock *, struct file_lock *, int);
        void (*fl_release_private)(struct file_lock *);
        void (*fl_break)(struct file_lock *);   /* break_lease callback */
        int (*fl_mylease)(struct file_lock *, struct file_lock *);
        int (*fl_change)(struct file_lock **, int);

locking rules:
                        file_lock_lock  may block
fl_compare_owner:       yes             no
fl_notify:              yes             no
fl_grant:               no              no
fl_release_private:     maybe           no
fl_break:               yes             no
fl_mylease:             yes             no
fl_change:              yes             no

--------------------------- buffer_head -----------------------------------
prototypes:
        void (*b_end_io)(struct buffer_head *bh, int uptodate);

locking rules:
        called from interrupts. In other words, extreme care is needed here.
bh is locked, but that's all the guarantees we have here. Currently only RAID1,
highmem, fs/buffer.c, and fs/ntfs/aops.c are providing these. Block devices
call this method upon I/O completion.

--------------------------- block_device_operations -----------------------
prototypes:
        int (*open) (struct block_device *, fmode_t);
        int (*release) (struct gendisk *, fmode_t);
        int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
        int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
        int (*direct_access) (struct block_device *, sector_t, void **, unsigned long *);
        int (*media_changed) (struct gendisk *);
        void (*unlock_native_capacity) (struct gendisk *);
        int (*revalidate_disk) (struct gendisk *);
        int (*getgeo)(struct block_device *, struct hd_geometry *);
        void (*swap_slot_free_notify) (struct block_device *, unsigned long);

locking rules:
                        bd_mutex
open:                   yes
release:                yes
ioctl:                  no
compat_ioctl:           no
direct_access:          no
media_changed:          no
unlock_native_capacity: no
revalidate_disk:        no
getgeo:                 no
swap_slot_free_notify:  no      (see below)

media_changed, unlock_native_capacity and revalidate_disk are called only from
check_disk_change().

swap_slot_free_notify is called with swap_lock and sometimes the page lock
held.
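
To illustrate the table above, here is a hedged sketch of a hypothetical
"exampleblk" driver relying on the fact that ->open() and ->release() are
serialized by bd_mutex taken in the block layer (the exampleblk_ names and
the users counter are made up for illustration only):

#include <linux/blkdev.h>
#include <linux/module.h>

struct exampleblk_dev {
        int users;              /* protected by bd_mutex via open/release */
};

static int exampleblk_open(struct block_device *bdev, fmode_t mode)
{
        struct exampleblk_dev *dev = bdev->bd_disk->private_data;

        /* bd_mutex (held by the caller) serializes this against release */
        dev->users++;
        return 0;
}

static int exampleblk_release(struct gendisk *disk, fmode_t mode)
{
        struct exampleblk_dev *dev = disk->private_data;

        dev->users--;
        return 0;
}

static const struct block_device_operations exampleblk_fops = {
        .owner          = THIS_MODULE,
        .open           = exampleblk_open,
        .release        = exampleblk_release,
};
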
--------------------------- file_operations -------------------------------
prototypes:
        loff_t (*llseek) (struct file *, loff_t, int);
        ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
        ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
        ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
        ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
        int (*readdir) (struct file *, void *, filldir_t);
        unsigned int (*poll) (struct file *, struct poll_table_struct *);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
        long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
        int (*mmap) (struct file *, struct vm_area_struct *);
        int (*open) (struct inode *, struct file *);
        int (*flush) (struct file *);
        int (*release) (struct inode *, struct file *);
        int (*fsync) (struct file *, int datasync);
        int (*aio_fsync) (struct kiocb *, int datasync);
        int (*fasync) (int, struct file *, int);
        int (*lock) (struct file *, int, struct file_lock *);
        ssize_t (*readv) (struct file *, const struct iovec *, unsigned long,
                        loff_t *);
        ssize_t (*writev) (struct file *, const struct iovec *, unsigned long,
                        loff_t *);
        ssize_t (*sendfile) (struct file *, loff_t *, size_t, read_actor_t,
                        void __user *);
        ssize_t (*sendpage) (struct file *, struct page *, int, size_t,
                        loff_t *, int);
        unsigned long (*get_unmapped_area)(struct file *, unsigned long,
                        unsigned long, unsigned long, unsigned long);
        int (*check_flags)(int);
        int (*flock) (struct file *, int, struct file_lock *);
        ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *,
                        size_t, unsigned int);
        ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *,
                        size_t, unsigned int);
        int (*setlease)(struct file *, long, struct file_lock **);

locking rules:
        All may block except for ->setlease.
        No VFS locks held on entry except for ->fsync and ->setlease.

->fsync() has i_mutex on the inode.

->setlease has the file_list_lock held and must not sleep.

->llseek() locking has moved from llseek to the individual llseek
implementations.  If your fs is not using generic_file_llseek, you
need to acquire and release the appropriate locks in your ->llseek().
For many filesystems, it is probably safe to acquire the inode
mutex or just to use i_size_read() instead (see the sketch at the end of
this section).
Note: this does not protect the file->f_pos against concurrent modifications
since this is something userspace has to take care of.

->fasync() is responsible for maintaining the FASYNC bit in filp->f_flags.
Most instances call fasync_helper(), which does that maintenance, so it's
not normally something one needs to worry about.  Return values > 0 will be
mapped to zero in the VFS layer.

->readdir() and ->ioctl() on directories must be changed. Ideally we would
move ->readdir() to inode_operations and use a separate method for directory
->ioctl() or kill the latter completely. One of the problems is that for
anything that resembles union-mount we won't have a struct file for all
components. And there are other reasons why the current interface is a mess...

->read on directories probably must go away - we should just enforce -EISDIR
in sys_read() and friends.
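
Here is the ->llseek() sketch referred to above - a minimal, hedged example
for a hypothetical examplefs that takes i_mutex itself, since the VFS no
longer does it.  Filesystems that are happy with the generic semantics should
simply use generic_file_llseek():

#include <linux/fs.h>

static loff_t examplefs_llseek(struct file *file, loff_t offset, int origin)
{
        struct inode *inode = file->f_mapping->host;
        loff_t ret;

        mutex_lock(&inode->i_mutex);
        switch (origin) {
        case SEEK_END:
                offset += i_size_read(inode);
                break;
        case SEEK_CUR:
                offset += file->f_pos;
                break;
        }
        ret = -EINVAL;
        if (offset >= 0) {
                file->f_pos = offset;
                ret = offset;
        }
        mutex_unlock(&inode->i_mutex);
        return ret;
}
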
--------------------------- dquot_operations -------------------------------
prototypes:
        int (*write_dquot) (struct dquot *);
        int (*acquire_dquot) (struct dquot *);
        int (*release_dquot) (struct dquot *);
        int (*mark_dirty) (struct dquot *);
        int (*write_info) (struct super_block *, int);

These operations are intended to be more or less wrapping functions that
ensure proper locking with respect to the filesystem and call the generic
quota operations.

What the filesystem should expect from the generic quota functions:

                FS recursion    Held locks when called
write_dquot:    yes             dqonoff_mutex or dqptr_sem
acquire_dquot:  yes             dqonoff_mutex or dqptr_sem
release_dquot:  yes             dqonoff_mutex or dqptr_sem
mark_dirty:     no              -
write_info:     yes             dqonoff_mutex

FS recursion means calling ->quota_read() and ->quota_write() from superblock
operations.

More details about quota locking can be found in fs/quota/dquot.c.

--------------------------- vm_operations_struct -----------------------------
prototypes:
        void (*open)(struct vm_area_struct *);
        void (*close)(struct vm_area_struct *);
        int (*fault)(struct vm_area_struct *, struct vm_fault *);
        int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *);
        int (*access)(struct vm_area_struct *, unsigned long, void *, int, int);

locking rules:
                mmap_sem        PageLocked(page)
open:           yes
close:          yes
fault:          yes             can return with page locked
page_mkwrite:   yes             can return with page locked
access:         yes

        ->fault() is called when a previously not present pte is about
to be faulted in. The filesystem must find and return the page associated
with the passed in "pgoff" in the vm_fault structure. If it is possible that
the page may be truncated and/or invalidated, then the filesystem must lock
the page, then ensure it is not already truncated (the page lock will block
subsequent truncate), and then return with VM_FAULT_LOCKED, and the page
locked. The VM will unlock the page. (A sketch of such a ->fault() instance
can be found at the end of this file.)

        ->page_mkwrite() is called when a previously read-only pte is
about to become writeable. The filesystem again must ensure that there are
no truncate/invalidate races, and then return with the page locked. If
the page has been truncated, the filesystem should not look up a new page
like the ->fault() handler, but simply return with VM_FAULT_NOPAGE, which
will cause the VM to retry the fault.

        ->access() is called when get_user_pages() fails in
access_process_vm(), typically used to debug a process through
/proc/pid/mem or ptrace. This function is needed only for
VM_IO | VM_PFNMAP VMAs.

================================================================================
                        Dubious stuff

(if you break something or notice that it is broken and do not fix it yourself
- at least put it here)
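
As an illustration of the ->fault() locking contract in the
vm_operations_struct section above, here is a minimal, hedged sketch for a
hypothetical examplefs.  examplefs_get_page() is an assumed helper that finds
or reads in the page at the given index; most filesystems simply use
filemap_fault() instead of rolling their own:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* assumed helper, not real kernel API: */
static struct page *examplefs_get_page(struct address_space *mapping,
                                       pgoff_t index);

static int examplefs_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct page *page;

retry:
        page = examplefs_get_page(mapping, vmf->pgoff);
        if (IS_ERR(page))
                return VM_FAULT_SIGBUS;

        lock_page(page);
        /* the page lock blocks truncate; recheck that we did not race */
        if (page->mapping != mapping) {
                unlock_page(page);
                page_cache_release(page);
                goto retry;
        }

        vmf->page = page;
        return VM_FAULT_LOCKED;         /* the VM will unlock the page */
}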