	The text below describes the locking rules for VFS-related methods.
It is (believed to be) up-to-date. *Please*, if you change anything in
prototypes or locking protocols - update this file. And update the relevant
instances in the tree, don't leave that to maintainers of filesystems/devices/
etc. At the very least, put the list of dubious cases at the end of this file.
Don't turn it into a log - maintainers of out-of-the-tree code are supposed to
be able to use diff(1).
	Thing currently missing here: socket operations. Alexey?

--------------------------- dentry_operations --------------------------
prototypes:
	int (*d_revalidate)(struct dentry *, int);
	int (*d_hash) (struct dentry *, struct qstr *);
	int (*d_compare) (struct dentry *, struct qstr *, struct qstr *);
	int (*d_delete)(struct dentry *);
	void (*d_release)(struct dentry *);
	void (*d_iput)(struct dentry *, struct inode *);
	char *(*d_dname)(struct dentry *dentry, char *buffer, int buflen);

locking rules:
	none have BKL
		dcache_lock	rename_lock	->d_lock	may block
d_revalidate:	no		no		no		yes
d_hash:		no		no		no		yes
d_compare:	no		yes		no		no
d_delete:	yes		no		yes		no
d_release:	no		no		no		yes
d_iput:		no		no		no		yes
d_dname:	no		no		no		no

--------------------------- inode_operations ---------------------------
prototypes:
	int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
	struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
	int (*link) (struct dentry *,struct inode *,struct dentry *);
	int (*unlink) (struct inode *,struct dentry *);
	int (*symlink) (struct inode *,struct dentry *,const char *);
	int (*mkdir) (struct inode *,struct dentry *,int);
	int (*rmdir) (struct inode *,struct dentry *);
	int (*mknod) (struct inode *,struct dentry *,int,dev_t);
	int (*rename) (struct inode *, struct dentry *,
			struct inode *, struct dentry *);
	int (*readlink) (struct dentry *, char __user *,int);
	int (*follow_link) (struct dentry *, struct nameidata *);
	void (*truncate) (struct inode *);
	int (*permission) (struct inode *, int, struct nameidata *);
	int (*setattr) (struct dentry *, struct iattr *);
	int (*getattr) (struct vfsmount *, struct dentry *, struct kstat *);
	int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
	ssize_t (*listxattr) (struct dentry *, char *, size_t);
	int (*removexattr) (struct dentry *, const char *);

locking rules:
	all may block, none have BKL
		i_mutex(inode)
lookup:		yes
create:		yes
link:		yes (both)
mknod:		yes
symlink:	yes
mkdir:		yes
unlink:		yes (both)
rmdir:		yes (both)	(see below)
rename:		yes (all)	(see below)
readlink:	no
follow_link:	no
truncate:	yes		(see below)
setattr:	yes
permission:	no
getattr:	no
setxattr:	yes
getxattr:	no
listxattr:	no
removexattr:	yes
	Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
the victim.
	cross-directory ->rename() has (per-superblock) ->s_vfs_rename_mutex.
	->truncate() is never called directly - it's a callback, not a
method. It is called by vmtruncate() - a library function normally used by
->setattr(). The locking information above applies to that call (i.e. it is
inherited from ->setattr() - vmtruncate() is used when ATTR_SIZE has been
passed).

See Documentation/filesystems/directory-locking for a more detailed discussion
of the locking scheme for directory operations.
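
	As an illustration of the ->truncate() note above, here is a minimal
sketch of a ->setattr() instance (foo_setattr() and the "foo" filesystem are
hypothetical) that relies on the generic helpers; inode_setattr() calls
vmtruncate() when ATTR_SIZE is set, and that is the only path by which
->truncate() gets invoked:

	static int foo_setattr(struct dentry *dentry, struct iattr *attr)
	{
		struct inode *inode = dentry->d_inode;
		int error;

		/* i_mutex is already held by the caller, per the table above */
		error = inode_change_ok(inode, attr);
		if (error)
			return error;

		/*
		 * inode_setattr() calls vmtruncate() if ATTR_SIZE is set, and
		 * vmtruncate() in turn calls ->truncate() - so ->truncate()
		 * inherits the i_mutex protection from this method.
		 */
		return inode_setattr(inode, attr);
	}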

--------------------------- super_operations ---------------------------
prototypes:
	struct inode *(*alloc_inode)(struct super_block *sb);
	void (*destroy_inode)(struct inode *);
	void (*read_inode) (struct inode *);
	void (*dirty_inode) (struct inode *);
	int (*write_inode) (struct inode *, int);
	void (*put_inode) (struct inode *);
	void (*drop_inode) (struct inode *);
	void (*delete_inode) (struct inode *);
	void (*put_super) (struct super_block *);
	void (*write_super) (struct super_block *);
	int (*sync_fs)(struct super_block *sb, int wait);
	void (*write_super_lockfs) (struct super_block *);
	void (*unlockfs) (struct super_block *);
	int (*statfs) (struct dentry *, struct kstatfs *);
	int (*remount_fs) (struct super_block *, int *, char *);
	void (*clear_inode) (struct inode *);
	void (*umount_begin) (struct super_block *);
	int (*show_options)(struct seq_file *, struct vfsmount *);
	ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
	ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);

locking rules:
	All may block.
			BKL	s_lock	s_umount
alloc_inode:		no	no	no
destroy_inode:		no
read_inode:		no				(see below)
dirty_inode:		no				(must not sleep)
write_inode:		no
put_inode:		no
drop_inode:		no				!!!inode_lock!!!
delete_inode:		no
put_super:		yes	yes	no
write_super:		no	yes	read
sync_fs:		no	no	read
write_super_lockfs:	?
unlockfs:		?
statfs:			no	no	no
remount_fs:		yes	yes	maybe		(see below)
clear_inode:		no
umount_begin:		yes	no	no
show_options:		no				(vfsmount->sem)
quota_read:		no	no	no		(see below)
quota_write:		no	no	no		(see below)

->read_inode() is not a method - it's a callback used in iget().
->remount_fs() will have the s_umount lock if it's already mounted.
When called from get_sb_single, it does NOT have the s_umount lock.
->quota_read() and ->quota_write() are both guaranteed by the quota code
(via dqio_mutex) to be the only functions operating on the quota file
(unless an admin really wants to screw something up and writes to quota
files with quotas on). For other details about locking see also the
dquot_operations section.

--------------------------- file_system_type ---------------------------
prototypes:
	int (*get_sb) (struct file_system_type *, int,
		       const char *, void *, struct vfsmount *);
	void (*kill_sb) (struct super_block *);
locking rules:
		may block	BKL
get_sb		yes		yes
kill_sb		yes		yes

->get_sb() returns an error or 0 with a locked superblock attached to the
vfsmount (exclusive on ->s_umount).
->kill_sb() takes a write-locked superblock, does all shutdown work on it,
unlocks and drops the reference.
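
	As a sketch of the common case (the names are illustrative, not from
any real filesystem), a block-based filesystem usually implements ->get_sb()
via get_sb_bdev(), which returns with the new superblock attached to the
vfsmount and ->s_umount held exclusive, exactly as required above; ->kill_sb()
is then typically kill_block_super():

	static int foo_get_sb(struct file_system_type *fs_type, int flags,
			      const char *dev_name, void *data,
			      struct vfsmount *mnt)
	{
		/*
		 * get_sb_bdev() comes back with the superblock locked
		 * (->s_umount held exclusive) and attached to *mnt;
		 * foo_fill_super() is a hypothetical fill_super callback.
		 */
		return get_sb_bdev(fs_type, flags, dev_name, data,
				   foo_fill_super, mnt);
	}

	static struct file_system_type foo_fs_type = {
		.owner		= THIS_MODULE,
		.name		= "foo",
		.get_sb		= foo_get_sb,
		.kill_sb	= kill_block_super,
		.fs_flags	= FS_REQUIRES_DEV,
	};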

--------------------------- address_space_operations --------------------------
prototypes:
	int (*writepage)(struct page *page, struct writeback_control *wbc);
	int (*readpage)(struct file *, struct page *);
	int (*sync_page)(struct page *);
	int (*writepages)(struct address_space *, struct writeback_control *);
	int (*set_page_dirty)(struct page *page);
	int (*readpages)(struct file *filp, struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages);
	int (*prepare_write)(struct file *, struct page *, unsigned, unsigned);
	int (*commit_write)(struct file *, struct page *, unsigned, unsigned);
	sector_t (*bmap)(struct address_space *, sector_t);
	int (*invalidatepage) (struct page *, unsigned long);
	int (*releasepage) (struct page *, int);
	int (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
			loff_t offset, unsigned long nr_segs);
	int (*launder_page) (struct page *);

locking rules:
	All except set_page_dirty may block

			BKL	PageLocked(page)	i_mutex
writepage:		no	yes, unlocks (see below)
readpage:		no	yes, unlocks
sync_page:		no	maybe
writepages:		no
set_page_dirty		no	no
readpages:		no
prepare_write:		no	yes			yes
commit_write:		no	yes			yes
write_begin:		no	locks the page		yes
write_end:		no	yes, unlocks		yes
perform_write:		no	n/a			yes
bmap:			yes
invalidatepage:		no	yes
releasepage:		no	yes
direct_IO:		no
launder_page:		no	yes

	->prepare_write(), ->commit_write(), ->sync_page() and ->readpage()
may be called from the request handler (/dev/loop).

	->readpage() unlocks the page, either synchronously or via I/O
completion.

	->readpages() populates the pagecache with the passed pages and starts
I/O against them. They come unlocked upon I/O completion.

	->writepage() is used for two purposes: for "memory cleansing" and for
"sync". These are quite different operations and the behaviour may differ
depending upon the mode.

If writepage is called for sync (wbc->sync_mode != WB_SYNC_NONE) then
it *must* start I/O against the page, even if that would involve
blocking on in-progress I/O.

If writepage is called for memory cleansing (sync_mode ==
WB_SYNC_NONE) then its role is to get as much writeout underway as
possible. So writepage should try to avoid blocking against
currently-in-progress I/O.

If the filesystem is not called for "sync" and it determines that it
would need to block against in-progress I/O to be able to start new I/O
against the page, the filesystem should redirty the page with
redirty_page_for_writepage(), then unlock the page and return zero.
This may also be done to avoid internal deadlocks, but rarely.

If the filesystem is called for sync then it must wait on any
in-progress I/O and then start new I/O.

The filesystem should unlock the page synchronously, before returning to the
caller, unless ->writepage() returns the special WRITEPAGE_ACTIVATE
value. WRITEPAGE_ACTIVATE means that the page cannot really be written out
currently, and the VM should stop calling ->writepage() on this page for some
time. The VM does this by moving the page to the head of the active list,
hence the name.

Unless the filesystem is going to redirty_page_for_writepage(), unlock the page
and return zero, writepage *must* run set_page_writeback() against the page,
followed by unlocking it.
Once set_page_writeback() has been run against the
page, write I/O can be submitted and the write I/O completion handler must run
end_page_writeback() once the I/O is complete. If no I/O is submitted, the
filesystem must run end_page_writeback() against the page before returning from
writepage.

That is: after 2.5.12, pages which are under writeout are *not* locked. Note,
if the filesystem needs the page to be locked during writeout, that is ok, too,
the page is allowed to be unlocked at any point in time between the calls to
set_page_writeback() and end_page_writeback().

Note, failure to run either redirty_page_for_writepage() or the combination of
set_page_writeback()/end_page_writeback() on a page submitted to writepage
will leave the page itself marked clean but it will be tagged as dirty in the
radix tree. This incoherency can lead to all sorts of hard-to-debug problems
in the filesystem, like having dirty inodes at umount and losing written data.

	->sync_page() locking rules are not well-defined - usually it is
called with the page locked, but that is not guaranteed. Considering the
currently existing instances of this method, ->sync_page() itself doesn't look
well-defined...

	->writepages() is used for periodic writeback and for syscall-initiated
sync operations. The address_space should start I/O against at least
*nr_to_write pages. *nr_to_write must be decremented for each page which is
written. The address_space implementation may write more (or fewer) pages
than *nr_to_write asks for, but it should try to be reasonably close. If
nr_to_write is NULL, all dirty pages must be written.

writepages should _only_ write pages which are present on
mapping->io_pages.

	->set_page_dirty() is called from various places in the kernel
when the target page is marked as needing writeback. It may be called
under a spinlock (it cannot block) and is sometimes called with the page
not locked.

	->bmap() is currently used by the legacy ioctl() (FIBMAP) provided by
some filesystems and by the swapper. The latter will eventually go away. None
of the instances actually needs the BKL. Please, keep it that way and don't
breed new callers.

	->invalidatepage() is called when the filesystem must attempt to drop
some or all of the buffers from the page when it is being truncated. It
returns zero on success. If ->invalidatepage is zero, the kernel uses
block_invalidatepage() instead.

	->releasepage() is called when the kernel is about to try to drop the
buffers from the page in preparation for freeing it. It returns zero to
indicate that the buffers are (or may be) freeable. If ->releasepage is zero,
the kernel assumes that the fs has no private interest in the buffers.

	->launder_page() may be called prior to releasing a page if
it is still found to be dirty. It returns zero if the page was successfully
cleaned, or an error value if not. Note that in order to prevent the page
getting mapped back in and redirtied, it needs to be kept locked
across the entire operation.

	Note: currently almost all instances of address_space methods are
using BKL for internal serialization and that's one of the worst sources
of contention. Normally they are calling library functions (in fs/buffer.c)
and pass foo_get_block() as a callback (on local block-based filesystems,
indeed).
BKL is not needed for the library stuff and is usually taken by
foo_get_block(). It's overkill, since block bitmaps can be protected by
internal fs locking and the real critical areas are much smaller than the
areas filesystems protect now.

----------------------- file_lock_operations ------------------------------
prototypes:
	void (*fl_insert)(struct file_lock *);	/* lock insertion callback */
	void (*fl_remove)(struct file_lock *);	/* lock removal callback */
	void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
	void (*fl_release_private)(struct file_lock *);

locking rules:
			BKL	may block
fl_insert:		yes	no
fl_remove:		yes	no
fl_copy_lock:		yes	no
fl_release_private:	yes	yes

----------------------- lock_manager_operations ---------------------------
prototypes:
	int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
	void (*fl_notify)(struct file_lock *);	/* unblock callback */
	void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
	void (*fl_release_private)(struct file_lock *);
	void (*fl_break)(struct file_lock *);	/* break_lease callback */

locking rules:
			BKL	may block
fl_compare_owner:	yes	no
fl_notify:		yes	no
fl_copy_lock:		yes	no
fl_release_private:	yes	yes
fl_break:		yes	no

	Currently only NFSD and NLM provide instances of this class. None of
them block. If you have out-of-tree instances - please, show up. Locking
in that area will change.

--------------------------- buffer_head -----------------------------------
prototypes:
	void (*b_end_io)(struct buffer_head *bh, int uptodate);

locking rules:
	called from interrupts. In other words, extreme care is needed here.
bh is locked, but that's all the guarantees we have here. Currently only RAID1,
highmem, fs/buffer.c, and fs/ntfs/aops.c are providing these. Block devices
call this method upon IO completion.

--------------------------- block_device_operations -----------------------
prototypes:
	int (*open) (struct inode *, struct file *);
	int (*release) (struct inode *, struct file *);
	int (*ioctl) (struct inode *, struct file *, unsigned, unsigned long);
	int (*media_changed) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);

locking rules:
			BKL	bd_mutex
open:			yes	yes
release:		yes	yes
ioctl:			yes	no
media_changed:		no	no
revalidate_disk:	no	no

The last two are called only from check_disk_change().
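
	As an illustrative sketch (the driver and its ->open() are
hypothetical), a removable-media driver's ->open() typically calls
check_disk_change(), which is what ends up invoking ->media_changed() and,
if the media did change, ->revalidate_disk(); BKL and bd_mutex are already
held when ->open() runs, per the table above:

	static int foo_open(struct inode *inode, struct file *filp)
	{
		/* BKL and bd_mutex are held by the caller here */
		check_disk_change(inode->i_bdev);	/* may call ->media_changed()
							   and ->revalidate_disk() */
		/* a real driver would also claim its own resources here */
		return 0;
	}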

--------------------------- file_operations -------------------------------
prototypes:
	loff_t (*llseek) (struct file *, loff_t, int);
	ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
	ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
	ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
	ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
	int (*readdir) (struct file *, void *, filldir_t);
	unsigned int (*poll) (struct file *, struct poll_table_struct *);
	int (*ioctl) (struct inode *, struct file *, unsigned int,
			unsigned long);
	long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
	long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
	int (*mmap) (struct file *, struct vm_area_struct *);
	int (*open) (struct inode *, struct file *);
	int (*flush) (struct file *);
	int (*release) (struct inode *, struct file *);
	int (*fsync) (struct file *, struct dentry *, int datasync);
	int (*aio_fsync) (struct kiocb *, int datasync);
	int (*fasync) (int, struct file *, int);
	int (*lock) (struct file *, int, struct file_lock *);
	ssize_t (*readv) (struct file *, const struct iovec *, unsigned long,
			loff_t *);
	ssize_t (*writev) (struct file *, const struct iovec *, unsigned long,
			loff_t *);
	ssize_t (*sendfile) (struct file *, loff_t *, size_t, read_actor_t,
			void __user *);
	ssize_t (*sendpage) (struct file *, struct page *, int, size_t,
			loff_t *, int);
	unsigned long (*get_unmapped_area)(struct file *, unsigned long,
			unsigned long, unsigned long, unsigned long);
	int (*check_flags)(int);
	int (*dir_notify)(struct file *, unsigned long);

locking rules:
	All except ->poll() may block.
			BKL
llseek:			no	(see below)
read:			no
aio_read:		no
write:			no
aio_write:		no
readdir:		no
poll:			no
ioctl:			yes	(see below)
unlocked_ioctl:		no	(see below)
compat_ioctl:		no
mmap:			no
open:			maybe	(see below)
flush:			no
release:		no
fsync:			no	(see below)
aio_fsync:		no
fasync:			yes	(see below)
lock:			yes
readv:			no
writev:			no
sendfile:		no
sendpage:		no
get_unmapped_area:	no
check_flags:		no
dir_notify:		no

->llseek() locking has moved from llseek to the individual llseek
implementations. If your fs is not using generic_file_llseek, you
need to acquire and release the appropriate locks in your ->llseek().
For many filesystems, it is probably safe to acquire the inode
mutex. Note that some filesystems (e.g. remote ones) provide no
protection for i_size, so you will need to use the BKL.

->open() locking is in-transit: the big lock has been partially moved into the
methods. The only exception is ->open() in the instances of file_operations
that never end up in ->i_fop/->proc_fops, i.e. ones that belong to character
devices (chrdev_open() takes the lock before replacing ->f_op and calling the
secondary method). As soon as we fix the handling of module reference counters,
all instances of ->open() will be called without the BKL.

Note: ext2_release() was *the* source of contention on fs-intensive
loads and dropping BKL on ->release() helps to get rid of that (we still
grab BKL for cases when we close a file that had been opened r/w, but that
can and should be done using the internal locking with smaller critical areas).
The current worst offender is ext2_get_block()...
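
	To illustrate the ->llseek() note above, here is a minimal sketch
(foo_llseek() is hypothetical) for a local filesystem that serializes against
i_size and f_pos updates with i_mutex, much like generic_file_llseek() does:

	static loff_t foo_llseek(struct file *file, loff_t offset, int origin)
	{
		struct inode *inode = file->f_mapping->host;
		loff_t ret = -EINVAL;

		/*
		 * The VFS no longer takes any lock for us - use i_mutex to get
		 * a stable i_size and to serialize updates of ->f_pos.
		 */
		mutex_lock(&inode->i_mutex);
		switch (origin) {
		case SEEK_END:
			offset += i_size_read(inode);
			break;
		case SEEK_CUR:
			offset += file->f_pos;
			break;
		}
		if (offset >= 0) {
			file->f_pos = offset;
			ret = offset;
		}
		mutex_unlock(&inode->i_mutex);
		return ret;
	}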

->fasync() is a mess. This area needs a big cleanup and that will probably
affect locking.

->readdir() and ->ioctl() on directories must be changed. Ideally we would
move ->readdir() to inode_operations and use a separate method for directory
->ioctl() or kill the latter completely. One of the problems is that for
anything that resembles union-mount we won't have a struct file for all
components. And there are other reasons why the current interface is a mess...

->ioctl() on regular files is superseded by ->unlocked_ioctl(), which
doesn't take the BKL.

->read on directories probably must go away - we should just enforce -EISDIR
in sys_read() and friends.

->fsync() has i_mutex on the inode.

--------------------------- dquot_operations -------------------------------
prototypes:
	int (*initialize) (struct inode *, int);
	int (*drop) (struct inode *);
	int (*alloc_space) (struct inode *, qsize_t, int);
	int (*alloc_inode) (const struct inode *, unsigned long);
	int (*free_space) (struct inode *, qsize_t);
	int (*free_inode) (const struct inode *, unsigned long);
	int (*transfer) (struct inode *, struct iattr *);
	int (*write_dquot) (struct dquot *);
	int (*acquire_dquot) (struct dquot *);
	int (*release_dquot) (struct dquot *);
	int (*mark_dirty) (struct dquot *);
	int (*write_info) (struct super_block *, int);

These operations are intended to be more or less wrapping functions that ensure
proper locking wrt the filesystem and call the generic quota operations.

What the filesystem should expect from the generic quota functions:

		FS recursion	Held locks when called
initialize:	yes		maybe dqonoff_mutex
drop:		yes		-
alloc_space:	->mark_dirty()	-
alloc_inode:	->mark_dirty()	-
free_space:	->mark_dirty()	-
free_inode:	->mark_dirty()	-
transfer:	yes		-
write_dquot:	yes		dqonoff_mutex or dqptr_sem
acquire_dquot:	yes		dqonoff_mutex or dqptr_sem
release_dquot:	yes		dqonoff_mutex or dqptr_sem
mark_dirty:	no		-
write_info:	yes		dqonoff_mutex

FS recursion means calling ->quota_read() and ->quota_write() from superblock
operations.

->alloc_space(), ->alloc_inode(), ->free_space() and ->free_inode() are called
only directly by the filesystem and do not call any fs functions, only
the ->mark_dirty() operation.

More details about quota locking can be found in fs/dquot.c.

--------------------------- vm_operations_struct -----------------------------
prototypes:
	void (*open)(struct vm_area_struct*);
	void (*close)(struct vm_area_struct*);
	int (*fault)(struct vm_area_struct*, struct vm_fault *);
	struct page *(*nopage)(struct vm_area_struct*, unsigned long, int *);
	int (*page_mkwrite)(struct vm_area_struct *, struct page *);

locking rules:
		BKL	mmap_sem	PageLocked(page)
open:		no	yes
close:		no	yes
fault:		no	yes
nopage:		no	yes
page_mkwrite:	no	yes		no

	->page_mkwrite() is called when a previously read-only page is
about to become writeable. The filesystem is responsible for
protecting against truncate races. Once appropriate action has been
taken to lock out truncate, the page range should be verified to be
within i_size. The page mapping should also be checked to make sure it is
not NULL.
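
	As a sketch of the truncate-race check described above (foo_page_mkwrite()
and the choice of error value are illustrative, not mandated by the VFS), a
filesystem might lock the page, confirm it is still attached to the right
mapping and still within i_size, and only then allocate blocks for it:

	static int foo_page_mkwrite(struct vm_area_struct *vma, struct page *page)
	{
		struct inode *inode = vma->vm_file->f_mapping->host;
		int ret = 0;

		/*
		 * Lock the page so truncate cannot yank it out from under us,
		 * then verify that it still belongs to this mapping and that
		 * its start is below i_size.
		 */
		lock_page(page);
		if (page->mapping != inode->i_mapping ||
		    page_offset(page) >= i_size_read(inode)) {
			ret = -EINVAL;	/* the page has been truncated away */
			goto out;
		}

		/* ... filesystem-specific block allocation would go here ... */
	out:
		unlock_page(page);
		return ret;
	}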

================================================================================
			Dubious stuff

(if you break something or notice that it is broken and do not fix it yourself
- at least put it here)

ipc/shm.c::shm_delete() - may need BKL.
->read() and ->write() in many drivers are (probably) missing BKL.
drivers/sgi/char/graphics.c::sgi_graphics_nopage() - may need BKL.