	The text below describes the locking rules for VFS-related methods.
It is (believed to be) up-to-date. *Please*, if you change anything in
prototypes or locking protocols - update this file. And update the relevant
instances in the tree, don't leave that to maintainers of filesystems/devices/
etc. At the very least, put the list of dubious cases at the end of this file.
Don't turn it into a log - maintainers of out-of-the-tree code are supposed to
be able to use diff(1).
	Thing currently missing here: socket operations. Alexey?

--------------------------- dentry_operations --------------------------
prototypes:
	int (*d_revalidate)(struct dentry *, int);
	int (*d_hash) (struct dentry *, struct qstr *);
	int (*d_compare) (struct dentry *, struct qstr *, struct qstr *);
	int (*d_delete)(struct dentry *);
	void (*d_release)(struct dentry *);
	void (*d_iput)(struct dentry *, struct inode *);
	char *(*d_dname)(struct dentry *dentry, char *buffer, int buflen);

locking rules:
	none have BKL
		dcache_lock	rename_lock	->d_lock	may block
d_revalidate:	no		no		no		yes
d_hash:		no		no		no		yes
d_compare:	no		yes		no		no
d_delete:	yes		no		yes		no
d_release:	no		no		no		yes
d_iput:		no		no		no		yes
d_dname:	no		no		no		no

--------------------------- inode_operations ---------------------------
prototypes:
	int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
	struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
	int (*link) (struct dentry *,struct inode *,struct dentry *);
	int (*unlink) (struct inode *,struct dentry *);
	int (*symlink) (struct inode *,struct dentry *,const char *);
	int (*mkdir) (struct inode *,struct dentry *,int);
	int (*rmdir) (struct inode *,struct dentry *);
	int (*mknod) (struct inode *,struct dentry *,int,dev_t);
	int (*rename) (struct inode *, struct dentry *,
			struct inode *, struct dentry *);
	int (*readlink) (struct dentry *, char __user *,int);
	int (*follow_link) (struct dentry *, struct nameidata *);
	void (*truncate) (struct inode *);
	int (*permission) (struct inode *, int, struct nameidata *);
	int (*setattr) (struct dentry *, struct iattr *);
	int (*getattr) (struct vfsmount *, struct dentry *, struct kstat *);
	int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
	ssize_t (*listxattr) (struct dentry *, char *, size_t);
	int (*removexattr) (struct dentry *, const char *);

locking rules:
	all may block, none have BKL
		i_mutex(inode)
lookup:		yes
create:		yes
link:		yes (both)
mknod:		yes
symlink:	yes
mkdir:		yes
unlink:		yes (both)
rmdir:		yes (both)	(see below)
rename:		yes (all)	(see below)
readlink:	no
follow_link:	no
truncate:	yes		(see below)
setattr:	yes
permission:	no
getattr:	no
setxattr:	yes
getxattr:	no
listxattr:	no
removexattr:	yes
	Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
the victim.
	Cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem.
	->truncate() is never called directly - it's a callback, not a
method. It is called by vmtruncate() - a library function normally used by
->setattr(). The locking information above applies to that call (i.e. it is
inherited from ->setattr() - vmtruncate() is used when ATTR_SIZE has been
passed). See the sketch below for how the pieces fit together.

See Documentation/filesystems/directory-locking for more detailed discussion
of the locking scheme for directory operations.
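
	For illustration only, here is a minimal sketch (against the 2.6.22
API) of how ->truncate() is reached through ->setattr(). The filesystem name
"foofs" and both functions are hypothetical; the point is that inode_setattr()
calls vmtruncate() when ATTR_SIZE is set, and vmtruncate() invokes ->truncate()
with i_mutex still held by the caller of ->setattr():

	#include <linux/fs.h>

	/* hypothetical example filesystem "foofs" */
	static void foofs_truncate(struct inode *inode)
	{
		/*
		 * i_mutex is held here, inherited from the caller of ->setattr();
		 * inode->i_size has already been updated by vmtruncate().
		 * Free the on-disk blocks beyond the new i_size here.
		 */
	}

	static int foofs_setattr(struct dentry *dentry, struct iattr *attr)
	{
		struct inode *inode = dentry->d_inode;
		int error;

		error = inode_change_ok(inode, attr);	/* standard permission checks */
		if (error)
			return error;
		/* for ATTR_SIZE, inode_setattr() calls vmtruncate(), which in
		 * turn calls ->truncate() */
		return inode_setattr(inode, attr);
	}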

--------------------------- super_operations ---------------------------
prototypes:
	struct inode *(*alloc_inode)(struct super_block *sb);
	void (*destroy_inode)(struct inode *);
	void (*read_inode) (struct inode *);
	void (*dirty_inode) (struct inode *);
	int (*write_inode) (struct inode *, int);
	void (*put_inode) (struct inode *);
	void (*drop_inode) (struct inode *);
	void (*delete_inode) (struct inode *);
	void (*put_super) (struct super_block *);
	void (*write_super) (struct super_block *);
	int (*sync_fs)(struct super_block *sb, int wait);
	void (*write_super_lockfs) (struct super_block *);
	void (*unlockfs) (struct super_block *);
	int (*statfs) (struct dentry *, struct kstatfs *);
	int (*remount_fs) (struct super_block *, int *, char *);
	void (*clear_inode) (struct inode *);
	void (*umount_begin) (struct super_block *);
	int (*show_options)(struct seq_file *, struct vfsmount *);
	ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
	ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);

locking rules:
	All may block.
			BKL	s_lock	s_umount
alloc_inode:		no	no	no
destroy_inode:		no
read_inode:		no	(see below)
dirty_inode:		no	(must not sleep)
write_inode:		no
put_inode:		no
drop_inode:		no			!!!inode_lock!!!
delete_inode:		no
put_super:		yes	yes	no
write_super:		no	yes	read
sync_fs:		no	no	read
write_super_lockfs:	?
unlockfs:		?
statfs:			no	no	no
remount_fs:		yes	yes	maybe	(see below)
clear_inode:		no
umount_begin:		yes	no	no
show_options:		no	(vfsmount->sem)
quota_read:		no	no	no	(see below)
quota_write:		no	no	no	(see below)

->read_inode() is not a method - it's a callback used in iget().
->remount_fs() will have the s_umount lock if it's already mounted.
When called from get_sb_single, it does NOT have the s_umount lock.
->quota_read() and ->quota_write() functions are both guaranteed to
be the only ones operating on the quota file by the quota code (via
dqio_sem) (unless an admin really wants to screw up something and
writes to quota files with quotas on). For other details about locking
see also dquot_operations section.

--------------------------- file_system_type ---------------------------
prototypes:
	int (*get_sb) (struct file_system_type *, int,
		       const char *, void *, struct vfsmount *);
	void (*kill_sb) (struct super_block *);
locking rules:
		may block	BKL
get_sb		yes		yes
kill_sb		yes		yes

->get_sb() returns error or 0 with locked superblock attached to the vfsmount
(exclusive on ->s_umount).
->kill_sb() takes a write-locked superblock, does all shutdown work on it,
unlocks and drops the reference.
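
	As a hedged illustration of the ->get_sb()/->kill_sb() contract above,
here is a minimal sketch using the 2.6.22 library helpers get_sb_nodev() and
kill_anon_super(); "foofs" and foofs_fill_super() are hypothetical. On success
the superblock comes back with ->s_umount held exclusive and attached to the
vfsmount, as the rules above require:

	#include <linux/fs.h>
	#include <linux/module.h>

	/* hypothetical: set s_op, allocate the root inode, etc.; 0 on success */
	static int foofs_fill_super(struct super_block *sb, void *data, int silent)
	{
		return 0;
	}

	static int foofs_get_sb(struct file_system_type *fs_type, int flags,
				const char *dev_name, void *data,
				struct vfsmount *mnt)
	{
		/* returns with sb->s_umount held exclusive, sb attached to mnt */
		return get_sb_nodev(fs_type, flags, data, foofs_fill_super, mnt);
	}

	static struct file_system_type foofs_type = {
		.owner   = THIS_MODULE,
		.name    = "foofs",
		.get_sb  = foofs_get_sb,
		.kill_sb = kill_anon_super,	/* shuts down, unlocks, drops the ref */
	};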

--------------------------- address_space_operations --------------------------
prototypes:
	int (*writepage)(struct page *page, struct writeback_control *wbc);
	int (*readpage)(struct file *, struct page *);
	int (*sync_page)(struct page *);
	int (*writepages)(struct address_space *, struct writeback_control *);
	int (*set_page_dirty)(struct page *page);
	int (*readpages)(struct file *filp, struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages);
	int (*prepare_write)(struct file *, struct page *, unsigned, unsigned);
	int (*commit_write)(struct file *, struct page *, unsigned, unsigned);
	sector_t (*bmap)(struct address_space *, sector_t);
	int (*invalidatepage) (struct page *, unsigned long);
	int (*releasepage) (struct page *, int);
	int (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
			loff_t offset, unsigned long nr_segs);
	int (*launder_page) (struct page *);

locking rules:
	All except set_page_dirty may block

			BKL	PageLocked(page)
writepage:		no	yes, unlocks (see below)
readpage:		no	yes, unlocks
sync_page:		no	maybe
writepages:		no
set_page_dirty:		no	no
readpages:		no
prepare_write:		no	yes
commit_write:		no	yes
bmap:			yes
invalidatepage:		no	yes
releasepage:		no	yes
direct_IO:		no
launder_page:		no	yes

	->prepare_write(), ->commit_write(), ->sync_page() and ->readpage()
may be called from the request handler (/dev/loop).

	->readpage() unlocks the page, either synchronously or via I/O
completion.

	->readpages() populates the pagecache with the passed pages and starts
I/O against them. They come unlocked upon I/O completion.

	->writepage() is used for two purposes: for "memory cleansing" and for
"sync". These are quite different operations and the behaviour may differ
depending upon the mode.

If writepage is called for sync (wbc->sync_mode != WB_SYNC_NONE) then
it *must* start I/O against the page, even if that would involve
blocking on in-progress I/O.

If writepage is called for memory cleansing (sync_mode ==
WB_SYNC_NONE) then its role is to get as much writeout underway as
possible. So writepage should try to avoid blocking against
currently-in-progress I/O.

If the filesystem is not called for "sync" and it determines that it
would need to block against in-progress I/O to be able to start new I/O
against the page, the filesystem should redirty the page with
redirty_page_for_writepage(), then unlock the page and return zero.
This may also be done to avoid internal deadlocks, but rarely.

If the filesystem is called for sync then it must wait on any
in-progress I/O and then start new I/O.

The filesystem should unlock the page synchronously, before returning to the
caller, unless ->writepage() returns the special WRITEPAGE_ACTIVATE
value. WRITEPAGE_ACTIVATE means that the page cannot really be written out
currently, and the VM should stop calling ->writepage() on this page for some
time. The VM does this by moving the page to the head of the active list,
hence the name.

Unless the filesystem is going to redirty_page_for_writepage(), unlock the page
and return zero, writepage *must* run set_page_writeback() against the page,
followed by unlocking it. Once set_page_writeback() has been run against the
page, write I/O can be submitted and the write I/O completion handler must run
end_page_writeback() once the I/O is complete.
If no I/O is submitted, the
filesystem must run end_page_writeback() against the page before returning from
writepage.

That is: after 2.5.12, pages which are under writeout are *not* locked. Note,
if the filesystem needs the page to be locked during writeout, that is ok, too;
the page is allowed to be unlocked at any point in time between the calls to
set_page_writeback() and end_page_writeback().

Note, failure to run either redirty_page_for_writepage() or the combination of
set_page_writeback()/end_page_writeback() on a page submitted to writepage
will leave the page itself marked clean but it will be tagged as dirty in the
radix tree. This incoherency can lead to all sorts of hard-to-debug problems
in the filesystem like having dirty inodes at umount and losing written data.
A writepage sketch that follows these rules appears at the end of this section.

	->sync_page() locking rules are not well-defined - usually it is called
with the page locked, but that is not guaranteed. Considering the currently
existing instances of this method, ->sync_page() itself doesn't look
well-defined...

	->writepages() is used for periodic writeback and for syscall-initiated
sync operations. The address_space should start I/O against at least
*nr_to_write pages. *nr_to_write must be decremented for each page which is
written. The address_space implementation may write more (or less) pages
than *nr_to_write asks for, but it should try to be reasonably close. If
nr_to_write is NULL, all dirty pages must be written.

writepages should _only_ write pages which are present on
mapping->io_pages.

	->set_page_dirty() is called from various places in the kernel
when the target page is marked as needing writeback. It may be called
under spinlock (it cannot block) and is sometimes called with the page
not locked.

	->bmap() is currently used by the legacy ioctl() (FIBMAP) provided by
some filesystems and by the swapper. The latter will eventually go away. None
of the existing instances actually need the BKL. Please, keep it that way and
don't breed new callers.

	->invalidatepage() is called when the filesystem must attempt to drop
some or all of the buffers from the page when it is being truncated. It
returns zero on success. If ->invalidatepage is zero, the kernel uses
block_invalidatepage() instead.

	->releasepage() is called when the kernel is about to try to drop the
buffers from the page in preparation for freeing it. It returns zero to
indicate that the buffers are (or may be) freeable. If ->releasepage is zero,
the kernel assumes that the fs has no private interest in the buffers.

	->launder_page() may be called prior to releasing a page if
it is still found to be dirty. It returns zero if the page was successfully
cleaned, or an error value if not. Note that in order to prevent the page
getting mapped back in and redirtied, it needs to be kept locked
across the entire operation.

	Note: currently almost all instances of address_space methods are
using the BKL for internal serialization and that's one of the worst sources
of contention. Normally they are calling library functions (in fs/buffer.c)
and pass foo_get_block() as a callback (on local block-based filesystems,
indeed). The BKL is not needed for the library stuff and is usually taken by
foo_get_block(). It's overkill, since block bitmaps can be protected by
internal fs locking and the real critical areas are much smaller than the
areas filesystems protect now.
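
	The following is a minimal, hedged sketch of a ->writepage() that obeys
the rules above: redirty and unlock when memory cleansing would have to block,
otherwise set_page_writeback() before unlocking, and end_page_writeback() if no
I/O was actually submitted. foofs_would_block() and foofs_submit_write_io() are
hypothetical placeholders for a filesystem's real I/O path (most block-based
filesystems simply call block_write_full_page() with their get_block callback):

	static int foofs_writepage(struct page *page, struct writeback_control *wbc)
	{
		/* the page arrives locked */
		if (wbc->sync_mode == WB_SYNC_NONE && foofs_would_block(page)) {
			/* memory cleansing: avoid blocking on in-progress I/O */
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
			return 0;
		}

		set_page_writeback(page);
		unlock_page(page);	/* pages under writeout are not locked */

		if (foofs_submit_write_io(page) != 0) {
			/* no I/O was submitted, so end writeback ourselves */
			end_page_writeback(page);
			return -EIO;
		}
		/* otherwise the I/O completion handler runs end_page_writeback() */
		return 0;
	}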

----------------------- file_lock_operations ------------------------------
prototypes:
	void (*fl_insert)(struct file_lock *);	/* lock insertion callback */
	void (*fl_remove)(struct file_lock *);	/* lock removal callback */
	void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
	void (*fl_release_private)(struct file_lock *);

locking rules:
			BKL	may block
fl_insert:		yes	no
fl_remove:		yes	no
fl_copy_lock:		yes	no
fl_release_private:	yes	yes

----------------------- lock_manager_operations ---------------------------
prototypes:
	int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
	void (*fl_notify)(struct file_lock *);	/* unblock callback */
	void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
	void (*fl_release_private)(struct file_lock *);
	void (*fl_break)(struct file_lock *);	/* break_lease callback */

locking rules:
			BKL	may block
fl_compare_owner:	yes	no
fl_notify:		yes	no
fl_copy_lock:		yes	no
fl_release_private:	yes	yes
fl_break:		yes	no

	Currently only NFSD and NLM provide instances of this class. None of
them block. If you have out-of-tree instances - please, show up. Locking
in that area will change.

--------------------------- buffer_head -----------------------------------
prototypes:
	void (*b_end_io)(struct buffer_head *bh, int uptodate);

locking rules:
	called from interrupts. In other words, extreme care is needed here.
bh is locked, but that's all the guarantees we have here. Currently only RAID1,
highmem, fs/buffer.c, and fs/ntfs/aops.c are providing these. Block devices
call this method upon IO completion. A hedged sketch of such a completion
callback follows the block_device_operations section below.

--------------------------- block_device_operations -----------------------
prototypes:
	int (*open) (struct inode *, struct file *);
	int (*release) (struct inode *, struct file *);
	int (*ioctl) (struct inode *, struct file *, unsigned, unsigned long);
	int (*media_changed) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);

locking rules:
			BKL	bd_sem
open:			yes	yes
release:		yes	yes
ioctl:			yes	no
media_changed:		no	no
revalidate_disk:	no	no

The last two are called only from check_disk_change().
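
	Going back to ->b_end_io() above: because it can run in interrupt
context with only the buffer lock held, it must not sleep or take sleeping
locks. A minimal sketch modelled on end_buffer_read_sync() in fs/buffer.c
(the name foofs_end_buffer_io is hypothetical):

	static void foofs_end_buffer_io(struct buffer_head *bh, int uptodate)
	{
		/* may run in interrupt context: no sleeping, no mutexes/semaphores */
		if (uptodate)
			set_buffer_uptodate(bh);
		else
			clear_buffer_uptodate(bh);
		/* bh arrived locked; unlock_buffer() also wakes wait_on_buffer() */
		unlock_buffer(bh);
	}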

--------------------------- file_operations -------------------------------
prototypes:
	loff_t (*llseek) (struct file *, loff_t, int);
	ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
	ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
	ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
	ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
	int (*readdir) (struct file *, void *, filldir_t);
	unsigned int (*poll) (struct file *, struct poll_table_struct *);
	int (*ioctl) (struct inode *, struct file *, unsigned int,
			unsigned long);
	long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
	long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
	int (*mmap) (struct file *, struct vm_area_struct *);
	int (*open) (struct inode *, struct file *);
	int (*flush) (struct file *);
	int (*release) (struct inode *, struct file *);
	int (*fsync) (struct file *, struct dentry *, int datasync);
	int (*aio_fsync) (struct kiocb *, int datasync);
	int (*fasync) (int, struct file *, int);
	int (*lock) (struct file *, int, struct file_lock *);
	ssize_t (*readv) (struct file *, const struct iovec *, unsigned long,
			loff_t *);
	ssize_t (*writev) (struct file *, const struct iovec *, unsigned long,
			loff_t *);
	ssize_t (*sendfile) (struct file *, loff_t *, size_t, read_actor_t,
			void __user *);
	ssize_t (*sendpage) (struct file *, struct page *, int, size_t,
			loff_t *, int);
	unsigned long (*get_unmapped_area)(struct file *, unsigned long,
			unsigned long, unsigned long, unsigned long);
	int (*check_flags)(int);
	int (*dir_notify)(struct file *, unsigned long);

locking rules:
	All except ->poll() may block.
			BKL
llseek:			no	(see below)
read:			no
aio_read:		no
write:			no
aio_write:		no
readdir:		no
poll:			no
ioctl:			yes	(see below)
unlocked_ioctl:		no	(see below)
compat_ioctl:		no
mmap:			no
open:			maybe	(see below)
flush:			no
release:		no
fsync:			no	(see below)
aio_fsync:		no
fasync:			yes	(see below)
lock:			yes
readv:			no
writev:			no
sendfile:		no
sendpage:		no
get_unmapped_area:	no
check_flags:		no
dir_notify:		no

->llseek() locking has moved from llseek to the individual llseek
implementations. If your fs is not using generic_file_llseek, you
need to acquire and release the appropriate locks in your ->llseek().
For many filesystems, it is probably safe to acquire the inode
mutex. Note that some filesystems (e.g. remote ones) provide no
protection for i_size, so you will need to use the BKL. A hedged llseek
sketch appears a few paragraphs below.

->open() locking is in-transit: the big lock is partially moved into the
methods. The only exception is ->open() in the instances of file_operations
that never end up in ->i_fop/->proc_fops, i.e. ones that belong to character
devices (chrdev_open() takes the lock before replacing ->f_op and calling the
secondary method). As soon as we fix the handling of module reference counters,
all instances of ->open() will be called without the BKL.

Note: ext2_release() was *the* source of contention on fs-intensive
loads and dropping the BKL on ->release() helps to get rid of that (we still
grab the BKL for cases when we close a file that had been opened r/w, but that
can and should be done using internal locking with smaller critical areas).
The current worst offender is ext2_get_block()...
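
	Referring back to the ->llseek() note above: a minimal sketch of an
->llseek() that takes i_mutex itself, roughly what generic_file_llseek() does
in this era. foofs_llseek is a hypothetical name and the sketch assumes i_size
is meaningful locally (i.e. not a remote filesystem):

	static loff_t foofs_llseek(struct file *file, loff_t offset, int origin)
	{
		struct inode *inode = file->f_path.dentry->d_inode;
		loff_t ret = -EINVAL;

		mutex_lock(&inode->i_mutex);	/* serialize against i_size/f_pos updates */
		switch (origin) {
		case SEEK_END:
			offset += i_size_read(inode);
			break;
		case SEEK_CUR:
			offset += file->f_pos;
			break;
		}
		if (offset >= 0) {
			file->f_pos = offset;
			file->f_version = 0;
			ret = offset;
		}
		mutex_unlock(&inode->i_mutex);
		return ret;
	}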

->fasync() is a mess. This area needs a big cleanup and that will probably
affect locking.

->readdir() and ->ioctl() on directories must be changed. Ideally we would
move ->readdir() to inode_operations and use a separate method for directory
->ioctl() or kill the latter completely. One of the problems is that for
anything that resembles union-mount we won't have a struct file for all
components. And there are other reasons why the current interface is a mess...

->ioctl() on regular files is superseded by ->unlocked_ioctl(), which
doesn't take the BKL.

->read on directories probably must go away - we should just enforce -EISDIR
in sys_read() and friends.

->fsync() has i_mutex on inode.

--------------------------- dquot_operations -------------------------------
prototypes:
	int (*initialize) (struct inode *, int);
	int (*drop) (struct inode *);
	int (*alloc_space) (struct inode *, qsize_t, int);
	int (*alloc_inode) (const struct inode *, unsigned long);
	int (*free_space) (struct inode *, qsize_t);
	int (*free_inode) (const struct inode *, unsigned long);
	int (*transfer) (struct inode *, struct iattr *);
	int (*write_dquot) (struct dquot *);
	int (*acquire_dquot) (struct dquot *);
	int (*release_dquot) (struct dquot *);
	int (*mark_dirty) (struct dquot *);
	int (*write_info) (struct super_block *, int);

These operations are intended to be more or less wrapping functions that ensure
proper locking wrt the filesystem and call the generic quota operations.

What the filesystem should expect from the generic quota functions:

		FS recursion	Held locks when called
initialize:	yes		maybe dqonoff_sem
drop:		yes		-
alloc_space:	->mark_dirty()	-
alloc_inode:	->mark_dirty()	-
free_space:	->mark_dirty()	-
free_inode:	->mark_dirty()	-
transfer:	yes		-
write_dquot:	yes		dqonoff_sem or dqptr_sem
acquire_dquot:	yes		dqonoff_sem or dqptr_sem
release_dquot:	yes		dqonoff_sem or dqptr_sem
mark_dirty:	no		-
write_info:	yes		dqonoff_sem

FS recursion means calling ->quota_read() and ->quota_write() from superblock
operations.

->alloc_space(), ->alloc_inode(), ->free_space() and ->free_inode() are called
only directly by the filesystem and do not call any fs functions other than
the ->mark_dirty() operation.

More details about quota locking can be found in fs/dquot.c.

--------------------------- vm_operations_struct -----------------------------
prototypes:
	void (*open)(struct vm_area_struct*);
	void (*close)(struct vm_area_struct*);
	struct page *(*nopage)(struct vm_area_struct*, unsigned long, int *);

locking rules:
		BKL	mmap_sem
open:		no	yes
close:		no	yes
nopage:		no	yes

================================================================================
			Dubious stuff

(if you break something or notice that it is broken and do not fix it yourself
- at least put it here)

ipc/shm.c::shm_delete() - may need BKL.
->read() and ->write() in many drivers are (probably) missing BKL.
drivers/sgi/char/graphics.c::sgi_graphics_nopage() - may need BKL.