fs/inotify.c at aca0b510cdbf81d52e15014a720be2b8dfd26aea (1080 lines, 27 kB), from the Linux kernel mirror (for testing) at git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git:
/*
 * fs/inotify.c - inode-based file event notifications
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/writeback.h>
#include <linux/inotify.h>
#include <linux/syscalls.h>

#include <asm/ioctls.h>

static atomic_t inotify_cookie;
static atomic_t inotify_watches;

static kmem_cache_t *watch_cachep;
static kmem_cache_t *event_cachep;

static struct vfsmount *inotify_mnt;

/* these are configurable via /proc/sys/fs/inotify/ */
int inotify_max_user_instances;
int inotify_max_user_watches;
int inotify_max_queued_events;

/*
 * Lock ordering:
 *
 * dentry->d_lock (used to keep d_move() away from dentry->d_parent)
 * iprune_sem (synchronize shrink_icache_memory())
 * inode_lock (protects the super_block->s_inodes list)
 * inode->inotify_sem (protects inode->inotify_watches and watches->i_list)
 * inotify_dev->sem (protects inotify_device and watches->d_list)
 */

/*
 * Lifetimes of the three main data structures--inotify_device, inode, and
 * inotify_watch--are managed by reference count.
 *
 * inotify_device: Lifetime is from inotify_init() until release.  Additional
 * references can bump the count via get_inotify_dev() and drop the count via
 * put_inotify_dev().
 *
 * inotify_watch: Lifetime is from create_watch() to destroy_watch().
 * Additional references can bump the count via get_inotify_watch() and drop
 * the count via put_inotify_watch().
 *
 * inode: Pinned so long as the inode is associated with a watch, from
 * create_watch() to put_inotify_watch().
 */

/*
 * struct inotify_device - represents an inotify instance
 *
 * This structure is protected by the semaphore 'sem'.
 */
struct inotify_device {
	wait_queue_head_t	wq;		/* wait queue for i/o */
	struct idr		idr;		/* idr mapping wd -> watch */
	struct semaphore	sem;		/* protects this bad boy */
	struct list_head	events;		/* list of queued events */
	struct list_head	watches;	/* list of watches */
	atomic_t		count;		/* reference count */
	struct user_struct	*user;		/* user who opened this dev */
	unsigned int		queue_size;	/* size of the queue (bytes) */
	unsigned int		event_count;	/* number of pending events */
	unsigned int		max_events;	/* maximum number of events */
	u32			last_wd;	/* the last wd allocated */
};
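/*
 * Accounting note: event_count is bounded by max_events (initialized from
 * /proc/sys/fs/inotify/max_queued_events), while queue_size tracks the byte
 * total of queued events and is what FIONREAD reports via inotify_ioctl()
 * below.
 */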
/*
 * struct inotify_kernel_event - An inotify event, originating from a watch and
 * queued for user-space.  A list of these is attached to each instance of the
 * device.  In read(), this list is walked and all events that can fit in the
 * buffer are returned.
 *
 * Protected by dev->sem of the device in which we are queued.
 */
struct inotify_kernel_event {
	struct inotify_event	event;	/* the user-space event */
	struct list_head	list;	/* entry in inotify_device's list */
	char			*name;	/* filename, if any */
};

/*
 * struct inotify_watch - represents a watch request on a specific inode
 *
 * d_list is protected by dev->sem of the associated watch->dev.
 * i_list and mask are protected by inode->inotify_sem of the associated inode.
 * dev, inode, and wd are never written to once the watch is created.
 */
struct inotify_watch {
	struct list_head	d_list;	/* entry in inotify_device's list */
	struct list_head	i_list;	/* entry in inode's list */
	atomic_t		count;	/* reference count */
	struct inotify_device	*dev;	/* associated device */
	struct inode		*inode;	/* associated inode */
	s32			wd;	/* watch descriptor */
	u32			mask;	/* event mask for this watch */
};

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table inotify_table[] = {
	{
		.ctl_name	= INOTIFY_MAX_USER_INSTANCES,
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_USER_WATCHES,
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_QUEUED_EVENTS,
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero
	},
	{ .ctl_name = 0 }
};
#endif /* CONFIG_SYSCTL */

static inline void get_inotify_dev(struct inotify_device *dev)
{
	atomic_inc(&dev->count);
}

static inline void put_inotify_dev(struct inotify_device *dev)
{
	if (atomic_dec_and_test(&dev->count)) {
		atomic_dec(&dev->user->inotify_devs);
		free_uid(dev->user);
		idr_destroy(&dev->idr);
		kfree(dev);
	}
}

static inline void get_inotify_watch(struct inotify_watch *watch)
{
	atomic_inc(&watch->count);
}

/*
 * put_inotify_watch - decrements the ref count on a given watch.  cleans up
 * the watch and its references if the count reaches zero.
 */
static inline void put_inotify_watch(struct inotify_watch *watch)
{
	if (atomic_dec_and_test(&watch->count)) {
		put_inotify_dev(watch->dev);
		iput(watch->inode);
		kmem_cache_free(watch_cachep, watch);
	}
}
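/*
 * Reference pairing sketch: every get_inotify_watch() is matched by a
 * put_inotify_watch(), and the final put releases the watch's references
 * on both its device and its pinned inode; this is how the "inode pinned
 * from create_watch() to put_inotify_watch()" lifetime rule above is
 * realized.
 */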
/*
 * kernel_event - create a new kernel event with the given parameters
 *
 * This function can sleep.
 */
static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
						  const char *name)
{
	struct inotify_kernel_event *kevent;

	kevent = kmem_cache_alloc(event_cachep, GFP_KERNEL);
	if (unlikely(!kevent))
		return NULL;

	/* we hand this out to user-space, so zero it just in case */
	memset(&kevent->event, 0, sizeof(struct inotify_event));

	kevent->event.wd = wd;
	kevent->event.mask = mask;
	kevent->event.cookie = cookie;

	INIT_LIST_HEAD(&kevent->list);

	if (name) {
		size_t len, rem, event_size = sizeof(struct inotify_event);

		/*
		 * We need to pad the filename so as to properly align an
		 * array of inotify_event structures.  Because the structure is
		 * small and the common case is a small filename, we just round
		 * up to the next multiple of the structure's sizeof.  This is
		 * simple and safe for all architectures.
		 */
		len = strlen(name) + 1;
		rem = event_size - len;
		if (len > event_size) {
			rem = event_size - (len % event_size);
			if (len % event_size == 0)
				rem = 0;
		}

		kevent->name = kmalloc(len + rem, GFP_KERNEL);
		if (unlikely(!kevent->name)) {
			kmem_cache_free(event_cachep, kevent);
			return NULL;
		}
		memcpy(kevent->name, name, len);
		if (rem)
			memset(kevent->name + len, 0, rem);
		kevent->event.len = len + rem;
	} else {
		kevent->event.len = 0;
		kevent->name = NULL;
	}

	return kevent;
}

/*
 * inotify_dev_get_event - return the next event in the given dev's queue
 *
 * Caller must hold dev->sem.
 */
static inline struct inotify_kernel_event *
inotify_dev_get_event(struct inotify_device *dev)
{
	return list_entry(dev->events.next, struct inotify_kernel_event, list);
}

/*
 * inotify_dev_queue_event - add a new event to the given device
 *
 * Caller must hold dev->sem.  Can sleep (calls kernel_event()).
 */
static void inotify_dev_queue_event(struct inotify_device *dev,
				    struct inotify_watch *watch, u32 mask,
				    u32 cookie, const char *name)
{
	struct inotify_kernel_event *kevent, *last;

	/*
	 * Coalescing: drop this event if it is a dupe of the previous.  Note
	 * that inotify_dev_get_event() returns a bogus pointer on an empty
	 * queue, so only inspect the previous event if one actually exists.
	 */
	if (!list_empty(&dev->events)) {
		last = inotify_dev_get_event(dev);
		if (last->event.mask == mask && last->event.wd == watch->wd &&
		    last->event.cookie == cookie) {
			const char *lastname = last->name;

			if (!name && !lastname)
				return;
			if (name && lastname && !strcmp(lastname, name))
				return;
		}
	}

	/* the queue overflowed and we already sent the Q_OVERFLOW event */
	if (unlikely(dev->event_count > dev->max_events))
		return;

	/* if the queue overflows, we need to notify user space */
	if (unlikely(dev->event_count == dev->max_events))
		kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL);
	else
		kevent = kernel_event(watch->wd, mask, cookie, name);

	if (unlikely(!kevent))
		return;

	/* queue the event and wake up anyone waiting */
	dev->event_count++;
	dev->queue_size += sizeof(struct inotify_event) + kevent->event.len;
	list_add_tail(&kevent->list, &dev->events);
	wake_up_interruptible(&dev->wq);
}
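/*
 * Worked example of the padding arithmetic in kernel_event() above, with the
 * 16-byte struct inotify_event (four 32-bit fields):
 *
 *	name = "foo"                  len = 4,  rem = 12, event.len = 16
 *	name = "a_long_filename"      len = 16, rem = 0,  event.len = 16
 *	name = "a_longer_filename.c"  len = 20, rem = 12, event.len = 32
 *
 * Every queued name thus occupies a multiple of the structure size, so an
 * array of events copied to user-space stays aligned.
 */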
/*
 * remove_kevent - cleans up and ultimately frees the given kevent
 *
 * Caller must hold dev->sem.
 */
static void remove_kevent(struct inotify_device *dev,
			  struct inotify_kernel_event *kevent)
{
	list_del(&kevent->list);

	dev->event_count--;
	dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len;

	kfree(kevent->name);
	kmem_cache_free(event_cachep, kevent);
}

/*
 * inotify_dev_event_dequeue - destroy an event on the given device
 *
 * Caller must hold dev->sem.
 */
static void inotify_dev_event_dequeue(struct inotify_device *dev)
{
	if (!list_empty(&dev->events)) {
		struct inotify_kernel_event *kevent;
		kevent = inotify_dev_get_event(dev);
		remove_kevent(dev, kevent);
	}
}

/*
 * inotify_dev_get_wd - returns the next WD for use by the given dev
 *
 * Callers must hold dev->sem.  This function can sleep.
 */
static int inotify_dev_get_wd(struct inotify_device *dev,
			      struct inotify_watch *watch)
{
	int ret;

	do {
		if (unlikely(!idr_pre_get(&dev->idr, GFP_KERNEL)))
			return -ENOSPC;
		ret = idr_get_new_above(&dev->idr, watch, dev->last_wd+1, &watch->wd);
	} while (ret == -EAGAIN);

	return ret;
}

/*
 * find_inode - resolve a user-given path to a specific inode and return a nd
 */
static int find_inode(const char __user *dirname, struct nameidata *nd,
		      unsigned flags)
{
	int error;

	error = __user_walk(dirname, flags, nd);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = vfs_permission(nd, MAY_READ);
	if (error)
		path_release(nd);
	return error;
}

/*
 * create_watch - creates a watch on the given device.
 *
 * Callers must hold dev->sem.  Calls inotify_dev_get_wd() so may sleep.
 * Both 'dev' and 'inode' (by way of nameidata) need to be pinned.
 */
static struct inotify_watch *create_watch(struct inotify_device *dev,
					  u32 mask, struct inode *inode)
{
	struct inotify_watch *watch;
	int ret;

	if (atomic_read(&dev->user->inotify_watches) >=
			inotify_max_user_watches)
		return ERR_PTR(-ENOSPC);

	watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL);
	if (unlikely(!watch))
		return ERR_PTR(-ENOMEM);

	ret = inotify_dev_get_wd(dev, watch);
	if (unlikely(ret)) {
		kmem_cache_free(watch_cachep, watch);
		return ERR_PTR(ret);
	}

	dev->last_wd = watch->wd;
	watch->mask = mask;
	atomic_set(&watch->count, 0);
	INIT_LIST_HEAD(&watch->d_list);
	INIT_LIST_HEAD(&watch->i_list);

	/* save a reference to device and bump the count to make it official */
	get_inotify_dev(dev);
	watch->dev = dev;

	/*
	 * Save a reference to the inode and bump the ref count to make it
	 * official.  We hold a reference to nameidata, which makes this safe.
	 */
	watch->inode = igrab(inode);

	/* bump our own count, corresponding to our entry in dev->watches */
	get_inotify_watch(watch);

	atomic_inc(&dev->user->inotify_watches);
	atomic_inc(&inotify_watches);

	return watch;
}
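/*
 * Allocation sketch: because create_watch() passes dev->last_wd + 1 as the
 * starting point for idr_get_new_above(), watch descriptors increase
 * monotonically over the life of an instance (1, 2, 3, ...) even when
 * earlier watches have been removed, which keeps a freshly allocated wd
 * from colliding with a stale wd still sitting in queued events.
 */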
/*
 * inode_find_dev - find the watch associated with the given inode and dev
 *
 * Callers must hold inode->inotify_sem.
 */
static struct inotify_watch *inode_find_dev(struct inode *inode,
					    struct inotify_device *dev)
{
	struct inotify_watch *watch;

	list_for_each_entry(watch, &inode->inotify_watches, i_list) {
		if (watch->dev == dev)
			return watch;
	}

	return NULL;
}

/*
 * remove_watch_no_event - remove_watch() without the IN_IGNORED event.
 */
static void remove_watch_no_event(struct inotify_watch *watch,
				  struct inotify_device *dev)
{
	list_del(&watch->i_list);
	list_del(&watch->d_list);

	atomic_dec(&dev->user->inotify_watches);
	atomic_dec(&inotify_watches);
	idr_remove(&dev->idr, watch->wd);
	put_inotify_watch(watch);
}

/*
 * remove_watch - Remove a watch from both the device and the inode.  Sends
 * the IN_IGNORED event to the given device signifying that the inode is no
 * longer watched.
 *
 * Callers must hold both inode->inotify_sem and dev->sem.  The watch's
 * reference on the inode is not dropped directly here; the final
 * put_inotify_watch() performs the iput().
 */
static void remove_watch(struct inotify_watch *watch,struct inotify_device *dev)
{
	inotify_dev_queue_event(dev, watch, IN_IGNORED, 0, NULL);
	remove_watch_no_event(watch, dev);
}

/*
 * inotify_inode_watched - returns nonzero if there are watches on this inode
 * and zero otherwise.  We call this lockless, we do not care if we race.
 */
static inline int inotify_inode_watched(struct inode *inode)
{
	return !list_empty(&inode->inotify_watches);
}

/* Kernel API */

/**
 * inotify_inode_queue_event - queue an event to all watches on this inode
 * @inode: inode event is originating from
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 */
void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
			       const char *name)
{
	struct inotify_watch *watch, *next;

	if (!inotify_inode_watched(inode))
		return;

	down(&inode->inotify_sem);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		u32 watch_mask = watch->mask;
		if (watch_mask & mask) {
			struct inotify_device *dev = watch->dev;
			get_inotify_watch(watch);
			down(&dev->sem);
			inotify_dev_queue_event(dev, watch, mask, cookie, name);
			if (watch_mask & IN_ONESHOT)
				remove_watch_no_event(watch, dev);
			up(&dev->sem);
			put_inotify_watch(watch);
		}
	}
	up(&inode->inotify_sem);
}
EXPORT_SYMBOL_GPL(inotify_inode_queue_event);

/**
 * inotify_dentry_parent_queue_event - queue an event to a dentry's parent
 * @dentry: the dentry in question, we queue against this dentry's parent
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 */
void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask,
				       u32 cookie, const char *name)
{
	struct dentry *parent;
	struct inode *inode;

	if (!atomic_read(&inotify_watches))
		return;

	spin_lock(&dentry->d_lock);
	parent = dentry->d_parent;
	inode = parent->d_inode;

	if (inotify_inode_watched(inode)) {
		dget(parent);
		spin_unlock(&dentry->d_lock);
		inotify_inode_queue_event(inode, mask, cookie, name);
		dput(parent);
	} else
		spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event);
/**
 * inotify_get_cookie - return a unique cookie for use in synchronizing events.
 */
u32 inotify_get_cookie(void)
{
	return atomic_inc_return(&inotify_cookie);
}
EXPORT_SYMBOL_GPL(inotify_get_cookie);
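/*
 * The cookie ties related events together.  A rename, for example, queues
 * IN_MOVED_FROM against the old parent and IN_MOVED_TO against the new one
 * with the same inotify_get_cookie() value, so user-space can pair the two
 * halves of the move; unrelated events carry a cookie of zero.
 */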
/**
 * inotify_unmount_inodes - an sb is unmounting.  handle any watched inodes.
 * @list: list of inodes being unmounted (sb->s_inodes)
 *
 * Called with inode_lock held, protecting the unmounting super block's list
 * of inodes, and with iprune_sem held, keeping shrink_icache_memory() at bay.
 * We temporarily drop inode_lock, however, and CAN block.
 */
void inotify_unmount_inodes(struct list_head *list)
{
	struct inode *inode, *next_i, *need_iput = NULL;

	list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
		struct inotify_watch *watch, *next_w;
		struct inode *need_iput_tmp;
		struct list_head *watches;

		/*
		 * If i_count is zero, the inode cannot have any watches and
		 * doing an __iget/iput with MS_ACTIVE clear would actually
		 * evict all inodes with zero i_count from icache which is
		 * unnecessarily violent and may in fact be illegal to do.
		 */
		if (!atomic_read(&inode->i_count))
			continue;

		/*
		 * We cannot __iget() an inode in state I_CLEAR, I_FREEING, or
		 * I_WILL_FREE which is fine because by that point the inode
		 * cannot have any associated watches.
		 */
		if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))
			continue;

		need_iput_tmp = need_iput;
		need_iput = NULL;
		/* In case the remove_watch() drops a reference. */
		if (inode != need_iput_tmp)
			__iget(inode);
		else
			need_iput_tmp = NULL;
		/* In case the dropping of a reference would nuke next_i. */
		if ((&next_i->i_sb_list != list) &&
				atomic_read(&next_i->i_count) &&
				!(next_i->i_state & (I_CLEAR | I_FREEING |
					I_WILL_FREE))) {
			__iget(next_i);
			need_iput = next_i;
		}

		/*
		 * We can safely drop inode_lock here because we hold
		 * references on both inode and next_i.  Also no new inodes
		 * will be added since the umount has begun.  Finally,
		 * iprune_sem keeps shrink_icache_memory() away.
		 */
		spin_unlock(&inode_lock);

		if (need_iput_tmp)
			iput(need_iput_tmp);

		/* for each watch, send IN_UNMOUNT and then remove it */
		down(&inode->inotify_sem);
		watches = &inode->inotify_watches;
		list_for_each_entry_safe(watch, next_w, watches, i_list) {
			struct inotify_device *dev = watch->dev;
			down(&dev->sem);
			inotify_dev_queue_event(dev, watch, IN_UNMOUNT, 0, NULL);
			remove_watch(watch, dev);
			up(&dev->sem);
		}
		up(&inode->inotify_sem);
		iput(inode);

		spin_lock(&inode_lock);
	}
}
EXPORT_SYMBOL_GPL(inotify_unmount_inodes);

/**
 * inotify_inode_is_dead - an inode has been deleted, cleanup any watches
 * @inode: inode that is about to be removed
 */
void inotify_inode_is_dead(struct inode *inode)
{
	struct inotify_watch *watch, *next;

	down(&inode->inotify_sem);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		struct inotify_device *dev = watch->dev;
		down(&dev->sem);
		remove_watch(watch, dev);
		up(&dev->sem);
	}
	up(&inode->inotify_sem);
}
EXPORT_SYMBOL_GPL(inotify_inode_is_dead);

/* Device Interface */

static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct inotify_device *dev = file->private_data;
	int ret = 0;

	poll_wait(file, &dev->wq, wait);
	down(&dev->sem);
	if (!list_empty(&dev->events))
		ret = POLLIN | POLLRDNORM;
	up(&dev->sem);

	return ret;
}

static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	size_t event_size = sizeof(struct inotify_event);
	struct inotify_device *dev;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	dev = file->private_data;

	while (1) {
		int events;

		prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);

		down(&dev->sem);
		events = !list_empty(&dev->events);
		up(&dev->sem);
		if (events) {
			ret = 0;
			break;
		}

		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		schedule();
	}

	finish_wait(&dev->wq, &wait);
	if (ret)
		return ret;

	down(&dev->sem);
	while (1) {
		struct inotify_kernel_event *kevent;

		ret = buf - start;
		if (list_empty(&dev->events))
			break;

		kevent = inotify_dev_get_event(dev);
		if (event_size + kevent->event.len > count)
			break;

		if (copy_to_user(buf, &kevent->event, event_size)) {
			ret = -EFAULT;
			break;
		}
		buf += event_size;
		count -= event_size;

		if (kevent->name) {
			if (copy_to_user(buf, kevent->name, kevent->event.len)) {
				ret = -EFAULT;
				break;
			}
			buf += kevent->event.len;
			count -= kevent->event.len;
		}

		remove_kevent(dev, kevent);
	}
	up(&dev->sem);

	return ret;
}
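/*
 * Layout note: each event returned by read() is a struct inotify_event
 * immediately followed by event.len bytes of name (zero-padded to the
 * structure size by kernel_event()), so user-space walks the buffer by
 * advancing sizeof(struct inotify_event) + event.len bytes at a time.
 * Note that if even the first queued event does not fit in the caller's
 * buffer, this read() returns 0 rather than an error.
 */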
static int inotify_release(struct inode *ignored, struct file *file)
{
	struct inotify_device *dev = file->private_data;

	/*
	 * Destroy all of the watches on this device.  Unfortunately, not very
	 * pretty.  We cannot do a simple iteration over the list, because we
	 * do not know the inode until we iterate to the watch.  But we need to
	 * hold inode->inotify_sem before dev->sem.  The following works.
	 */
	while (1) {
		struct inotify_watch *watch;
		struct list_head *watches;
		struct inode *inode;

		down(&dev->sem);
		watches = &dev->watches;
		if (list_empty(watches)) {
			up(&dev->sem);
			break;
		}
		watch = list_entry(watches->next, struct inotify_watch, d_list);
		get_inotify_watch(watch);
		up(&dev->sem);

		inode = watch->inode;
		down(&inode->inotify_sem);
		down(&dev->sem);
		remove_watch_no_event(watch, dev);
		up(&dev->sem);
		up(&inode->inotify_sem);
		put_inotify_watch(watch);
	}

	/* destroy all of the events on this device */
	down(&dev->sem);
	while (!list_empty(&dev->events))
		inotify_dev_event_dequeue(dev);
	up(&dev->sem);

	/* free this device: the put matching the get in inotify_init() */
	put_inotify_dev(dev);

	return 0;
}

/*
 * inotify_ignore - remove a given wd from this inotify instance.
 *
 * Can sleep.
 */
static int inotify_ignore(struct inotify_device *dev, s32 wd)
{
	struct inotify_watch *watch, *found;
	struct inode *inode;

	down(&dev->sem);
	watch = idr_find(&dev->idr, wd);
	if (unlikely(!watch)) {
		up(&dev->sem);
		return -EINVAL;
	}
	get_inotify_watch(watch);
	inode = watch->inode;
	up(&dev->sem);

	down(&inode->inotify_sem);
	down(&dev->sem);

	/*
	 * Make sure that we did not race with another removal: only remove
	 * the watch if it is still registered.  The reference we took above
	 * is dropped unconditionally afterwards.
	 */
	found = idr_find(&dev->idr, wd);
	if (likely(found))
		remove_watch(found, dev);

	up(&dev->sem);
	up(&inode->inotify_sem);
	put_inotify_watch(watch);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct inotify_device *dev;
	void __user *p;
	int ret = -ENOTTY;

	dev = file->private_data;
	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		ret = put_user(dev->queue_size, (int __user *) p);
		break;
	}

	return ret;
}

static struct file_operations inotify_fops = {
	.poll		= inotify_poll,
	.read		= inotify_read,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
};

asmlinkage long sys_inotify_init(void)
{
	struct inotify_device *dev;
	struct user_struct *user;
	struct file *filp;
	int fd, ret;

	fd = get_unused_fd();
	if (fd < 0)
		return fd;

	filp = get_empty_filp();
	if (!filp) {
		ret = -ENFILE;
		goto out_put_fd;
	}

	user = get_uid(current->user);
	if (unlikely(atomic_read(&user->inotify_devs) >=
			inotify_max_user_instances)) {
		ret = -EMFILE;
		goto out_free_uid;
	}

	dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL);
	if (unlikely(!dev)) {
		ret = -ENOMEM;
		goto out_free_uid;
	}

	filp->f_op = &inotify_fops;
	filp->f_vfsmnt = mntget(inotify_mnt);
	filp->f_dentry = dget(inotify_mnt->mnt_root);
	filp->f_mapping = filp->f_dentry->d_inode->i_mapping;
	filp->f_mode = FMODE_READ;
	filp->f_flags = O_RDONLY;
	filp->private_data = dev;

	idr_init(&dev->idr);
	INIT_LIST_HEAD(&dev->events);
	INIT_LIST_HEAD(&dev->watches);
	init_waitqueue_head(&dev->wq);
	sema_init(&dev->sem, 1);
	dev->event_count = 0;
	dev->queue_size = 0;
	dev->max_events = inotify_max_queued_events;
	dev->user = user;
	dev->last_wd = 0;
	atomic_set(&dev->count, 0);

	get_inotify_dev(dev);
	atomic_inc(&user->inotify_devs);
	fd_install(fd, filp);

	return fd;
out_free_uid:
	free_uid(user);
	put_filp(filp);
out_put_fd:
	put_unused_fd(fd);
	return ret;
}
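/*
 * The get_inotify_dev() in sys_inotify_init() above is the reference owned
 * by the file itself; it is dropped by the matching put_inotify_dev() in
 * inotify_release() when the last descriptor for the instance is closed.
 */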
asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
{
	struct inotify_watch *watch, *old;
	struct inode *inode;
	struct inotify_device *dev;
	struct nameidata nd;
	struct file *filp;
	int ret, fput_needed;
	int mask_add = 0;
	unsigned flags = 0;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = find_inode(path, &nd, flags);
	if (unlikely(ret))
		goto fput_and_out;

	/* inode held in place by reference to nd; dev by fget on fd */
	inode = nd.dentry->d_inode;
	dev = filp->private_data;

	down(&inode->inotify_sem);
	down(&dev->sem);

	if (mask & IN_MASK_ADD)
		mask_add = 1;

	/* don't let user-space set invalid bits: we don't want flags set */
	mask &= IN_ALL_EVENTS | IN_ONESHOT;
	if (unlikely(!mask)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Handle the case of re-adding a watch on an (inode,dev) pair that we
	 * are already watching.  We just update the mask and return its wd.
	 */
	old = inode_find_dev(inode, dev);
	if (unlikely(old)) {
		if (mask_add)
			old->mask |= mask;
		else
			old->mask = mask;
		ret = old->wd;
		goto out;
	}

	watch = create_watch(dev, mask, inode);
	if (unlikely(IS_ERR(watch))) {
		ret = PTR_ERR(watch);
		goto out;
	}

	/* Add the watch to the device's and the inode's list */
	list_add(&watch->d_list, &dev->watches);
	list_add(&watch->i_list, &inode->inotify_watches);
	ret = watch->wd;
out:
	up(&dev->sem);
	up(&inode->inotify_sem);
	path_release(&nd);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}

asmlinkage long sys_inotify_rm_watch(int fd, u32 wd)
{
	struct file *filp;
	struct inotify_device *dev;
	int ret, fput_needed;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto out;
	}

	dev = filp->private_data;
	ret = inotify_ignore(dev, wd);

out:
	fput_light(filp, fput_needed);
	return ret;
}

static struct super_block *
inotify_get_sb(struct file_system_type *fs_type, int flags,
	       const char *dev_name, void *data)
{
	return get_sb_pseudo(fs_type, "inotify", NULL, 0xBAD1DEA);
}

static struct file_system_type inotify_fs_type = {
	.name		= "inotifyfs",
	.get_sb		= inotify_get_sb,
	.kill_sb	= kill_anon_super,
};
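/*
 * Mask semantics sketch for sys_inotify_add_watch(), written in terms of a
 * user-space inotify_add_watch() wrapper (assumed here for illustration):
 *
 *	inotify_add_watch(fd, "/tmp", IN_CREATE);                mask = IN_CREATE
 *	inotify_add_watch(fd, "/tmp", IN_DELETE);                mask = IN_DELETE
 *	inotify_add_watch(fd, "/tmp", IN_MASK_ADD | IN_DELETE);  mask |= IN_DELETE
 *
 * All three calls return the same wd, since the (inode, dev) pair already
 * has a watch after the first.
 */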
/*
 * inotify_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_setup(void)
{
	int ret;

	ret = register_filesystem(&inotify_fs_type);
	if (unlikely(ret))
		panic("inotify: register_filesystem returned %d!\n", ret);

	inotify_mnt = kern_mount(&inotify_fs_type);
	if (IS_ERR(inotify_mnt))
		panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));

	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	atomic_set(&inotify_cookie, 0);
	atomic_set(&inotify_watches, 0);

	watch_cachep = kmem_cache_create("inotify_watch_cache",
					 sizeof(struct inotify_watch),
					 0, SLAB_PANIC, NULL, NULL);
	event_cachep = kmem_cache_create("inotify_event_cache",
					 sizeof(struct inotify_kernel_event),
					 0, SLAB_PANIC, NULL, NULL);

	return 0;
}

module_init(inotify_setup);
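A minimal user-space consumer of the interface above, as a sketch. It assumes the now-standard libc wrappers inotify_init() and inotify_add_watch() for the syscalls defined in this file (adjust to syscall(2) if your libc lacks them):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/inotify.h>

/*
 * Watch a directory (argv[1], or the current directory) for creations and
 * deletions, printing one line per event.  Each read() returns a packed
 * sequence of struct inotify_event records, each followed by event->len
 * bytes of NUL-padded name, so we advance through the buffer record by
 * record.
 */
int main(int argc, char *argv[])
{
	char buf[4096];
	ssize_t n;
	int fd, wd;

	fd = inotify_init();
	if (fd < 0) {
		perror("inotify_init");
		return EXIT_FAILURE;
	}

	wd = inotify_add_watch(fd, argc > 1 ? argv[1] : ".",
			       IN_CREATE | IN_DELETE);
	if (wd < 0) {
		perror("inotify_add_watch");
		return EXIT_FAILURE;
	}

	while ((n = read(fd, buf, sizeof(buf))) > 0) {
		char *p = buf;

		while (p < buf + n) {
			struct inotify_event *ev = (struct inotify_event *) p;

			printf("%s: %s\n",
			       ev->mask & IN_CREATE ? "created" : "deleted",
			       ev->len ? ev->name : "(watched dir)");
			p += sizeof(*ev) + ev->len;
		}
	}

	return EXIT_SUCCESS;
}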