Merge branch 'for-linus' of git://git.infradead.org/users/eparis/notify

* 'for-linus' of git://git.infradead.org/users/eparis/notify:
inotify: use GFP_NOFS under potential memory pressure
fsnotify: fix inotify tail drop check with path entries
inotify: check filename before dropping repeat events
fsnotify: use def_bool in kconfig instead of letting the user choose
inotify: fix error paths in inotify_update_watch
inotify: do not leak inode marks in inotify_add_watch
inotify: drop user watch count when a watch is removed

+91 -59
+1 -11
fs/notify/Kconfig
···
 config FSNOTIFY
-	bool "Filesystem notification backend"
-	default y
-	---help---
-	   fsnotify is a backend for filesystem notification.  fsnotify does
-	   not provide any userspace interface but does provide the basis
-	   needed for other notification schemes such as dnotify, inotify,
-	   and fanotify.
-
-	   Say Y here to enable fsnotify suport.
-
-	   If unsure, say Y.
+	def_bool n
 
 source "fs/notify/dnotify/Kconfig"
 source "fs/notify/inotify/Kconfig"
+1 -1
fs/notify/dnotify/Kconfig
···
 config DNOTIFY
 	bool "Dnotify support"
-	depends on FSNOTIFY
+	select FSNOTIFY
 	default y
 	help
 	  Dnotify is a directory-based per-fd file change notification system
+3 -1
fs/notify/fsnotify.c
···
 		if (!group->ops->should_send_event(group, to_tell, mask))
 			continue;
 		if (!event) {
-			event = fsnotify_create_event(to_tell, mask, data, data_is, file_name, cookie);
+			event = fsnotify_create_event(to_tell, mask, data,
+						      data_is, file_name, cookie,
+						      GFP_KERNEL);
 			/* shit, we OOM'd and now we can't tell, maybe
 			 * someday someone else will want to do something
 			 * here */
+1 -1
fs/notify/inotify/Kconfig
···
 
 config INOTIFY_USER
 	bool "Inotify support for userspace"
-	depends on FSNOTIFY
+	select FSNOTIFY
 	default y
 	---help---
 	  Say Y here to enable inotify support for userspace, including the
+71 -38
fs/notify/inotify/inotify_user.c
···
 
 static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
 struct kmem_cache *event_priv_cachep __read_mostly;
-static struct fsnotify_event *inotify_ignored_event;
 
 /*
  * When inotify registers a new group it increments this and uses that
···
 	return error;
 }
 
+static void inotify_remove_from_idr(struct fsnotify_group *group,
+				    struct inotify_inode_mark_entry *ientry)
+{
+	struct idr *idr;
+
+	spin_lock(&group->inotify_data.idr_lock);
+	idr = &group->inotify_data.idr;
+	idr_remove(idr, ientry->wd);
+	spin_unlock(&group->inotify_data.idr_lock);
+	ientry->wd = -1;
+}
 /*
  * Send IN_IGNORED for this wd, remove this wd from the idr, and drop the
  * internal reference help on the mark because it is in the idr.
···
 				  struct fsnotify_group *group)
 {
 	struct inotify_inode_mark_entry *ientry;
+	struct fsnotify_event *ignored_event;
 	struct inotify_event_private_data *event_priv;
 	struct fsnotify_event_private_data *fsn_event_priv;
-	struct idr *idr;
+
+	ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
+					      FSNOTIFY_EVENT_NONE, NULL, 0,
+					      GFP_NOFS);
+	if (!ignored_event)
+		return;
 
 	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
 
-	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
+	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
 	if (unlikely(!event_priv))
 		goto skip_send_ignore;
···
 	fsn_event_priv->group = group;
 	event_priv->wd = ientry->wd;
 
-	fsnotify_add_notify_event(group, inotify_ignored_event, fsn_event_priv);
+	fsnotify_add_notify_event(group, ignored_event, fsn_event_priv);
 
 	/* did the private data get added? */
 	if (list_empty(&fsn_event_priv->event_list))
···
 
 skip_send_ignore:
 
+	/* matches the reference taken when the event was created */
+	fsnotify_put_event(ignored_event);
+
 	/* remove this entry from the idr */
-	spin_lock(&group->inotify_data.idr_lock);
-	idr = &group->inotify_data.idr;
-	idr_remove(idr, ientry->wd);
-	spin_unlock(&group->inotify_data.idr_lock);
+	inotify_remove_from_idr(group, ientry);
 
 	/* removed from idr, drop that reference */
 	fsnotify_put_mark(entry);
+
+	atomic_dec(&group->inotify_data.user->inotify_watches);
 }
 
 /* ding dong the mark is dead */
···
 {
 	struct fsnotify_mark_entry *entry = NULL;
 	struct inotify_inode_mark_entry *ientry;
+	struct inotify_inode_mark_entry *tmp_ientry;
 	int ret = 0;
 	int add = (arg & IN_MASK_ADD);
 	__u32 mask;
···
 	if (unlikely(!mask))
 		return -EINVAL;
 
-	ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
-	if (unlikely(!ientry))
+	tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
+	if (unlikely(!tmp_ientry))
 		return -ENOMEM;
 	/* we set the mask at the end after attaching it */
-	fsnotify_init_mark(&ientry->fsn_entry, inotify_free_mark);
-	ientry->wd = 0;
+	fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
+	tmp_ientry->wd = -1;
 
 find_entry:
 	spin_lock(&inode->i_lock);
 	entry = fsnotify_find_mark_entry(group, inode);
 	spin_unlock(&inode->i_lock);
 	if (entry) {
-		kmem_cache_free(inotify_inode_mark_cachep, ientry);
 		ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
 	} else {
-		if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches) {
-			ret = -ENOSPC;
+		ret = -ENOSPC;
+		if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
 			goto out_err;
-		}
-
-		ret = fsnotify_add_mark(&ientry->fsn_entry, group, inode);
-		if (ret == -EEXIST)
-			goto find_entry;
-		else if (ret)
-			goto out_err;
-
-		entry = &ientry->fsn_entry;
 retry:
 		ret = -ENOMEM;
 		if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
 			goto out_err;
 
 		spin_lock(&group->inotify_data.idr_lock);
-		/* if entry is added to the idr we keep the reference obtained
-		 * through fsnotify_mark_add.  remember to drop this reference
-		 * when entry is removed from idr */
-		ret = idr_get_new_above(&group->inotify_data.idr, entry,
-					++group->inotify_data.last_wd,
-					&ientry->wd);
+		ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
+					group->inotify_data.last_wd,
+					&tmp_ientry->wd);
 		spin_unlock(&group->inotify_data.idr_lock);
 		if (ret) {
 			if (ret == -EAGAIN)
 				goto retry;
 			goto out_err;
 		}
+
+		ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
+		if (ret) {
+			inotify_remove_from_idr(group, tmp_ientry);
+			if (ret == -EEXIST)
+				goto find_entry;
+			goto out_err;
+		}
+
+		/* tmp_ientry has been added to the inode, so we are all set up.
+		 * now we just need to make sure tmp_ientry doesn't get freed and
+		 * we need to set up entry and ientry so the generic code can
+		 * do its thing. */
+		ientry = tmp_ientry;
+		entry = &ientry->fsn_entry;
+		tmp_ientry = NULL;
+
 		atomic_inc(&group->inotify_data.user->inotify_watches);
+
+		/* update the idr hint */
+		group->inotify_data.last_wd = ientry->wd;
+
+		/* we put the mark on the idr, take a reference */
+		fsnotify_get_mark(entry);
 	}
+
+	ret = ientry->wd;
 
 	spin_lock(&entry->lock);
···
 		fsnotify_recalc_group_mask(group);
 	}
 
-	return ientry->wd;
+	/* this either matches fsnotify_find_mark_entry, or init_mark_entry
+	 * depending on which path we took... */
+	fsnotify_put_mark(entry);
 
 out_err:
-	/* see this isn't supposed to happen, just kill the watch */
-	if (entry) {
-		fsnotify_destroy_mark_by_entry(entry);
-		fsnotify_put_mark(entry);
+	/* could be an error, could be that we found an existing mark */
+	if (tmp_ientry) {
+		/* on the idr but didn't make it on the inode */
+		if (tmp_ientry->wd != -1)
+			inotify_remove_from_idr(group, tmp_ientry);
+		kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);
 	}
+
 	return ret;
 }
···
 
 	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
 	event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);
-	inotify_ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL, FSNOTIFY_EVENT_NONE, NULL, 0);
-	if (!inotify_ignored_event)
-		panic("unable to allocate the inotify ignored event\n");
 
 	inotify_max_queued_events = 16384;
 	inotify_max_user_instances = 128;
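Illustration, not part of the diff: the user-visible contract these inotify_user.c fixes preserve can be exercised from userspace with nothing but the standard inotify wrappers (inotify_init, inotify_add_watch, inotify_rm_watch). The sketch below is hypothetical example code; the watched path /tmp is arbitrary. It walks the two paths touched here: a failed add must not leak a mark or a watch slot, and removing a watch queues IN_IGNORED and now also releases the per-user watch count.

/* Userspace sketch (not part of this patch): add a watch, remove it, and
 * read back the IN_IGNORED event the kernel queues when the watch dies. */
#include <stdio.h>
#include <unistd.h>
#include <sys/inotify.h>

int main(void)
{
	char buf[4096];
	ssize_t len;
	int fd, wd;

	fd = inotify_init();
	if (fd < 0) {
		perror("inotify_init");
		return 1;
	}

	/* If this fails (e.g. -ENOSPC once max_user_watches is hit), the
	 * fixed error paths free the idr slot and the mark instead of
	 * leaking them. */
	wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
	if (wd < 0) {
		perror("inotify_add_watch");
		return 1;
	}

	/* Removing the watch queues IN_IGNORED and, with these fixes, also
	 * drops the per-user watch count so the slot can be reused. */
	inotify_rm_watch(fd, wd);

	len = read(fd, buf, sizeof(buf));
	if (len > 0) {
		struct inotify_event *ev = (struct inotify_event *)buf;

		if (ev->mask & IN_IGNORED)
			printf("wd %d: IN_IGNORED\n", ev->wd);
	}

	close(fd);
	return 0;
}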
+13 -6
fs/notify/notification.c
···
 {
 	if ((old->mask == new->mask) &&
 	    (old->to_tell == new->to_tell) &&
-	    (old->data_type == new->data_type)) {
+	    (old->data_type == new->data_type) &&
+	    (old->name_len == new->name_len)) {
 		switch (old->data_type) {
 		case (FSNOTIFY_EVENT_INODE):
-			if (old->inode == new->inode)
+			/* remember, after old was put on the wait_q we aren't
+			 * allowed to look at the inode any more, only thing
+			 * left to check was if the file_name is the same */
+			if (old->name_len &&
+			    !strcmp(old->file_name, new->file_name))
 				return true;
 			break;
 		case (FSNOTIFY_EVENT_PATH):
 			if ((old->path.mnt == new->path.mnt) &&
 			    (old->path.dentry == new->path.dentry))
 				return true;
+			break;
 		case (FSNOTIFY_EVENT_NONE):
-			return true;
+			return false;
 		};
 	}
 	return false;
···
  * @name the filename, if available
  */
 struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, void *data,
-					     int data_type, const char *name, u32 cookie)
+					     int data_type, const char *name, u32 cookie,
+					     gfp_t gfp)
 {
 	struct fsnotify_event *event;
 
-	event = kmem_cache_alloc(fsnotify_event_cachep, GFP_KERNEL);
+	event = kmem_cache_alloc(fsnotify_event_cachep, gfp);
 	if (!event)
 		return NULL;
 
 	initialize_event(event);
 
 	if (name) {
-		event->file_name = kstrdup(name, GFP_KERNEL);
+		event->file_name = kstrdup(name, gfp);
 		if (!event->file_name) {
 			kmem_cache_free(fsnotify_event_cachep, event);
 			return NULL;
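Illustration, not part of the diff: the duplicate check above drops a new event only when it matches the event at the tail of the queue, and it now requires the name_len and file_name to match as well. The name it compares is the same one userspace sees in the name field of struct inotify_event. The sketch below is hypothetical example code (the watched directory /tmp and the file names are made up); it just prints each queued IN_CREATE event with that name so the per-name distinction is visible.

/* Userspace sketch (not part of this patch): create two differently named
 * files in a watched directory and print every queued event with its name,
 * the field the tightened duplicate check compares. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/inotify.h>

int main(void)
{
	char buf[4096];
	ssize_t len, i;
	int fd, wd;

	fd = inotify_init();
	wd = inotify_add_watch(fd, "/tmp", IN_CREATE);
	if (fd < 0 || wd < 0) {
		perror("inotify");
		return 1;
	}

	close(open("/tmp/a", O_CREAT | O_WRONLY, 0600));
	close(open("/tmp/b", O_CREAT | O_WRONLY, 0600));

	len = read(fd, buf, sizeof(buf));
	for (i = 0; i + (ssize_t)sizeof(struct inotify_event) <= len; ) {
		struct inotify_event *ev = (struct inotify_event *)(buf + i);

		printf("wd=%d mask=0x%x name=%s\n",
		       ev->wd, ev->mask, ev->len ? ev->name : "");
		i += sizeof(*ev) + ev->len;
	}

	close(fd);
	return 0;
}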
+1 -1
include/linux/fsnotify_backend.h
···
 /* put here because inotify does some weird stuff when destroying watches */
 extern struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
 						    void *data, int data_is, const char *name,
-						    u32 cookie);
+						    u32 cookie, gfp_t gfp);
 
 #else
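The prototype change above is the thread tying the series together: fsnotify_create_event() now takes an explicit gfp_t, so every caller states whether its allocation may recurse into the filesystem. A rough sketch follows (hypothetical helper names, not additional patched code): the first call mirrors the GFP_NOFS allocation added to the inotify mark-destruction path above, which can run while memory is being reclaimed; the second shows an ordinary process-context caller keeping GFP_KERNEL, as fsnotify.c does.

/* Sketch only: mirrors the two kinds of call sites in this merge. */
#include <linux/fs.h>
#include <linux/fsnotify_backend.h>

static struct fsnotify_event *example_ignored_event(void)
{
	/* Paths reachable from reclaim must not re-enter the filesystem,
	 * so they allocate the event with GFP_NOFS... */
	return fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
				     FSNOTIFY_EVENT_NONE, NULL, 0,
				     GFP_NOFS);
}

static struct fsnotify_event *example_event(struct inode *to_tell, __u32 mask,
					    const char *name, u32 cookie)
{
	/* ...while ordinary syscall context keeps using GFP_KERNEL. */
	return fsnotify_create_event(to_tell, mask, NULL,
				     FSNOTIFY_EVENT_NONE, name, cookie,
				     GFP_KERNEL);
}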