Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

inotify: rename mark_entry to just mark

Rename anything in inotify that deals with mark_entry to just be mark. It
makes a lot more sense.

Signed-off-by: Eric Paris <eparis@redhat.com>

+123 -124
+3 -4
fs/notify/inotify/inotify.h
··· 9 9 int wd; 10 10 }; 11 11 12 - struct inotify_inode_mark_entry { 13 - /* fsnotify_mark MUST be the first thing */ 14 - struct fsnotify_mark fsn_entry; 12 + struct inotify_inode_mark { 13 + struct fsnotify_mark fsn_mark; 15 14 int wd; 16 15 }; 17 16 18 - extern void inotify_ignored_and_remove_idr(struct fsnotify_mark *entry, 17 + extern void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark, 19 18 struct fsnotify_group *group); 20 19 extern void inotify_free_event_priv(struct fsnotify_event_private_data *event_priv); 21 20
+24 -24
fs/notify/inotify/inotify_fsnotify.c
··· 88 88 89 89 static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_event *event) 90 90 { 91 - struct fsnotify_mark *entry; 92 - struct inotify_inode_mark_entry *ientry; 91 + struct fsnotify_mark *fsn_mark; 92 + struct inotify_inode_mark *i_mark; 93 93 struct inode *to_tell; 94 94 struct inotify_event_private_data *event_priv; 95 95 struct fsnotify_event_private_data *fsn_event_priv; ··· 98 98 to_tell = event->to_tell; 99 99 100 100 spin_lock(&to_tell->i_lock); 101 - entry = fsnotify_find_mark(group, to_tell); 101 + fsn_mark = fsnotify_find_mark(group, to_tell); 102 102 spin_unlock(&to_tell->i_lock); 103 103 /* race with watch removal? We already passes should_send */ 104 - if (unlikely(!entry)) 104 + if (unlikely(!fsn_mark)) 105 105 return 0; 106 - ientry = container_of(entry, struct inotify_inode_mark_entry, 107 - fsn_entry); 108 - wd = ientry->wd; 106 + i_mark = container_of(fsn_mark, struct inotify_inode_mark, 107 + fsn_mark); 108 + wd = i_mark->wd; 109 109 110 110 event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL); 111 111 if (unlikely(!event_priv)) ··· 127 127 } 128 128 129 129 /* 130 - * If we hold the entry until after the event is on the queue 130 + * If we hold the fsn_mark until after the event is on the queue 131 131 * IN_IGNORED won't be able to pass this event in the queue 132 132 */ 133 - fsnotify_put_mark(entry); 133 + fsnotify_put_mark(fsn_mark); 134 134 135 135 return ret; 136 136 } 137 137 138 - static void inotify_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group) 138 + static void inotify_freeing_mark(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group) 139 139 { 140 - inotify_ignored_and_remove_idr(entry, group); 140 + inotify_ignored_and_remove_idr(fsn_mark, group); 141 141 } 142 142 143 143 static bool inotify_should_send_event(struct fsnotify_group *group, struct inode *inode, 144 144 struct vfsmount *mnt, __u32 mask, void *data, 145 145 int data_type) 146 146 { 147 - struct 
fsnotify_mark *entry; 147 + struct fsnotify_mark *fsn_mark; 148 148 bool send; 149 149 150 150 spin_lock(&inode->i_lock); 151 - entry = fsnotify_find_mark(group, inode); 151 + fsn_mark = fsnotify_find_mark(group, inode); 152 152 spin_unlock(&inode->i_lock); 153 - if (!entry) 153 + if (!fsn_mark) 154 154 return false; 155 155 156 156 mask = (mask & ~FS_EVENT_ON_CHILD); 157 - send = (entry->mask & mask); 157 + send = (fsn_mark->mask & mask); 158 158 159 159 /* find took a reference */ 160 - fsnotify_put_mark(entry); 160 + fsnotify_put_mark(fsn_mark); 161 161 162 162 return send; 163 163 } ··· 171 171 */ 172 172 static int idr_callback(int id, void *p, void *data) 173 173 { 174 - struct fsnotify_mark *entry; 175 - struct inotify_inode_mark_entry *ientry; 174 + struct fsnotify_mark *fsn_mark; 175 + struct inotify_inode_mark *i_mark; 176 176 static bool warned = false; 177 177 178 178 if (warned) 179 179 return 0; 180 180 181 181 warned = true; 182 - entry = p; 183 - ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry); 182 + fsn_mark = p; 183 + i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark); 184 184 185 - WARN(1, "inotify closing but id=%d for entry=%p in group=%p still in " 185 + WARN(1, "inotify closing but id=%d for fsn_mark=%p in group=%p still in " 186 186 "idr. Probably leaking memory\n", id, p, data); 187 187 188 188 /* ··· 191 191 * out why we got here and the panic is no worse than the original 192 192 * BUG() that was here. 193 193 */ 194 - if (entry) 195 - printk(KERN_WARNING "entry->group=%p inode=%p wd=%d\n", 196 - entry->group, entry->i.inode, ientry->wd); 194 + if (fsn_mark) 195 + printk(KERN_WARNING "fsn_mark->group=%p inode=%p wd=%d\n", 196 + fsn_mark->group, fsn_mark->i.inode, i_mark->wd); 197 197 return 0; 198 198 } 199 199
+96 -96
fs/notify/inotify/inotify_user.c
··· 353 353 354 354 static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock, 355 355 int *last_wd, 356 - struct inotify_inode_mark_entry *ientry) 356 + struct inotify_inode_mark *i_mark) 357 357 { 358 358 int ret; 359 359 ··· 362 362 return -ENOMEM; 363 363 364 364 spin_lock(idr_lock); 365 - ret = idr_get_new_above(idr, ientry, *last_wd + 1, 366 - &ientry->wd); 365 + ret = idr_get_new_above(idr, i_mark, *last_wd + 1, 366 + &i_mark->wd); 367 367 /* we added the mark to the idr, take a reference */ 368 368 if (!ret) { 369 - fsnotify_get_mark(&ientry->fsn_entry); 370 - *last_wd = ientry->wd; 369 + *last_wd = i_mark->wd; 370 + fsnotify_get_mark(&i_mark->fsn_mark); 371 371 } 372 372 spin_unlock(idr_lock); 373 373 } while (ret == -EAGAIN); ··· 375 375 return ret; 376 376 } 377 377 378 - static struct inotify_inode_mark_entry *inotify_idr_find_locked(struct fsnotify_group *group, 378 + static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group, 379 379 int wd) 380 380 { 381 381 struct idr *idr = &group->inotify_data.idr; 382 382 spinlock_t *idr_lock = &group->inotify_data.idr_lock; 383 - struct inotify_inode_mark_entry *ientry; 383 + struct inotify_inode_mark *i_mark; 384 384 385 385 assert_spin_locked(idr_lock); 386 386 387 - ientry = idr_find(idr, wd); 388 - if (ientry) { 389 - struct fsnotify_mark *fsn_entry = &ientry->fsn_entry; 387 + i_mark = idr_find(idr, wd); 388 + if (i_mark) { 389 + struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark; 390 390 391 - fsnotify_get_mark(fsn_entry); 391 + fsnotify_get_mark(fsn_mark); 392 392 /* One ref for being in the idr, one ref we just took */ 393 - BUG_ON(atomic_read(&fsn_entry->refcnt) < 2); 393 + BUG_ON(atomic_read(&fsn_mark->refcnt) < 2); 394 394 } 395 395 396 - return ientry; 396 + return i_mark; 397 397 } 398 398 399 - static struct inotify_inode_mark_entry *inotify_idr_find(struct fsnotify_group *group, 399 + static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group 
*group, 400 400 int wd) 401 401 { 402 - struct inotify_inode_mark_entry *ientry; 402 + struct inotify_inode_mark *i_mark; 403 403 spinlock_t *idr_lock = &group->inotify_data.idr_lock; 404 404 405 405 spin_lock(idr_lock); 406 - ientry = inotify_idr_find_locked(group, wd); 406 + i_mark = inotify_idr_find_locked(group, wd); 407 407 spin_unlock(idr_lock); 408 408 409 - return ientry; 409 + return i_mark; 410 410 } 411 411 412 412 static void do_inotify_remove_from_idr(struct fsnotify_group *group, 413 - struct inotify_inode_mark_entry *ientry) 413 + struct inotify_inode_mark *i_mark) 414 414 { 415 415 struct idr *idr = &group->inotify_data.idr; 416 416 spinlock_t *idr_lock = &group->inotify_data.idr_lock; 417 - int wd = ientry->wd; 417 + int wd = i_mark->wd; 418 418 419 419 assert_spin_locked(idr_lock); 420 420 421 421 idr_remove(idr, wd); 422 422 423 423 /* removed from the idr, drop that ref */ 424 - fsnotify_put_mark(&ientry->fsn_entry); 424 + fsnotify_put_mark(&i_mark->fsn_mark); 425 425 } 426 426 427 427 /* ··· 429 429 * on the mark because it was in the idr. 430 430 */ 431 431 static void inotify_remove_from_idr(struct fsnotify_group *group, 432 - struct inotify_inode_mark_entry *ientry) 432 + struct inotify_inode_mark *i_mark) 433 433 { 434 434 spinlock_t *idr_lock = &group->inotify_data.idr_lock; 435 - struct inotify_inode_mark_entry *found_ientry = NULL; 435 + struct inotify_inode_mark *found_i_mark = NULL; 436 436 int wd; 437 437 438 438 spin_lock(idr_lock); 439 - wd = ientry->wd; 439 + wd = i_mark->wd; 440 440 441 441 /* 442 - * does this ientry think it is in the idr? we shouldn't get called 442 + * does this i_mark think it is in the idr? we shouldn't get called 443 443 * if it wasn't.... 
444 444 */ 445 445 if (wd == -1) { 446 - WARN_ONCE(1, "%s: ientry=%p ientry->wd=%d ientry->group=%p" 447 - " ientry->inode=%p\n", __func__, ientry, ientry->wd, 448 - ientry->fsn_entry.group, ientry->fsn_entry.i.inode); 446 + WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p" 447 + " i_mark->inode=%p\n", __func__, i_mark, i_mark->wd, 448 + i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode); 449 449 goto out; 450 450 } 451 451 452 452 /* Lets look in the idr to see if we find it */ 453 - found_ientry = inotify_idr_find_locked(group, wd); 454 - if (unlikely(!found_ientry)) { 455 - WARN_ONCE(1, "%s: ientry=%p ientry->wd=%d ientry->group=%p" 456 - " ientry->inode=%p\n", __func__, ientry, ientry->wd, 457 - ientry->fsn_entry.group, ientry->fsn_entry.i.inode); 453 + found_i_mark = inotify_idr_find_locked(group, wd); 454 + if (unlikely(!found_i_mark)) { 455 + WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p" 456 + " i_mark->inode=%p\n", __func__, i_mark, i_mark->wd, 457 + i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode); 458 458 goto out; 459 459 } 460 460 461 461 /* 462 - * We found an entry in the idr at the right wd, but it's 463 - * not the entry we were told to remove. eparis seriously 462 + * We found an mark in the idr at the right wd, but it's 463 + * not the mark we were told to remove. eparis seriously 464 464 * fucked up somewhere. 
465 465 */ 466 - if (unlikely(found_ientry != ientry)) { 467 - WARN_ONCE(1, "%s: ientry=%p ientry->wd=%d ientry->group=%p " 468 - "entry->inode=%p found_ientry=%p found_ientry->wd=%d " 469 - "found_ientry->group=%p found_ientry->inode=%p\n", 470 - __func__, ientry, ientry->wd, ientry->fsn_entry.group, 471 - ientry->fsn_entry.i.inode, found_ientry, found_ientry->wd, 472 - found_ientry->fsn_entry.group, 473 - found_ientry->fsn_entry.i.inode); 466 + if (unlikely(found_i_mark != i_mark)) { 467 + WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p " 468 + "mark->inode=%p found_i_mark=%p found_i_mark->wd=%d " 469 + "found_i_mark->group=%p found_i_mark->inode=%p\n", 470 + __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group, 471 + i_mark->fsn_mark.i.inode, found_i_mark, found_i_mark->wd, 472 + found_i_mark->fsn_mark.group, 473 + found_i_mark->fsn_mark.i.inode); 474 474 goto out; 475 475 } 476 476 ··· 479 479 * one ref held by the caller trying to kill us 480 480 * one ref grabbed by inotify_idr_find 481 481 */ 482 - if (unlikely(atomic_read(&ientry->fsn_entry.refcnt) < 3)) { 483 - printk(KERN_ERR "%s: ientry=%p ientry->wd=%d ientry->group=%p" 484 - " ientry->inode=%p\n", __func__, ientry, ientry->wd, 485 - ientry->fsn_entry.group, ientry->fsn_entry.i.inode); 482 + if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 3)) { 483 + printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p" 484 + " i_mark->inode=%p\n", __func__, i_mark, i_mark->wd, 485 + i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode); 486 486 /* we can't really recover with bad ref cnting.. 
*/ 487 487 BUG(); 488 488 } 489 489 490 - do_inotify_remove_from_idr(group, ientry); 490 + do_inotify_remove_from_idr(group, i_mark); 491 491 out: 492 492 /* match the ref taken by inotify_idr_find_locked() */ 493 - if (found_ientry) 494 - fsnotify_put_mark(&found_ientry->fsn_entry); 495 - ientry->wd = -1; 493 + if (found_i_mark) 494 + fsnotify_put_mark(&found_i_mark->fsn_mark); 495 + i_mark->wd = -1; 496 496 spin_unlock(idr_lock); 497 497 } 498 498 499 499 /* 500 500 * Send IN_IGNORED for this wd, remove this wd from the idr. 501 501 */ 502 - void inotify_ignored_and_remove_idr(struct fsnotify_mark *entry, 502 + void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark, 503 503 struct fsnotify_group *group) 504 504 { 505 - struct inotify_inode_mark_entry *ientry; 505 + struct inotify_inode_mark *i_mark; 506 506 struct fsnotify_event *ignored_event; 507 507 struct inotify_event_private_data *event_priv; 508 508 struct fsnotify_event_private_data *fsn_event_priv; ··· 514 514 if (!ignored_event) 515 515 return; 516 516 517 - ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry); 517 + i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark); 518 518 519 519 event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS); 520 520 if (unlikely(!event_priv)) ··· 523 523 fsn_event_priv = &event_priv->fsnotify_event_priv_data; 524 524 525 525 fsn_event_priv->group = group; 526 - event_priv->wd = ientry->wd; 526 + event_priv->wd = i_mark->wd; 527 527 528 528 ret = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv, NULL); 529 529 if (ret) ··· 534 534 /* matches the reference taken when the event was created */ 535 535 fsnotify_put_event(ignored_event); 536 536 537 - /* remove this entry from the idr */ 538 - inotify_remove_from_idr(group, ientry); 537 + /* remove this mark from the idr */ 538 + inotify_remove_from_idr(group, i_mark); 539 539 540 540 atomic_dec(&group->inotify_data.user->inotify_watches); 541 541 } 542 542 543 
543 /* ding dong the mark is dead */ 544 - static void inotify_free_mark(struct fsnotify_mark *entry) 544 + static void inotify_free_mark(struct fsnotify_mark *fsn_mark) 545 545 { 546 - struct inotify_inode_mark_entry *ientry; 546 + struct inotify_inode_mark *i_mark; 547 547 548 - ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry); 548 + i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark); 549 549 550 - kmem_cache_free(inotify_inode_mark_cachep, ientry); 550 + kmem_cache_free(inotify_inode_mark_cachep, i_mark); 551 551 } 552 552 553 553 static int inotify_update_existing_watch(struct fsnotify_group *group, 554 554 struct inode *inode, 555 555 u32 arg) 556 556 { 557 - struct fsnotify_mark *entry; 558 - struct inotify_inode_mark_entry *ientry; 557 + struct fsnotify_mark *fsn_mark; 558 + struct inotify_inode_mark *i_mark; 559 559 __u32 old_mask, new_mask; 560 560 __u32 mask; 561 561 int add = (arg & IN_MASK_ADD); ··· 567 567 return -EINVAL; 568 568 569 569 spin_lock(&inode->i_lock); 570 - entry = fsnotify_find_mark(group, inode); 570 + fsn_mark = fsnotify_find_mark(group, inode); 571 571 spin_unlock(&inode->i_lock); 572 - if (!entry) 572 + if (!fsn_mark) 573 573 return -ENOENT; 574 574 575 - ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry); 575 + i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark); 576 576 577 - spin_lock(&entry->lock); 577 + spin_lock(&fsn_mark->lock); 578 578 579 - old_mask = entry->mask; 579 + old_mask = fsn_mark->mask; 580 580 if (add) { 581 - entry->mask |= mask; 582 - new_mask = entry->mask; 581 + fsn_mark->mask |= mask; 582 + new_mask = fsn_mark->mask; 583 583 } else { 584 - entry->mask = mask; 585 - new_mask = entry->mask; 584 + fsn_mark->mask = mask; 585 + new_mask = fsn_mark->mask; 586 586 } 587 587 588 - spin_unlock(&entry->lock); 588 + spin_unlock(&fsn_mark->lock); 589 589 590 590 if (old_mask != new_mask) { 591 591 /* more bits in old than in new? 
*/ 592 592 int dropped = (old_mask & ~new_mask); 593 - /* more bits in this entry than the inode's mask? */ 593 + /* more bits in this fsn_mark than the inode's mask? */ 594 594 int do_inode = (new_mask & ~inode->i_fsnotify_mask); 595 - /* more bits in this entry than the group? */ 595 + /* more bits in this fsn_mark than the group? */ 596 596 int do_group = (new_mask & ~group->mask); 597 597 598 - /* update the inode with this new entry */ 598 + /* update the inode with this new fsn_mark */ 599 599 if (dropped || do_inode) 600 600 fsnotify_recalc_inode_mask(inode); 601 601 ··· 605 605 } 606 606 607 607 /* return the wd */ 608 - ret = ientry->wd; 608 + ret = i_mark->wd; 609 609 610 610 /* match the get from fsnotify_find_mark() */ 611 - fsnotify_put_mark(entry); 611 + fsnotify_put_mark(fsn_mark); 612 612 613 613 return ret; 614 614 } ··· 617 617 struct inode *inode, 618 618 u32 arg) 619 619 { 620 - struct inotify_inode_mark_entry *tmp_ientry; 620 + struct inotify_inode_mark *tmp_i_mark; 621 621 __u32 mask; 622 622 int ret; 623 623 struct idr *idr = &group->inotify_data.idr; ··· 628 628 if (unlikely(!mask)) 629 629 return -EINVAL; 630 630 631 - tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL); 632 - if (unlikely(!tmp_ientry)) 631 + tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL); 632 + if (unlikely(!tmp_i_mark)) 633 633 return -ENOMEM; 634 634 635 - fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark); 636 - tmp_ientry->fsn_entry.mask = mask; 637 - tmp_ientry->wd = -1; 635 + fsnotify_init_mark(&tmp_i_mark->fsn_mark, inotify_free_mark); 636 + tmp_i_mark->fsn_mark.mask = mask; 637 + tmp_i_mark->wd = -1; 638 638 639 639 ret = -ENOSPC; 640 640 if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches) 641 641 goto out_err; 642 642 643 643 ret = inotify_add_to_idr(idr, idr_lock, &group->inotify_data.last_wd, 644 - tmp_ientry); 644 + tmp_i_mark); 645 645 if (ret) 646 646 goto out_err; 647 647 
648 648 /* we are on the idr, now get on the inode */ 649 - ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode, 0); 649 + ret = fsnotify_add_mark(&tmp_i_mark->fsn_mark, group, inode, 0); 650 650 if (ret) { 651 651 /* we failed to get on the inode, get off the idr */ 652 - inotify_remove_from_idr(group, tmp_ientry); 652 + inotify_remove_from_idr(group, tmp_i_mark); 653 653 goto out_err; 654 654 } 655 655 656 656 /* increment the number of watches the user has */ 657 657 atomic_inc(&group->inotify_data.user->inotify_watches); 658 658 659 - /* return the watch descriptor for this new entry */ 660 - ret = tmp_ientry->wd; 659 + /* return the watch descriptor for this new mark */ 660 + ret = tmp_i_mark->wd; 661 661 662 662 /* if this mark added a new event update the group mask */ 663 663 if (mask & ~group->mask) 664 664 fsnotify_recalc_group_mask(group); 665 665 666 666 out_err: 667 - /* match the ref from fsnotify_init_markentry() */ 668 - fsnotify_put_mark(&tmp_ientry->fsn_entry); 667 + /* match the ref from fsnotify_init_mark() */ 668 + fsnotify_put_mark(&tmp_i_mark->fsn_mark); 669 669 670 670 return ret; 671 671 } ··· 801 801 SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd) 802 802 { 803 803 struct fsnotify_group *group; 804 - struct inotify_inode_mark_entry *ientry; 804 + struct inotify_inode_mark *i_mark; 805 805 struct file *filp; 806 806 int ret = 0, fput_needed; 807 807 ··· 817 817 group = filp->private_data; 818 818 819 819 ret = -EINVAL; 820 - ientry = inotify_idr_find(group, wd); 821 - if (unlikely(!ientry)) 820 + i_mark = inotify_idr_find(group, wd); 821 + if (unlikely(!i_mark)) 822 822 goto out; 823 823 824 824 ret = 0; 825 825 826 - fsnotify_destroy_mark(&ientry->fsn_entry); 826 + fsnotify_destroy_mark(&i_mark->fsn_mark); 827 827 828 828 /* match ref taken by inotify_idr_find */ 829 - fsnotify_put_mark(&ientry->fsn_entry); 829 + fsnotify_put_mark(&i_mark->fsn_mark); 830 830 831 831 out: 832 832 fput_light(filp, fput_needed); ··· 840 840 */ 
841 841 static int __init inotify_user_setup(void) 842 842 { 843 - inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC); 843 + inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC); 844 844 event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC); 845 845 846 846 inotify_max_queued_events = 16384;