/* fs/dcache.c @ Linux v2.6.26-rc9 */
/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */

#include <linux/syscalls.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include "internal.h"


int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(dcache_lock);

static struct kmem_cache *dentry_cache __read_mostly;

#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))

/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */
#define D_HASHBITS	d_hash_shift
#define D_HASHMASK	d_hash_mask

static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;
static struct hlist_head *dentry_hashtable __read_mostly;
static LIST_HEAD(dentry_unused);

/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static void __d_free(struct dentry *dentry)
{
	if (dname_external(dentry))
		kfree(dentry->d_name.name);
	kmem_cache_free(dentry_cache, dentry);
}

static void d_callback(struct rcu_head *head)
{
	struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu);
	__d_free(dentry);
}

/*
 * no dcache_lock, please.  The caller must decrement dentry_stat.nr_dentry
 * inside dcache_lock.
 */
static void d_free(struct dentry *dentry)
{
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);
	/* if dentry was never inserted into hash, immediate free is OK */
	if (hlist_unhashed(&dentry->d_hash))
		__d_free(dentry);
	else
		call_rcu(&dentry->d_u.d_rcu, d_callback);
}

static void dentry_lru_remove(struct dentry *dentry)
{
	if (!list_empty(&dentry->d_lru)) {
		list_del_init(&dentry->d_lru);
		dentry_stat.nr_unused--;
	}
}
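/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * filesystem that hangs per-dentry data off d_fsdata would typically free
 * it from the ->d_release() hook that d_free() above invokes.  The
 * "examplefs_*" names are hypothetical:
 *
 *	static void examplefs_d_release(struct dentry *dentry)
 *	{
 *		kfree(dentry->d_fsdata);
 *	}
 *
 *	static struct dentry_operations examplefs_dentry_ops = {
 *		.d_release = examplefs_d_release,
 *	};
 */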
/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined.
 */
static void dentry_iput(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dcache_lock)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		list_del_init(&dentry->d_alias);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
		if (!inode->i_nlink)
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
	}
}

/**
 * d_kill - kill dentry and return parent
 * @dentry: dentry to kill
 *
 * The dentry must already be unhashed and removed from the LRU.
 *
 * If this is the root of the dentry tree, return NULL.
 */
static struct dentry *d_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
	__releases(dcache_lock)
{
	struct dentry *parent;

	list_del(&dentry->d_u.d_child);
	dentry_stat.nr_dentry--;	/* For d_free, below */
	/* drops the locks, at that point nobody can reach this dentry */
	dentry_iput(dentry);
	parent = dentry->d_parent;
	d_free(dentry);
	return dentry == parent ? NULL : parent;
}

/*
 * This is dput
 *
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 *
 * no dcache lock, please.
 */

void dput(struct dentry *dentry)
{
	if (!dentry)
		return;

repeat:
	if (atomic_read(&dentry->d_count) == 1)
		might_sleep();
	if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
		return;

	spin_lock(&dentry->d_lock);
	if (atomic_read(&dentry->d_count)) {
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dcache_lock);
		return;
	}

	/*
	 * AV: ->d_delete() is _NOT_ allowed to block now.
	 */
	if (dentry->d_op && dentry->d_op->d_delete) {
		if (dentry->d_op->d_delete(dentry))
			goto unhash_it;
	}
	/* Unreachable? Get rid of it */
	if (d_unhashed(dentry))
		goto kill_it;
	if (list_empty(&dentry->d_lru)) {
		dentry->d_flags |= DCACHE_REFERENCED;
		list_add(&dentry->d_lru, &dentry_unused);
		dentry_stat.nr_unused++;
	}
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);
	return;

unhash_it:
	__d_drop(dentry);
kill_it:
	dentry_lru_remove(dentry);
	dentry = d_kill(dentry);
	if (dentry)
		goto repeat;
}
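/*
 * Usage sketch (editor's illustration): every successful d_lookup()/dget()
 * must be balanced by a dput().  A typical pattern, with a hypothetical
 * helper name:
 *
 *	struct dentry *child = d_lookup(parent, &name);
 *	if (child) {
 *		do_something_with(child);	// hypothetical
 *		dput(child);			// drop the reference
 *	}
 *
 * Per the "no dcache lock, please" rule above, dput() takes dcache_lock
 * itself and may sleep if it ends up in iput().
 */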
/**
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate
 *
 * Try to invalidate the dentry if it turns out to be
 * possible. If there are other dentries that can be
 * reached through this one we can't delete it and we
 * return -EBUSY. On success we return 0.
 *
 * no dcache lock.
 */

int d_invalidate(struct dentry * dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dcache_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dcache_lock);
		return 0;
	}
	/*
	 * Check whether to do a partial shrink_dcache
	 * to get rid of unused child entries.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
		spin_unlock(&dcache_lock);
		shrink_dcache_parent(dentry);
		spin_lock(&dcache_lock);
	}

	/*
	 * Somebody else still using it?
	 *
	 * If it's a directory, we can't drop it
	 * for fear of somebody re-populating it
	 * with children (even though dropping it
	 * would make it unreachable from the root,
	 * we might still populate it if it was a
	 * working directory or similar).
	 */
	spin_lock(&dentry->d_lock);
	if (atomic_read(&dentry->d_count) > 1) {
		if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dcache_lock);
			return -EBUSY;
		}
	}

	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);
	return 0;
}

/* This should be called _only_ with dcache_lock held */

static inline struct dentry * __dget_locked(struct dentry *dentry)
{
	atomic_inc(&dentry->d_count);
	dentry_lru_remove(dentry);
	return dentry;
}

struct dentry * dget_locked(struct dentry *dentry)
{
	return __dget_locked(dentry);
}

/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 * @want_discon: flag, used by d_splice_alias, to request
 *	that only a DISCONNECTED alias be returned.
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one unless @want_discon is set,
 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
 */

static struct dentry * __d_find_alias(struct inode *inode, int want_discon)
{
	struct list_head *head, *next, *tmp;
	struct dentry *alias, *discon_alias=NULL;

	head = &inode->i_dentry;
	next = inode->i_dentry.next;
	while (next != head) {
		tmp = next;
		next = tmp->next;
		prefetch(next);
		alias = list_entry(tmp, struct dentry, d_alias);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED))
				discon_alias = alias;
			else if (!want_discon) {
				__dget_locked(alias);
				return alias;
			}
		}
	}
	if (discon_alias)
		__dget_locked(discon_alias);
	return discon_alias;
}

struct dentry * d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!list_empty(&inode->i_dentry)) {
		spin_lock(&dcache_lock);
		de = __d_find_alias(inode, 0);
		spin_unlock(&dcache_lock);
	}
	return de;
}
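/*
 * Usage sketch (editor's illustration): d_alloc_anon() below is the main
 * in-tree caller - it tries to reuse an existing alias before creating a
 * disconnected one.  The same pattern in isolation:
 *
 *	struct dentry *alias = d_find_alias(inode);
 *	if (alias) {
 *		// a live dentry for this inode; we now hold a reference
 *		dput(alias);
 *	}
 */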
/*
 *	Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&dcache_lock);
	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
		spin_lock(&dentry->d_lock);
		if (!atomic_read(&dentry->d_count)) {
			__dget_locked(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dcache_lock);
			dput(dentry);
			goto restart;
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&dcache_lock);
}
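/*
 * Usage sketch (editor's illustration): network filesystems call this when
 * the server reports an inode gone, so unused aliases do not linger:
 *
 *	if (inode_is_stale(inode))	// hypothetical predicate
 *		d_prune_aliases(inode);
 *
 * Per the WARNING above, the caller must hold its own inode reference.
 */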
/*
 * Throw away a dentry - free the inode, dput the parent.  This requires that
 * the LRU list has already been removed.
 *
 * Try to prune ancestors as well.  This is necessary to prevent
 * quadratic behavior of shrink_dcache_parent(), but is also expected
 * to be beneficial in reducing dentry cache fragmentation.
 */
static void prune_one_dentry(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dcache_lock)
	__acquires(dcache_lock)
{
	__d_drop(dentry);
	dentry = d_kill(dentry);

	/*
	 * Prune ancestors.  Locking is simpler than in dput(),
	 * because dcache_lock needs to be taken anyway.
	 */
	spin_lock(&dcache_lock);
	while (dentry) {
		if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock))
			return;

		if (dentry->d_op && dentry->d_op->d_delete)
			dentry->d_op->d_delete(dentry);
		dentry_lru_remove(dentry);
		__d_drop(dentry);
		dentry = d_kill(dentry);
		spin_lock(&dcache_lock);
	}
}

/**
 * prune_dcache - shrink the dcache
 * @count: number of entries to try and free
 * @sb: if given, ignore dentries for other superblocks
 *	which are being unmounted.
 *
 * Shrink the dcache. This is done when we need
 * more memory, or simply when we need to unmount
 * something (at which point we need to unuse
 * all dentries).
 *
 * This function may fail to free any resources if
 * all the dentries are in use.
 */

static void prune_dcache(int count, struct super_block *sb)
{
	spin_lock(&dcache_lock);
	for (; count ; count--) {
		struct dentry *dentry;
		struct list_head *tmp;
		struct rw_semaphore *s_umount;

		cond_resched_lock(&dcache_lock);

		tmp = dentry_unused.prev;
		if (sb) {
			/* Try to find a dentry for this sb, but don't try
			 * too hard, if they aren't near the tail they will
			 * be moved down again soon
			 */
			int skip = count;
			while (skip && tmp != &dentry_unused &&
			    list_entry(tmp, struct dentry, d_lru)->d_sb != sb) {
				skip--;
				tmp = tmp->prev;
			}
		}
		if (tmp == &dentry_unused)
			break;
		list_del_init(tmp);
		prefetch(dentry_unused.prev);
		dentry_stat.nr_unused--;
		dentry = list_entry(tmp, struct dentry, d_lru);

		spin_lock(&dentry->d_lock);
		/*
		 * We found an inuse dentry which was not removed from
		 * dentry_unused because of laziness during lookup.  Do not
		 * free it - just keep it off the dentry_unused list.
		 */
		if (atomic_read(&dentry->d_count)) {
			spin_unlock(&dentry->d_lock);
			continue;
		}
		/* If the dentry was recently referenced, don't free it. */
		if (dentry->d_flags & DCACHE_REFERENCED) {
			dentry->d_flags &= ~DCACHE_REFERENCED;
			list_add(&dentry->d_lru, &dentry_unused);
			dentry_stat.nr_unused++;
			spin_unlock(&dentry->d_lock);
			continue;
		}
		/*
		 * If the dentry is not DCACHE_REFERENCED, it is time
		 * to remove it from the dcache, provided the super block is
		 * NULL (which means we are trying to reclaim memory)
		 * or this dentry belongs to the same super block that
		 * we want to shrink.
		 */
		/*
		 * If this dentry is for "my" filesystem, then I can prune it
		 * without taking the s_umount lock (I already hold it).
		 */
		if (sb && dentry->d_sb == sb) {
			prune_one_dentry(dentry);
			continue;
		}
		/*
		 * ...otherwise we need to be sure this filesystem isn't being
		 * unmounted, otherwise we could race with
		 * generic_shutdown_super(), and end up holding a reference to
		 * an inode while the filesystem is unmounted.
		 * So we try to get s_umount, and make sure s_root isn't NULL.
		 * (Take a local copy of s_umount to avoid a use-after-free of
		 * `dentry').
		 */
		s_umount = &dentry->d_sb->s_umount;
		if (down_read_trylock(s_umount)) {
			if (dentry->d_sb->s_root != NULL) {
				prune_one_dentry(dentry);
				up_read(s_umount);
				continue;
			}
			up_read(s_umount);
		}
		spin_unlock(&dentry->d_lock);
		/*
		 * Insert dentry at the head of the list as inserting at the
		 * tail leads to a cycle.
		 */
		list_add(&dentry->d_lru, &dentry_unused);
		dentry_stat.nr_unused++;
	}
	spin_unlock(&dcache_lock);
}

/*
 * Shrink the dcache for the specified super block.
 * This allows us to unmount a device without disturbing
 * the dcache for the other devices.
 *
 * This implementation makes just two traversals of the
 * unused list.  On the first pass we move the selected
 * dentries to the most recent end, and on the second
 * pass we free them.  The second pass must restart after
 * each dput(), but since the target dentries are all at
 * the end, it's really just a single traversal.
 */

/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This
 * is used to free the dcache before unmounting a file
 * system
 */

void shrink_dcache_sb(struct super_block * sb)
{
	struct list_head *tmp, *next;
	struct dentry *dentry;

	/*
	 * Pass one ... move the dentries for the specified
	 * superblock to the most recent end of the unused list.
	 */
	spin_lock(&dcache_lock);
	list_for_each_prev_safe(tmp, next, &dentry_unused) {
		dentry = list_entry(tmp, struct dentry, d_lru);
		if (dentry->d_sb != sb)
			continue;
		list_move_tail(tmp, &dentry_unused);
	}

	/*
	 * Pass two ... free the dentries for this superblock.
	 */
repeat:
	list_for_each_prev_safe(tmp, next, &dentry_unused) {
		dentry = list_entry(tmp, struct dentry, d_lru);
		if (dentry->d_sb != sb)
			continue;
		dentry_stat.nr_unused--;
		list_del_init(tmp);
		spin_lock(&dentry->d_lock);
		if (atomic_read(&dentry->d_count)) {
			spin_unlock(&dentry->d_lock);
			continue;
		}
		prune_one_dentry(dentry);
		cond_resched_lock(&dcache_lock);
		goto repeat;
	}
	spin_unlock(&dcache_lock);
}
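/*
 * Usage sketch (editor's illustration): a filesystem that needs its
 * dentries gone before tearing down superblock state can call, from its
 * umount path:
 *
 *	shrink_dcache_sb(sb);	// drop all unused dentries for this sb
 *
 * The generic unmount path instead goes through
 * shrink_dcache_for_umount() below, which may assume no users remain.
 */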
/*
 * destroy a single subtree of dentries for unmount
 * - see the comments on shrink_dcache_for_umount() for a description of the
 *   locking
 */
static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
{
	struct dentry *parent;
	unsigned detached = 0;

	BUG_ON(!IS_ROOT(dentry));

	/* detach this root from the system */
	spin_lock(&dcache_lock);
	dentry_lru_remove(dentry);
	__d_drop(dentry);
	spin_unlock(&dcache_lock);

	for (;;) {
		/* descend to the first leaf in the current subtree */
		while (!list_empty(&dentry->d_subdirs)) {
			struct dentry *loop;

			/* this is a branch with children - detach all of them
			 * from the system in one go */
			spin_lock(&dcache_lock);
			list_for_each_entry(loop, &dentry->d_subdirs,
					    d_u.d_child) {
				dentry_lru_remove(loop);
				__d_drop(loop);
				cond_resched_lock(&dcache_lock);
			}
			spin_unlock(&dcache_lock);

			/* move to the first child */
			dentry = list_entry(dentry->d_subdirs.next,
					    struct dentry, d_u.d_child);
		}

		/* consume the dentries from this leaf up through its parents
		 * until we find one with children or run out altogether */
		do {
			struct inode *inode;

			if (atomic_read(&dentry->d_count) != 0) {
				printk(KERN_ERR
				       "BUG: Dentry %p{i=%lx,n=%s}"
				       " still in use (%d)"
				       " [unmount of %s %s]\n",
				       dentry,
				       dentry->d_inode ?
				       dentry->d_inode->i_ino : 0UL,
				       dentry->d_name.name,
				       atomic_read(&dentry->d_count),
				       dentry->d_sb->s_type->name,
				       dentry->d_sb->s_id);
				BUG();
			}

			parent = dentry->d_parent;
			if (parent == dentry)
				parent = NULL;
			else
				atomic_dec(&parent->d_count);

			list_del(&dentry->d_u.d_child);
			detached++;

			inode = dentry->d_inode;
			if (inode) {
				dentry->d_inode = NULL;
				list_del_init(&dentry->d_alias);
				if (dentry->d_op && dentry->d_op->d_iput)
					dentry->d_op->d_iput(dentry, inode);
				else
					iput(inode);
			}

			d_free(dentry);

			/* finished when we fall off the top of the tree,
			 * otherwise we ascend to the parent and move to the
			 * next sibling if there is one */
			if (!parent)
				goto out;

			dentry = parent;

		} while (list_empty(&dentry->d_subdirs));

		dentry = list_entry(dentry->d_subdirs.next,
				    struct dentry, d_u.d_child);
	}
out:
	/* several dentries were freed, need to correct nr_dentry */
	spin_lock(&dcache_lock);
	dentry_stat.nr_dentry -= detached;
	spin_unlock(&dcache_lock);
}

/*
 * destroy the dentries attached to a superblock on unmounting
 * - we don't need to use dentry->d_lock, and only need dcache_lock when
 *   removing the dentry from the system lists and hashes because:
 *   - the superblock is detached from all mountings and open files, so the
 *     dentry trees will not be rearranged by the VFS
 *   - s_umount is write-locked, so the memory pressure shrinker will ignore
 *     any dentries belonging to this superblock that it comes across
 *   - the filesystem itself is no longer permitted to rearrange the dentries
 *     in this superblock
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	if (down_read_trylock(&sb->s_umount))
		BUG();

	dentry = sb->s_root;
	sb->s_root = NULL;
	atomic_dec(&dentry->d_count);
	shrink_dcache_for_umount_subtree(dentry);

	while (!hlist_empty(&sb->s_anon)) {
		dentry = hlist_entry(sb->s_anon.first, struct dentry, d_hash);
		shrink_dcache_for_umount_subtree(dentry);
	}
}

/*
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.
 */

/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point
 */

int have_submounts(struct dentry *parent)
{
	struct dentry *this_parent = parent;
	struct list_head *next;

	spin_lock(&dcache_lock);
	if (d_mountpoint(parent))
		goto positive;
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;
		/* Have we found a mount point ? */
		if (d_mountpoint(dentry))
			goto positive;
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		next = this_parent->d_u.d_child.next;
		this_parent = this_parent->d_parent;
		goto resume;
	}
	spin_unlock(&dcache_lock);
	return 0; /* No mount points found in tree */
positive:
	spin_unlock(&dcache_lock);
	return 1;
}
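/*
 * Usage sketch (editor's illustration): ->d_revalidate() code in network
 * filesystems uses this to avoid invalidating a directory that something
 * is mounted on, roughly:
 *
 *	if (have_submounts(dentry))
 *		goto keep_it;		// hypothetical label
 *	shrink_dcache_parent(dentry);
 *	d_drop(dentry);
 */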
/*
 * Search the dentry child list for the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.
 */
static int select_parent(struct dentry * parent)
{
	struct dentry *this_parent = parent;
	struct list_head *next;
	int found = 0;

	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		dentry_lru_remove(dentry);
		/*
		 * move only zero ref count dentries to the end
		 * of the unused list for prune_dcache
		 */
		if (!atomic_read(&dentry->d_count)) {
			list_add_tail(&dentry->d_lru, &dentry_unused);
			dentry_stat.nr_unused++;
			found++;
		}

		/*
		 * We can return to the caller if we have found some (this
		 * ensures forward progress). We'll be coming back to find
		 * the rest.
		 */
		if (found && need_resched())
			goto out;

		/*
		 * Descend a level if the d_subdirs list is non-empty.
		 */
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		next = this_parent->d_u.d_child.next;
		this_parent = this_parent->d_parent;
		goto resume;
	}
out:
	spin_unlock(&dcache_lock);
	return found;
}

/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */

void shrink_dcache_parent(struct dentry * parent)
{
	int found;

	while ((found = select_parent(parent)) != 0)
		prune_dcache(found, parent->d_sb);
}
/*
 * Scan `nr' dentries and return the number which remain.
 *
 * We need to avoid reentering the filesystem if the caller is performing a
 * GFP_NOFS allocation attempt.  One example deadlock is:
 *
 * ext2_new_block->getblk->GFP->shrink_dcache_memory->prune_dcache->
 * prune_one_dentry->dput->dentry_iput->iput->inode->i_sb->s_op->put_inode->
 * ext2_discard_prealloc->ext2_free_blocks->lock_super->DEADLOCK.
 *
 * In this case we return -1 to tell the caller that we bailed.
 */
static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
{
	if (nr) {
		if (!(gfp_mask & __GFP_FS))
			return -1;
		prune_dcache(nr, NULL);
	}
	return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker dcache_shrinker = {
	.shrink = shrink_dcache_memory,
	.seeks = DEFAULT_SEEKS,
};

/**
 * d_alloc	-	allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */

struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	if (name->len > DNAME_INLINE_LEN-1) {
		dname = kmalloc(name->len + 1, GFP_KERNEL);
		if (!dname) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	} else {
		dname = dentry->d_iname;
	}
	dentry->d_name.name = dname;

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	atomic_set(&dentry->d_count, 1);
	dentry->d_flags = DCACHE_UNHASHED;
	spin_lock_init(&dentry->d_lock);
	dentry->d_inode = NULL;
	dentry->d_parent = NULL;
	dentry->d_sb = NULL;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	dentry->d_mounted = 0;
#ifdef CONFIG_PROFILING
	dentry->d_cookie = NULL;
#endif
	INIT_HLIST_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_LIST_HEAD(&dentry->d_alias);

	if (parent) {
		dentry->d_parent = dget(parent);
		dentry->d_sb = parent->d_sb;
	} else {
		INIT_LIST_HEAD(&dentry->d_u.d_child);
	}

	spin_lock(&dcache_lock);
	if (parent)
		list_add(&dentry->d_u.d_child, &parent->d_subdirs);
	dentry_stat.nr_dentry++;
	spin_unlock(&dcache_lock);

	return dentry;
}

struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	return d_alloc(parent, &q);
}

/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */

void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!list_empty(&entry->d_alias));
	spin_lock(&dcache_lock);
	if (inode)
		list_add(&entry->d_alias, &inode->i_dentry);
	entry->d_inode = inode;
	fsnotify_d_instantiate(entry, inode);
	spin_unlock(&dcache_lock);
	security_d_instantiate(entry, inode);
}
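/*
 * Usage sketch (editor's illustration): a filesystem's ->create() receives
 * a dentry the VFS already allocated with d_alloc(), builds the new inode,
 * and binds the two.  The "examplefs_*" names are hypothetical:
 *
 *	static int examplefs_create(struct inode *dir, struct dentry *dentry,
 *				    int mode, struct nameidata *nd)
 *	{
 *		struct inode *inode = examplefs_new_inode(dir, mode);
 *		if (!inode)
 *			return -ENOSPC;
 *		d_instantiate(dentry, inode);	// dentry is now positive
 *		return 0;
 *	}
 */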
/**
 * d_instantiate_unique - instantiate a non-aliased dentry
 * @entry: dentry to instantiate
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. On success, it returns NULL.
 * If an unhashed alias of "entry" already exists, then we return the
 * aliased dentry instead and drop one reference to inode.
 *
 * Note that in order to avoid conflicts with rename() etc, the caller
 * had better be holding the parent directory semaphore.
 *
 * This also assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
static struct dentry *__d_instantiate_unique(struct dentry *entry,
					     struct inode *inode)
{
	struct dentry *alias;
	int len = entry->d_name.len;
	const char *name = entry->d_name.name;
	unsigned int hash = entry->d_name.hash;

	if (!inode) {
		entry->d_inode = NULL;
		return NULL;
	}

	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		struct qstr *qstr = &alias->d_name;

		if (qstr->hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (qstr->len != len)
			continue;
		if (memcmp(qstr->name, name, len))
			continue;
		dget_locked(alias);
		return alias;
	}

	list_add(&entry->d_alias, &inode->i_dentry);
	entry->d_inode = inode;
	fsnotify_d_instantiate(entry, inode);
	return NULL;
}

struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
	struct dentry *result;

	BUG_ON(!list_empty(&entry->d_alias));

	spin_lock(&dcache_lock);
	result = __d_instantiate_unique(entry, inode);
	spin_unlock(&dcache_lock);

	if (!result) {
		security_d_instantiate(entry, inode);
		return NULL;
	}

	BUG_ON(!d_unhashed(result));
	iput(inode);
	return result;
}

EXPORT_SYMBOL(d_instantiate_unique);

/**
 * d_alloc_root - allocate root dentry
 * @root_inode: inode to allocate the root for
 *
 * Allocate a root ("/") dentry for the inode given. The inode is
 * instantiated and returned. %NULL is returned if there is insufficient
 * memory or the inode passed is %NULL.
 */

struct dentry * d_alloc_root(struct inode * root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		static const struct qstr name = { .name = "/", .len = 1 };

		res = d_alloc(NULL, &name);
		if (res) {
			res->d_sb = root_inode->i_sb;
			res->d_parent = res;
			d_instantiate(res, root_inode);
		}
	}
	return res;
}

static inline struct hlist_head *d_hash(struct dentry *parent,
					unsigned long hash)
{
	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
	hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
	return dentry_hashtable + (hash & D_HASHMASK);
}
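/*
 * Usage sketch (editor's illustration): the tail of a typical fill_super
 * implementation, with a hypothetical inode getter:
 *
 *	root_inode = examplefs_get_root_inode(sb);
 *	sb->s_root = d_alloc_root(root_inode);
 *	if (!sb->s_root) {
 *		iput(root_inode);	// d_alloc_root did not consume it
 *		return -ENOMEM;
 *	}
 */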
/**
 * d_alloc_anon - allocate an anonymous dentry
 * @inode: inode to allocate the dentry for
 *
 * This is similar to d_alloc_root.  It is used by filesystems when
 * creating a dentry for a given inode, often in the process of
 * mapping a filehandle to a dentry.  The returned dentry may be
 * anonymous, or may have a full name (if the inode was already
 * in the cache).  The file system may need to make further
 * efforts to connect this dentry into the dcache properly.
 *
 * When called on a directory inode, we must ensure that
 * the inode only ever has one dentry.  If a dentry is
 * found, that is returned instead of allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry.  If %NULL is returned (indicating kmalloc failure),
 * the reference on the inode has not been released.
 */
struct dentry * d_alloc_anon(struct inode *inode)
{
	static const struct qstr anonstring = { .name = "" };
	struct dentry *tmp;
	struct dentry *res;

	if ((res = d_find_alias(inode))) {
		iput(inode);
		return res;
	}

	tmp = d_alloc(NULL, &anonstring);
	if (!tmp)
		return NULL;

	tmp->d_parent = tmp; /* make sure dput doesn't croak */

	spin_lock(&dcache_lock);
	res = __d_find_alias(inode, 0);
	if (!res) {
		/* attach a disconnected dentry */
		res = tmp;
		tmp = NULL;
		spin_lock(&res->d_lock);
		res->d_sb = inode->i_sb;
		res->d_parent = res;
		res->d_inode = inode;
		res->d_flags |= DCACHE_DISCONNECTED;
		res->d_flags &= ~DCACHE_UNHASHED;
		list_add(&res->d_alias, &inode->i_dentry);
		hlist_add_head(&res->d_hash, &inode->i_sb->s_anon);
		spin_unlock(&res->d_lock);

		inode = NULL; /* don't drop reference */
	}
	spin_unlock(&dcache_lock);

	if (inode)
		iput(inode);
	if (tmp)
		dput(tmp);
	return res;
}


/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
 * and return it, else simply d_add the inode to the dentry and return NULL.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned.  Otherwise NULL
 * is returned.  This matches the expected return value of ->lookup.
 *
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	struct dentry *new = NULL;

	if (inode && S_ISDIR(inode->i_mode)) {
		spin_lock(&dcache_lock);
		new = __d_find_alias(inode, 1);
		if (new) {
			BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
			fsnotify_d_instantiate(new, inode);
			spin_unlock(&dcache_lock);
			security_d_instantiate(new, inode);
			d_rehash(dentry);
			d_move(new, dentry);
			iput(inode);
		} else {
			/* d_instantiate takes dcache_lock, so we do it by hand */
			list_add(&dentry->d_alias, &inode->i_dentry);
			dentry->d_inode = inode;
			fsnotify_d_instantiate(dentry, inode);
			spin_unlock(&dcache_lock);
			security_d_instantiate(dentry, inode);
			d_rehash(dentry);
		}
	} else
		d_add(dentry, inode);
	return new;
}
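/*
 * Usage sketch (editor's illustration): an exportable filesystem ends its
 * ->lookup() with d_splice_alias() rather than d_add(), so a disconnected
 * alias found via a filehandle gets reconnected.  Hypothetical helper:
 *
 *	static struct dentry *examplefs_lookup(struct inode *dir,
 *			struct dentry *dentry, struct nameidata *nd)
 *	{
 *		struct inode *inode = examplefs_find_inode(dir, dentry);
 *		return d_splice_alias(inode, dentry); // NULL inode -> negative
 *	}
 */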
/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 *
 * Searches the children of the parent dentry for the name in question. If
 * the dentry is found its reference count is incremented and the dentry
 * is returned. The caller must use dput() to free the entry when it has
 * finished using it. %NULL is returned on failure.
 *
 * __d_lookup is dcache_lock free. The hash list is protected using RCU.
 * Memory barriers are used while updating and doing lockless traversal.
 * To avoid races with d_move while rename is happening, d_lock is used.
 *
 * Overflows in memcmp(), while d_move, are avoided by keeping the length
 * and name pointer in one structure pointed by d_qstr.
 *
 * rcu_read_lock() and rcu_read_unlock() are used to disable preemption while
 * lookup is going on.
 *
 * dentry_unused list is not updated even if lookup finds the required dentry
 * in there. It is updated in places such as prune_dcache, shrink_dcache_sb,
 * select_parent and __dget_locked. This laziness saves lookup from dcache_lock
 * acquisition.
 *
 * d_lookup() is protected against the concurrent renames in some unrelated
 * directory using the seqlock_t rename_lock.
 */

struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
{
	struct dentry * dentry = NULL;
	unsigned long seq;

	do {
		seq = read_seqbegin(&rename_lock);
		dentry = __d_lookup(parent, name);
		if (dentry)
			break;
	} while (read_seqretry(&rename_lock, seq));
	return dentry;
}

struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_head *head = d_hash(parent,hash);
	struct dentry *found = NULL;
	struct hlist_node *node;
	struct dentry *dentry;

	rcu_read_lock();

	hlist_for_each_entry_rcu(dentry, node, head, d_hash) {
		struct qstr *qstr;

		if (dentry->d_name.hash != hash)
			continue;
		if (dentry->d_parent != parent)
			continue;

		spin_lock(&dentry->d_lock);

		/*
		 * Recheck the dentry after taking the lock - d_move may have
		 * changed things.  Don't bother checking the hash because
		 * we're about to compare the whole name anyway.
		 */
		if (dentry->d_parent != parent)
			goto next;

		/*
		 * It is safe to compare names since d_move() cannot
		 * change the qstr (protected by d_lock).
		 */
		qstr = &dentry->d_name;
		if (parent->d_op && parent->d_op->d_compare) {
			if (parent->d_op->d_compare(parent, qstr, name))
				goto next;
		} else {
			if (qstr->len != len)
				goto next;
			if (memcmp(qstr->name, str, len))
				goto next;
		}

		if (!d_unhashed(dentry)) {
			atomic_inc(&dentry->d_count);
			found = dentry;
		}
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
	}
	rcu_read_unlock();

	return found;
}
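/*
 * Usage sketch (editor's illustration): callers build a qstr (hash
 * included) and balance the returned reference with dput():
 *
 *	struct qstr this = { .name = "foo", .len = 3 };
 *	struct dentry *found;
 *
 *	this.hash = full_name_hash(this.name, this.len);
 *	found = d_lookup(parent, &this);
 *	if (found) {
 *		...
 *		dput(found);
 *	}
 *
 * d_hash_and_lookup() below wraps exactly this hashing step, plus any
 * fs-specific ->d_hash() hook.
 */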
/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * On hash failure or on lookup failure NULL is returned.
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
	struct dentry *dentry = NULL;

	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(name->name, name->len);
	if (dir->d_op && dir->d_op->d_hash) {
		if (dir->d_op->d_hash(dir, name) < 0)
			goto out;
	}
	dentry = d_lookup(dir, name);
out:
	return dentry;
}

/**
 * d_validate - verify dentry provided from insecure source
 * @dentry: The dentry alleged to be valid child of @dparent
 * @dparent: The parent dentry (known to be valid)
 *
 * An insecure source has sent us a dentry, here we verify it and dget() it.
 * This is used by ncpfs in its readdir implementation.
 * Zero is returned if the dentry is invalid.
 */

int d_validate(struct dentry *dentry, struct dentry *dparent)
{
	struct hlist_head *base;
	struct hlist_node *lhp;

	/* Check whether the ptr might be valid at all.. */
	if (!kmem_ptr_validate(dentry_cache, dentry))
		goto out;

	if (dentry->d_parent != dparent)
		goto out;

	spin_lock(&dcache_lock);
	base = d_hash(dparent, dentry->d_name.hash);
	hlist_for_each(lhp,base) {
		/* hlist_for_each_entry_rcu() not required for d_hash list
		 * as it is parsed under dcache_lock
		 */
		if (dentry == hlist_entry(lhp, struct dentry, d_hash)) {
			__dget_locked(dentry);
			spin_unlock(&dcache_lock);
			return 1;
		}
	}
	spin_unlock(&dcache_lock);
out:
	return 0;
}

/*
 * When a file is deleted, we have two options:
 * - turn this dentry into a negative dentry
 * - unhash this dentry and free it.
 *
 * Usually, we want to just turn this into
 * a negative dentry, but if anybody else is
 * currently using the dentry or the inode
 * we can't do that and we fall back on removing
 * it from the hash queues and waiting for
 * it to be deleted later when it has no users.
 */

/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later.
 */

void d_delete(struct dentry * dentry)
{
	int isdir = 0;
	/*
	 * Are we the only user?
	 */
	spin_lock(&dcache_lock);
	spin_lock(&dentry->d_lock);
	isdir = S_ISDIR(dentry->d_inode->i_mode);
	if (atomic_read(&dentry->d_count) == 1) {
		dentry_iput(dentry);
		fsnotify_nameremove(dentry, isdir);
		return;
	}

	if (!d_unhashed(dentry))
		__d_drop(dentry);

	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);

	fsnotify_nameremove(dentry, isdir);
}
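/*
 * Usage sketch (editor's illustration): the VFS unlink path calls this
 * after the filesystem's ->unlink() succeeds, roughly:
 *
 *	error = dir->i_op->unlink(dir, dentry);
 *	if (!error)
 *		d_delete(dentry);	// make it negative, or unhash it
 */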
static void __d_rehash(struct dentry * entry, struct hlist_head *list)
{

	entry->d_flags &= ~DCACHE_UNHASHED;
	hlist_add_head_rcu(&entry->d_hash, list);
}

static void _d_rehash(struct dentry * entry)
{
	__d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
}

/**
 * d_rehash	- add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */

void d_rehash(struct dentry * entry)
{
	spin_lock(&dcache_lock);
	spin_lock(&entry->d_lock);
	_d_rehash(entry);
	spin_unlock(&entry->d_lock);
	spin_unlock(&dcache_lock);
}

#define do_switch(x,y) do { \
	__typeof__ (x) __tmp = x; \
	x = y; y = __tmp; } while (0)

/*
 * When switching names, the actual string doesn't strictly have to
 * be preserved in the target - because we're dropping the target
 * anyway. As such, we can just do a simple memcpy() to copy over
 * the new name before we switch.
 *
 * Note that we have to be a lot more careful about getting the hash
 * switched - we have to switch the hash value properly even if it
 * then no longer matches the actual (corrupted) string of the target.
 * The hash value has to match the hash queue that the dentry is on..
 */
static void switch_names(struct dentry *dentry, struct dentry *target)
{
	if (dname_external(target)) {
		if (dname_external(dentry)) {
			/*
			 * Both external: swap the pointers
			 */
			do_switch(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:internal, target:external.  Steal target's
			 * storage and make target internal.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
					dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (dname_external(dentry)) {
			/*
			 * dentry:external, target:internal.  Give dentry's
			 * storage to target and make dentry internal
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both are internal.  Just copy target to dentry
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
		}
	}
}

/*
 * We cannibalize "target" when moving dentry on top of it,
 * because it's going to be thrown away anyway. We could be more
 * polite about it, though.
 *
 * This forceful removal will result in ugly /proc output if
 * somebody holds a file open that got deleted due to a rename.
 * We could be nicer about the deleted file, and let it show
 * up under the name it had before it was deleted rather than
 * under the original name of the file that was moved on top of it.
 */

/*
 * d_move_locked - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way.
 */
static void d_move_locked(struct dentry * dentry, struct dentry * target)
{
	struct hlist_head *list;

	if (!dentry->d_inode)
		printk(KERN_WARNING "VFS: moving negative dcache entry\n");

	write_seqlock(&rename_lock);
	/*
	 * XXXX: do we really need to take target->d_lock?
	 */
	if (target < dentry) {
		spin_lock(&target->d_lock);
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	} else {
		spin_lock(&dentry->d_lock);
		spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED);
	}

	/* Move the dentry to the target hash queue, if on different bucket */
	if (d_unhashed(dentry))
		goto already_unhashed;

	hlist_del_rcu(&dentry->d_hash);

already_unhashed:
	list = d_hash(target->d_parent, target->d_name.hash);
	__d_rehash(dentry, list);

	/* Unhash the target: dput() will then get rid of it */
	__d_drop(target);

	list_del(&dentry->d_u.d_child);
	list_del(&target->d_u.d_child);

	/* Switch the names.. */
	switch_names(dentry, target);
	do_switch(dentry->d_name.len, target->d_name.len);
	do_switch(dentry->d_name.hash, target->d_name.hash);

	/* ... and switch the parents */
	if (IS_ROOT(dentry)) {
		dentry->d_parent = target->d_parent;
		target->d_parent = target;
		INIT_LIST_HEAD(&target->d_u.d_child);
	} else {
		do_switch(dentry->d_parent, target->d_parent);

		/* And add them back to the (new) parent lists */
		list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
	}

	list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
	spin_unlock(&target->d_lock);
	fsnotify_d_move(dentry);
	spin_unlock(&dentry->d_lock);
	write_sequnlock(&rename_lock);
}

/**
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way.
 */

void d_move(struct dentry * dentry, struct dentry * target)
{
	spin_lock(&dcache_lock);
	d_move_locked(dentry, target);
	spin_unlock(&dcache_lock);
}
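/*
 * Usage sketch (editor's illustration): the VFS rename path calls this
 * once the filesystem's ->rename() has succeeded, roughly:
 *
 *	error = old_dir->i_op->rename(old_dir, old_dentry,
 *				      new_dir, new_dentry);
 *	if (!error)
 *		d_move(old_dentry, new_dentry);	// dcache follows the fs
 */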
/*
 * Helper that returns 1 if p1 is a parent of p2, else 0
 */
static int d_isparent(struct dentry *p1, struct dentry *p2)
{
	struct dentry *p;

	for (p = p2; p->d_parent != p; p = p->d_parent) {
		if (p->d_parent == p1)
			return 1;
	}
	return 0;
}

/*
 * This helper attempts to cope with remotely renamed directories
 *
 * It assumes that the caller is already holding
 * dentry->d_parent->d_inode->i_mutex and the dcache_lock
 *
 * Note: If ever the locking in lock_rename() changes, then please
 * remember to update this too...
 */
static struct dentry *__d_unalias(struct dentry *dentry, struct dentry *alias)
	__releases(dcache_lock)
{
	struct mutex *m1 = NULL, *m2 = NULL;
	struct dentry *ret;

	/* If alias and dentry share a parent, then no extra locks required */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* Check for loops */
	ret = ERR_PTR(-ELOOP);
	if (d_isparent(alias, dentry))
		goto out_err;

	/* See lock_rename() */
	ret = ERR_PTR(-EBUSY);
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_mutex;
out_unalias:
	d_move_locked(alias, dentry);
	ret = alias;
out_err:
	spin_unlock(&dcache_lock);
	if (m2)
		mutex_unlock(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}

/*
 * Prepare an anonymous dentry for life in the superblock's dentry tree as a
 * named dentry in place of the dentry to be replaced.
 */
static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
{
	struct dentry *dparent, *aparent;

	switch_names(dentry, anon);
	do_switch(dentry->d_name.len, anon->d_name.len);
	do_switch(dentry->d_name.hash, anon->d_name.hash);

	dparent = dentry->d_parent;
	aparent = anon->d_parent;

	dentry->d_parent = (aparent == anon) ? dentry : aparent;
	list_del(&dentry->d_u.d_child);
	if (!IS_ROOT(dentry))
		list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
	else
		INIT_LIST_HEAD(&dentry->d_u.d_child);

	anon->d_parent = (dparent == dentry) ? anon : dparent;
	list_del(&anon->d_u.d_child);
	if (!IS_ROOT(anon))
		list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs);
	else
		INIT_LIST_HEAD(&anon->d_u.d_child);

	anon->d_flags &= ~DCACHE_DISCONNECTED;
}

/**
 * d_materialise_unique - introduce an inode into the tree
 * @dentry: candidate dentry
 * @inode: inode to bind to the dentry, to which aliases may be attached
 *
 * Introduces a dentry into the tree, substituting an extant disconnected
 * root directory alias in its place if there is one.
 */
struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
{
	struct dentry *actual;

	BUG_ON(!d_unhashed(dentry));

	spin_lock(&dcache_lock);

	if (!inode) {
		actual = dentry;
		dentry->d_inode = NULL;
		goto found_lock;
	}

	if (S_ISDIR(inode->i_mode)) {
		struct dentry *alias;

		/* Does an aliased dentry already exist? */
		alias = __d_find_alias(inode, 0);
		if (alias) {
			actual = alias;
			/* Is this an anonymous mountpoint that we could splice
			 * into our tree? */
			if (IS_ROOT(alias)) {
				spin_lock(&alias->d_lock);
				__d_materialise_dentry(dentry, alias);
				__d_drop(alias);
				goto found;
			}
			/* Nope, but we must(!) avoid directory aliasing */
			actual = __d_unalias(dentry, alias);
			if (IS_ERR(actual))
				dput(alias);
			goto out_nolock;
		}
	}

	/* Add a unique reference */
	actual = __d_instantiate_unique(dentry, inode);
	if (!actual)
		actual = dentry;
	else if (unlikely(!d_unhashed(actual)))
		goto shouldnt_be_hashed;

found_lock:
	spin_lock(&actual->d_lock);
found:
	_d_rehash(actual);
	spin_unlock(&actual->d_lock);
	spin_unlock(&dcache_lock);
out_nolock:
	if (actual == dentry) {
		security_d_instantiate(dentry, inode);
		return NULL;
	}

	iput(inode);
	return actual;

shouldnt_be_hashed:
	spin_unlock(&dcache_lock);
	BUG();
}

static int prepend(char **buffer, int *buflen, const char *str, int namelen)
{
	*buflen -= namelen;
	if (*buflen < 0)
		return -ENAMETOOLONG;
	*buffer -= namelen;
	memcpy(*buffer, str, namelen);
	return 0;
}

static int prepend_name(char **buffer, int *buflen, struct qstr *name)
{
	return prepend(buffer, buflen, name->name, name->len);
}

/**
 * __d_path - return the path of a dentry
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry (may be modified by this function)
 * @buffer: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name. If the entry has been deleted
 * the string " (deleted)" is appended. Note that this is ambiguous.
 *
 * Returns the buffer or an error code if the path was too long.
 *
 * "buflen" should be positive. Caller holds the dcache_lock.
 *
 * If path is not reachable from the supplied root, then the value of
 * root is changed (without modifying refcounts).
 */
char *__d_path(const struct path *path, struct path *root,
	       char *buffer, int buflen)
{
	struct dentry *dentry = path->dentry;
	struct vfsmount *vfsmnt = path->mnt;
	char *end = buffer + buflen;
	char *retval;

	spin_lock(&vfsmount_lock);
	prepend(&end, &buflen, "\0", 1);
	if (!IS_ROOT(dentry) && d_unhashed(dentry) &&
		(prepend(&end, &buflen, " (deleted)", 10) != 0))
		goto Elong;

	if (buflen < 1)
		goto Elong;
	/* Get '/' right */
	retval = end-1;
	*retval = '/';

	for (;;) {
		struct dentry * parent;

		if (dentry == root->dentry && vfsmnt == root->mnt)
			break;
		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
			/* Global root? */
			if (vfsmnt->mnt_parent == vfsmnt) {
				goto global_root;
			}
			dentry = vfsmnt->mnt_mountpoint;
			vfsmnt = vfsmnt->mnt_parent;
			continue;
		}
		parent = dentry->d_parent;
		prefetch(parent);
		if ((prepend_name(&end, &buflen, &dentry->d_name) != 0) ||
		    (prepend(&end, &buflen, "/", 1) != 0))
			goto Elong;
		retval = end;
		dentry = parent;
	}

out:
	spin_unlock(&vfsmount_lock);
	return retval;

global_root:
	retval += 1;	/* hit the slash */
	if (prepend_name(&retval, &buflen, &dentry->d_name) != 0)
		goto Elong;
	root->mnt = vfsmnt;
	root->dentry = dentry;
	goto out;

Elong:
	retval = ERR_PTR(-ENAMETOOLONG);
	goto out;
}
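/*
 * Worked example (editor's illustration): __d_path() builds the name from
 * the tail of the buffer toward the front.  For /usr/bin in a 16-byte
 * buffer the successive prepend() calls leave:
 *
 *	step 1: prepend "\0"         -> [...............\0]
 *	step 2: "bin", then "/"      -> [.........../bin\0]
 *	step 3: "usr", then "/"      -> [......./usr/bin\0]
 *
 * and retval ends up pointing at the leading '/'.
 */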
/**
 * d_path - return the path of a dentry
 * @path: path to report
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name. If the entry has been deleted
 * the string " (deleted)" is appended. Note that this is ambiguous.
 *
 * Returns the buffer or an error code if the path was too long.
 *
 * "buflen" should be positive.
 */
char *d_path(const struct path *path, char *buf, int buflen)
{
	char *res;
	struct path root;
	struct path tmp;

	/*
	 * We have various synthetic filesystems that never get mounted.  On
	 * these filesystems dentries are never used for lookup purposes, and
	 * thus don't need to be hashed.  They also don't need a name until a
	 * user wants to identify the object in /proc/pid/fd/.  The little hack
	 * below allows us to generate a name for these objects on demand:
	 */
	if (path->dentry->d_op && path->dentry->d_op->d_dname)
		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

	read_lock(&current->fs->lock);
	root = current->fs->root;
	path_get(&root);
	read_unlock(&current->fs->lock);
	spin_lock(&dcache_lock);
	tmp = root;
	res = __d_path(path, &tmp, buf, buflen);
	spin_unlock(&dcache_lock);
	path_put(&root);
	return res;
}

/*
 * Helper function for dentry_operations.d_dname() members
 */
char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
			const char *fmt, ...)
{
	va_list args;
	char temp[64];
	int sz;

	va_start(args, fmt);
	sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
	va_end(args);

	if (sz > sizeof(temp) || sz > buflen)
		return ERR_PTR(-ENAMETOOLONG);

	buffer += buflen - sz;
	return memcpy(buffer, temp, sz);
}

/*
 * Write full pathname from the root of the filesystem into the buffer.
 */
char *dentry_path(struct dentry *dentry, char *buf, int buflen)
{
	char *end = buf + buflen;
	char *retval;

	spin_lock(&dcache_lock);
	prepend(&end, &buflen, "\0", 1);
	if (!IS_ROOT(dentry) && d_unhashed(dentry) &&
		(prepend(&end, &buflen, "//deleted", 9) != 0))
		goto Elong;
	if (buflen < 1)
		goto Elong;
	/* Get '/' right */
	retval = end-1;
	*retval = '/';

	while (!IS_ROOT(dentry)) {
		struct dentry *parent = dentry->d_parent;

		prefetch(parent);
		if ((prepend_name(&end, &buflen, &dentry->d_name) != 0) ||
		    (prepend(&end, &buflen, "/", 1) != 0))
			goto Elong;

		retval = end;
		dentry = parent;
	}
	spin_unlock(&dcache_lock);
	return retval;
Elong:
	spin_unlock(&dcache_lock);
	return ERR_PTR(-ENAMETOOLONG);
}
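/*
 * Usage sketch (editor's illustration): a synthetic filesystem gives its
 * unhashed dentries a printable name via ->d_dname(); pipefs does something
 * very similar to this hypothetical version:
 *
 *	static char *examplefs_dname(struct dentry *dentry, char *buffer,
 *				     int buflen)
 *	{
 *		return dynamic_dname(dentry, buffer, buflen, "example:[%lu]",
 *				     dentry->d_inode->i_ino);
 *	}
 *
 * d_path() then reports "example:[1234]" for such objects in /proc/pid/fd/.
 */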
/*
 * NOTE! The user-level library version returns a
 * character pointer. The kernel system call just
 * returns the length of the buffer filled (which
 * includes the ending '\0' character), or a negative
 * error value. So libc would do something like
 *
 *	char *getcwd(char * buf, size_t size)
 *	{
 *		int retval;
 *
 *		retval = sys_getcwd(buf, size);
 *		if (retval >= 0)
 *			return buf;
 *		errno = -retval;
 *		return NULL;
 *	}
 */
asmlinkage long sys_getcwd(char __user *buf, unsigned long size)
{
	int error;
	struct path pwd, root;
	char *page = (char *) __get_free_page(GFP_USER);

	if (!page)
		return -ENOMEM;

	read_lock(&current->fs->lock);
	pwd = current->fs->pwd;
	path_get(&pwd);
	root = current->fs->root;
	path_get(&root);
	read_unlock(&current->fs->lock);

	error = -ENOENT;
	/* Has the current directory been unlinked? */
	spin_lock(&dcache_lock);
	if (IS_ROOT(pwd.dentry) || !d_unhashed(pwd.dentry)) {
		unsigned long len;
		struct path tmp = root;
		char * cwd;

		cwd = __d_path(&pwd, &tmp, page, PAGE_SIZE);
		spin_unlock(&dcache_lock);

		error = PTR_ERR(cwd);
		if (IS_ERR(cwd))
			goto out;

		error = -ERANGE;
		len = PAGE_SIZE + page - cwd;
		if (len <= size) {
			error = len;
			if (copy_to_user(buf, cwd, len))
				error = -EFAULT;
		}
	} else
		spin_unlock(&dcache_lock);

out:
	path_put(&pwd);
	path_put(&root);
	free_page((unsigned long) page);
	return error;
}

/*
 * Test whether new_dentry is a subdirectory of old_dentry.
 *
 * Trivially implemented using the dcache structure
 */

/**
 * is_subdir - is new dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns 1 if new_dentry is a subdirectory of the parent (at any depth).
 * Returns 0 otherwise.

/*
 * Test whether new_dentry is a subdirectory of old_dentry.
 *
 * Trivially implemented using the dcache structure
 */

/**
 * is_subdir - is new dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns 1 if new_dentry is a subdirectory of old_dentry (at any depth).
 * Returns 0 otherwise.
 * Caller must ensure that "new_dentry" is pinned before calling is_subdir().
 */

int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
	int result;
	struct dentry *saved = new_dentry;
	unsigned long seq;

	/* need rcu_read_lock() to protect against d_parent changing under
	 * us due to a concurrent d_move()
	 */
	rcu_read_lock();
	do {
		/* for restarting inner loop in case of seq retry */
		new_dentry = saved;
		result = 0;
		seq = read_seqbegin(&rename_lock);
		for (;;) {
			if (new_dentry != old_dentry) {
				struct dentry *parent = new_dentry->d_parent;
				if (parent == new_dentry)
					break;
				new_dentry = parent;
				continue;
			}
			result = 1;
			break;
		}
	} while (read_seqretry(&rename_lock, seq));
	rcu_read_unlock();

	return result;
}

/*
 * Walk the dentry tree below "root", dropping one reference on every
 * hashed, positive dentry in it.  Done iteratively rather than
 * recursively to keep kernel stack use bounded.
 */
void d_genocide(struct dentry *root)
{
	struct dentry *this_parent = root;
	struct list_head *next;

	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;
		if (d_unhashed(dentry) || !dentry->d_inode)
			continue;
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
		atomic_dec(&dentry->d_count);
	}
	if (this_parent != root) {
		next = this_parent->d_u.d_child.next;
		atomic_dec(&this_parent->d_count);
		this_parent = this_parent->d_parent;
		goto resume;
	}
	spin_unlock(&dcache_lock);
}

/**
 * find_inode_number - check for dentry with name
 * @dir: directory to check
 * @name: Name to find.
 *
 * Check whether a dentry already exists for the given name,
 * and return the inode number if it has an inode. Otherwise
 * 0 is returned.
 *
 * This routine is used to post-process directory listings for
 * filesystems using synthetic inode numbers, and is necessary
 * to keep getcwd() working.
 */

ino_t find_inode_number(struct dentry *dir, struct qstr *name)
{
	struct dentry *dentry;
	ino_t ino = 0;

	dentry = d_hash_and_lookup(dir, name);
	if (dentry) {
		if (dentry->d_inode)
			ino = dentry->d_inode->i_ino;
		dput(dentry);
	}
	return ino;
}

static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
	if (!str)
		return 0;
	dhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("dhash_entries=", set_dhash_entries);

static void __init dcache_init_early(void)
{
	int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_head),
					dhash_entries,
					13,
					HASH_EARLY,
					&d_hash_shift,
					&d_hash_mask,
					0);

	for (loop = 0; loop < (1 << d_hash_shift); loop++)
		INIT_HLIST_HEAD(&dentry_hashtable[loop]);
}
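
/*
 * For example (illustrative): booting with "dhash_entries=65536" on
 * the kernel command line makes the __setup() handler above request
 * 65536 hash buckets, instead of letting alloc_large_system_hash()
 * size the table from available memory.
 */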

static void __init dcache_init(void)
{
	int loop;

	/*
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache-like
	 * nature of the dcache.
	 */
	dentry_cache = KMEM_CACHE(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);

	register_shrinker(&dcache_shrinker);

	/* Hash may have been set up in dcache_init_early */
	if (!hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_head),
					dhash_entries,
					13,
					0,
					&d_hash_shift,
					&d_hash_mask,
					0);

	for (loop = 0; loop < (1 << d_hash_shift); loop++)
		INIT_HLIST_HEAD(&dentry_hashtable[loop]);
}

/* SLAB cache for __getname() consumers */
struct kmem_cache *names_cachep __read_mostly;

/* SLAB cache for file structures */
struct kmem_cache *filp_cachep __read_mostly;

EXPORT_SYMBOL(d_genocide);

void __init vfs_caches_init_early(void)
{
	dcache_init_early();
	inode_init_early();
}

void __init vfs_caches_init(unsigned long mempages)
{
	unsigned long reserve;

	/*
	 * Base hash sizes on available memory, with a reserve equal to
	 * 150% of current kernel size.
	 */
	reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
	mempages -= reserve;

	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	dcache_init();
	inode_init();
	files_init(mempages);
	mnt_init();
	bdev_cache_init();
	chrdev_init();
}

EXPORT_SYMBOL(d_alloc);
EXPORT_SYMBOL(d_alloc_anon);
EXPORT_SYMBOL(d_alloc_root);
EXPORT_SYMBOL(d_delete);
EXPORT_SYMBOL(d_find_alias);
EXPORT_SYMBOL(d_instantiate);
EXPORT_SYMBOL(d_invalidate);
EXPORT_SYMBOL(d_lookup);
EXPORT_SYMBOL(d_move);
EXPORT_SYMBOL_GPL(d_materialise_unique);
EXPORT_SYMBOL(d_path);
EXPORT_SYMBOL(d_prune_aliases);
EXPORT_SYMBOL(d_rehash);
EXPORT_SYMBOL(d_splice_alias);
EXPORT_SYMBOL(d_validate);
EXPORT_SYMBOL(dget_locked);
EXPORT_SYMBOL(dput);
EXPORT_SYMBOL(find_inode_number);
EXPORT_SYMBOL(have_submounts);
EXPORT_SYMBOL(names_cachep);
EXPORT_SYMBOL(shrink_dcache_parent);
EXPORT_SYMBOL(shrink_dcache_sb);
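
/*
 * Example use of the exported name-resolution interface (a sketch, not
 * code in this file; "file" stands for any struct file the caller
 * holds): a module that wants to log the pathname of an open file
 * would typically feed d_path() a temporary page:
 *
 *	char *tmp = (char *)__get_free_page(GFP_KERNEL);
 *	char *name;
 *
 *	if (tmp) {
 *		name = d_path(&file->f_path, tmp, PAGE_SIZE);
 *		if (!IS_ERR(name))
 *			printk(KERN_DEBUG "name: %s\n", name);
 *		free_page((unsigned long)tmp);
 *	}
 */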