Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

fuse: new work queue to periodically invalidate expired dentries

This patch adds the necessary infrastructure to keep track of all dentries
created for FUSE file systems. A set of rbtrees, protected by hashed
locks, will be used to keep all these dentries sorted by expiry time.

A new module parameter 'inval_wq' is also added. When set, it will start
a work queue which will periodically invalidate expired dentries. The
value of this new parameter is the period, in seconds, for this work
queue. Once this parameter is set, every new dentry will be added to one
of the rbtrees.

When the work queue is executed, it will check all the rbtrees and will
invalidate those dentries that have timed out.

The work queue period cannot be smaller than 5 seconds. The work queue can
be disabled entirely by setting 'inval_wq' to zero (which is the default).

Signed-off-by: Luis Henriques <luis@igalia.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>

authored by

Luis Henriques and committed by
Miklos Szeredi
ab84ad59 395b9553

+212 -25
+199 -25
fs/fuse/dir.c
··· 27 27 MODULE_PARM_DESC(allow_sys_admin_access, 28 28 "Allow users with CAP_SYS_ADMIN in initial userns to bypass allow_other access check"); 29 29 30 + struct dentry_bucket { 31 + struct rb_root tree; 32 + spinlock_t lock; 33 + }; 34 + 35 + #define HASH_BITS 5 36 + #define HASH_SIZE (1 << HASH_BITS) 37 + static struct dentry_bucket dentry_hash[HASH_SIZE]; 38 + struct delayed_work dentry_tree_work; 39 + 40 + /* Minimum invalidation work queue frequency */ 41 + #define FUSE_DENTRY_INVAL_FREQ_MIN 5 42 + 43 + unsigned __read_mostly inval_wq; 44 + static int inval_wq_set(const char *val, const struct kernel_param *kp) 45 + { 46 + unsigned int num; 47 + unsigned int old = inval_wq; 48 + int ret; 49 + 50 + if (!val) 51 + return -EINVAL; 52 + 53 + ret = kstrtouint(val, 0, &num); 54 + if (ret) 55 + return ret; 56 + 57 + if ((num < FUSE_DENTRY_INVAL_FREQ_MIN) && (num != 0)) 58 + return -EINVAL; 59 + 60 + /* This should prevent overflow in secs_to_jiffies() */ 61 + if (num > USHRT_MAX) 62 + return -EINVAL; 63 + 64 + *((unsigned int *)kp->arg) = num; 65 + 66 + if (num && !old) 67 + schedule_delayed_work(&dentry_tree_work, 68 + secs_to_jiffies(num)); 69 + else if (!num && old) 70 + cancel_delayed_work_sync(&dentry_tree_work); 71 + 72 + return 0; 73 + } 74 + static const struct kernel_param_ops inval_wq_ops = { 75 + .set = inval_wq_set, 76 + .get = param_get_uint, 77 + }; 78 + module_param_cb(inval_wq, &inval_wq_ops, &inval_wq, 0644); 79 + __MODULE_PARM_TYPE(inval_wq, "uint"); 80 + MODULE_PARM_DESC(inval_wq, 81 + "Dentries invalidation work queue period in secs (>= " 82 + __stringify(FUSE_DENTRY_INVAL_FREQ_MIN) ")."); 83 + 84 + static inline struct dentry_bucket *get_dentry_bucket(struct dentry *dentry) 85 + { 86 + int i = hash_ptr(dentry, HASH_BITS); 87 + 88 + return &dentry_hash[i]; 89 + } 90 + 30 91 static void fuse_advise_use_readdirplus(struct inode *dir) 31 92 { 32 93 struct fuse_inode *fi = get_fuse_inode(dir); ··· 95 34 set_bit(FUSE_I_ADVISE_RDPLUS, &fi->state); 96 
35 } 97 36 98 - #if BITS_PER_LONG >= 64 99 - static inline void __fuse_dentry_settime(struct dentry *entry, u64 time) 100 - { 101 - entry->d_fsdata = (void *) time; 102 - } 103 - 104 - static inline u64 fuse_dentry_time(const struct dentry *entry) 105 - { 106 - return (u64)entry->d_fsdata; 107 - } 108 - 109 - #else 110 - union fuse_dentry { 37 + struct fuse_dentry { 111 38 u64 time; 112 - struct rcu_head rcu; 39 + union { 40 + struct rcu_head rcu; 41 + struct rb_node node; 42 + }; 43 + struct dentry *dentry; 113 44 }; 45 + 46 + static void __fuse_dentry_tree_del_node(struct fuse_dentry *fd, 47 + struct dentry_bucket *bucket) 48 + { 49 + if (!RB_EMPTY_NODE(&fd->node)) { 50 + rb_erase(&fd->node, &bucket->tree); 51 + RB_CLEAR_NODE(&fd->node); 52 + } 53 + } 54 + 55 + static void fuse_dentry_tree_del_node(struct dentry *dentry) 56 + { 57 + struct fuse_dentry *fd = dentry->d_fsdata; 58 + struct dentry_bucket *bucket = get_dentry_bucket(dentry); 59 + 60 + spin_lock(&bucket->lock); 61 + __fuse_dentry_tree_del_node(fd, bucket); 62 + spin_unlock(&bucket->lock); 63 + } 64 + 65 + static void fuse_dentry_tree_add_node(struct dentry *dentry) 66 + { 67 + struct fuse_dentry *fd = dentry->d_fsdata; 68 + struct dentry_bucket *bucket; 69 + struct fuse_dentry *cur; 70 + struct rb_node **p, *parent = NULL; 71 + 72 + if (!inval_wq) 73 + return; 74 + 75 + bucket = get_dentry_bucket(dentry); 76 + 77 + spin_lock(&bucket->lock); 78 + 79 + __fuse_dentry_tree_del_node(fd, bucket); 80 + 81 + p = &bucket->tree.rb_node; 82 + while (*p) { 83 + parent = *p; 84 + cur = rb_entry(*p, struct fuse_dentry, node); 85 + if (fd->time < cur->time) 86 + p = &(*p)->rb_left; 87 + else 88 + p = &(*p)->rb_right; 89 + } 90 + rb_link_node(&fd->node, parent, p); 91 + rb_insert_color(&fd->node, &bucket->tree); 92 + spin_unlock(&bucket->lock); 93 + } 94 + 95 + /* 96 + * work queue which, when enabled, will periodically check for expired dentries 97 + * in the dentries tree. 
98 + */ 99 + static void fuse_dentry_tree_work(struct work_struct *work) 100 + { 101 + LIST_HEAD(dispose); 102 + struct fuse_dentry *fd; 103 + struct rb_node *node; 104 + int i; 105 + 106 + for (i = 0; i < HASH_SIZE; i++) { 107 + spin_lock(&dentry_hash[i].lock); 108 + node = rb_first(&dentry_hash[i].tree); 109 + while (node) { 110 + fd = rb_entry(node, struct fuse_dentry, node); 111 + if (time_after64(get_jiffies_64(), fd->time)) { 112 + rb_erase(&fd->node, &dentry_hash[i].tree); 113 + RB_CLEAR_NODE(&fd->node); 114 + spin_unlock(&dentry_hash[i].lock); 115 + d_dispose_if_unused(fd->dentry, &dispose); 116 + cond_resched(); 117 + spin_lock(&dentry_hash[i].lock); 118 + } else 119 + break; 120 + node = rb_first(&dentry_hash[i].tree); 121 + } 122 + spin_unlock(&dentry_hash[i].lock); 123 + shrink_dentry_list(&dispose); 124 + } 125 + 126 + if (inval_wq) 127 + schedule_delayed_work(&dentry_tree_work, 128 + secs_to_jiffies(inval_wq)); 129 + } 130 + 131 + void fuse_dentry_tree_init(void) 132 + { 133 + int i; 134 + 135 + for (i = 0; i < HASH_SIZE; i++) { 136 + spin_lock_init(&dentry_hash[i].lock); 137 + dentry_hash[i].tree = RB_ROOT; 138 + } 139 + INIT_DELAYED_WORK(&dentry_tree_work, fuse_dentry_tree_work); 140 + } 141 + 142 + void fuse_dentry_tree_cleanup(void) 143 + { 144 + int i; 145 + 146 + inval_wq = 0; 147 + cancel_delayed_work_sync(&dentry_tree_work); 148 + 149 + for (i = 0; i < HASH_SIZE; i++) 150 + WARN_ON_ONCE(!RB_EMPTY_ROOT(&dentry_hash[i].tree)); 151 + } 114 152 115 153 static inline void __fuse_dentry_settime(struct dentry *dentry, u64 time) 116 154 { 117 - ((union fuse_dentry *) dentry->d_fsdata)->time = time; 155 + ((struct fuse_dentry *) dentry->d_fsdata)->time = time; 118 156 } 119 157 120 158 static inline u64 fuse_dentry_time(const struct dentry *entry) 121 159 { 122 - return ((union fuse_dentry *) entry->d_fsdata)->time; 160 + return ((struct fuse_dentry *) entry->d_fsdata)->time; 123 161 } 124 - #endif 125 162 126 163 static void fuse_dentry_settime(struct 
dentry *dentry, u64 time) 127 164 { ··· 240 81 } 241 82 242 83 __fuse_dentry_settime(dentry, time); 84 + fuse_dentry_tree_add_node(dentry); 243 85 } 244 86 245 87 /* ··· 443 283 goto out; 444 284 } 445 285 446 - #if BITS_PER_LONG < 64 447 286 static int fuse_dentry_init(struct dentry *dentry) 448 287 { 449 - dentry->d_fsdata = kzalloc(sizeof(union fuse_dentry), 450 - GFP_KERNEL_ACCOUNT | __GFP_RECLAIMABLE); 288 + struct fuse_dentry *fd; 451 289 452 - return dentry->d_fsdata ? 0 : -ENOMEM; 290 + fd = kzalloc(sizeof(struct fuse_dentry), 291 + GFP_KERNEL_ACCOUNT | __GFP_RECLAIMABLE); 292 + if (!fd) 293 + return -ENOMEM; 294 + 295 + fd->dentry = dentry; 296 + RB_CLEAR_NODE(&fd->node); 297 + dentry->d_fsdata = fd; 298 + 299 + return 0; 453 300 } 301 + 302 + static void fuse_dentry_prune(struct dentry *dentry) 303 + { 304 + struct fuse_dentry *fd = dentry->d_fsdata; 305 + 306 + if (!RB_EMPTY_NODE(&fd->node)) 307 + fuse_dentry_tree_del_node(dentry); 308 + } 309 + 454 310 static void fuse_dentry_release(struct dentry *dentry) 455 311 { 456 - union fuse_dentry *fd = dentry->d_fsdata; 312 + struct fuse_dentry *fd = dentry->d_fsdata; 457 313 458 314 kfree_rcu(fd, rcu); 459 315 } 460 - #endif 461 316 462 317 static int fuse_dentry_delete(const struct dentry *dentry) 463 318 { ··· 506 331 const struct dentry_operations fuse_dentry_operations = { 507 332 .d_revalidate = fuse_dentry_revalidate, 508 333 .d_delete = fuse_dentry_delete, 509 - #if BITS_PER_LONG < 64 510 334 .d_init = fuse_dentry_init, 335 + .d_prune = fuse_dentry_prune, 511 336 .d_release = fuse_dentry_release, 512 - #endif 513 337 .d_automount = fuse_dentry_automount, 514 338 }; 515 339
+10
fs/fuse/fuse_i.h
··· 54 54 /** Frequency (in jiffies) of request timeout checks, if opted into */ 55 55 extern const unsigned long fuse_timeout_timer_freq; 56 56 57 + /* 58 + * Dentries invalidation workqueue period, in seconds. The value of this 59 + * parameter shall be >= FUSE_DENTRY_INVAL_FREQ_MIN seconds, or 0 (zero), in 60 + * which case no workqueue will be created. 61 + */ 62 + extern unsigned inval_wq __read_mostly; 63 + 57 64 /** Maximum of max_pages received in init_out */ 58 65 extern unsigned int fuse_max_pages_limit; 59 66 /* ··· 1283 1276 1284 1277 /* Check if any requests timed out */ 1285 1278 void fuse_check_timeout(struct work_struct *work); 1279 + 1280 + void fuse_dentry_tree_init(void); 1281 + void fuse_dentry_tree_cleanup(void); 1286 1282 1287 1283 /** 1288 1284 * Invalidate inode attributes
+3
fs/fuse/inode.c
··· 2294 2294 if (res) 2295 2295 goto err_sysfs_cleanup; 2296 2296 2297 + fuse_dentry_tree_init(); 2298 + 2297 2299 sanitize_global_limit(&max_user_bgreq); 2298 2300 sanitize_global_limit(&max_user_congthresh); 2299 2301 ··· 2315 2313 { 2316 2314 pr_debug("exit\n"); 2317 2315 2316 + fuse_dentry_tree_cleanup(); 2318 2317 fuse_ctl_cleanup(); 2319 2318 fuse_sysfs_cleanup(); 2320 2319 fuse_fs_cleanup();