Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0-or-later
/* CacheFiles path walking and related routines
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/fs.h>
#include <linux/namei.h>
#include "internal.h"

/*
 * Mark the backing file as being a cache file if it's not already in use. The
 * mark tells the culling request command that it's not allowed to cull the
 * file or directory. The caller must hold the inode lock.
 */
static bool __cachefiles_mark_inode_in_use(struct cachefiles_object *object,
					   struct inode *inode)
{
	bool can_use = false;

	if (!(inode->i_flags & S_KERNEL_FILE)) {
		inode->i_flags |= S_KERNEL_FILE;
		trace_cachefiles_mark_active(object, inode);
		can_use = true;
	} else {
		trace_cachefiles_mark_failed(object, inode);
	}

	return can_use;
}

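/*
 * Lock the inode and attempt to mark it as being in use.
 */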
static bool cachefiles_mark_inode_in_use(struct cachefiles_object *object,
					 struct inode *inode)
{
	bool can_use;

	inode_lock(inode);
	can_use = __cachefiles_mark_inode_in_use(object, inode);
	inode_unlock(inode);
	return can_use;
}

/*
 * Unmark a backing inode. The caller must hold the inode lock.
 */
static void __cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
					     struct inode *inode)
{
	inode->i_flags &= ~S_KERNEL_FILE;
	trace_cachefiles_mark_inactive(object, inode);
}

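/*
 * Lock the inode and clear the in-use mark.
 */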
static void cachefiles_do_unmark_inode_in_use(struct cachefiles_object *object,
					      struct inode *inode)
{
	inode_lock(inode);
	__cachefiles_unmark_inode_in_use(object, inode);
	inode_unlock(inode);
}

/*
 * Unmark a backing inode and tell cachefilesd that there's something that can
 * be culled.
 */
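/*
 * Objects that are still only backed by a tmpfile were never linked into the
 * cache tree, so they don't count towards the released block/file tallies
 * that are reported to cachefilesd.
 */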
void cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
				    struct file *file)
{
	struct cachefiles_cache *cache = object->volume->cache;
	struct inode *inode = file_inode(file);

	cachefiles_do_unmark_inode_in_use(object, inode);

	if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
		atomic_long_add(inode->i_blocks, &cache->b_released);
		if (atomic_inc_return(&cache->f_released))
			cachefiles_state_changed(cache);
	}
}

/*
 * get a subdirectory
 */
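/*
 * The subdirectory is created with mode 0700 if it doesn't yet exist and its
 * inode is marked with S_KERNEL_FILE so that the cull commands will leave it
 * alone.  It must support xattrs and the usual directory operations or it
 * can't be used as part of a cache.
 */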
struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
					struct dentry *dir,
					const char *dirname,
					bool *_is_new)
{
	struct dentry *subdir;
	struct path path;
	int ret;

	_enter(",,%s", dirname);

	/* search the current directory for the element name */

retry:
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		subdir = start_creating(&nop_mnt_idmap, dir, &QSTR(dirname));
	else
		subdir = ERR_PTR(ret);
	trace_cachefiles_lookup(NULL, dir, subdir);
	if (IS_ERR(subdir)) {
		trace_cachefiles_vfs_error(NULL, d_backing_inode(dir),
					   PTR_ERR(subdir),
					   cachefiles_trace_lookup_error);
		if (PTR_ERR(subdir) == -ENOMEM)
			goto nomem_d_alloc;
		goto lookup_error;
	}

	_debug("subdir -> %pd %s",
	       subdir, d_backing_inode(subdir) ? "positive" : "negative");

	/* we need to create the subdir if it doesn't exist yet */
	if (d_is_negative(subdir)) {
		ret = cachefiles_has_space(cache, 1, 0,
					   cachefiles_has_space_for_create);
		if (ret < 0)
			goto mkdir_error;

		_debug("attempt mkdir");

		path.mnt = cache->mnt;
		path.dentry = dir;
		ret = security_path_mkdir(&path, subdir, 0700);
		if (ret < 0)
			goto mkdir_error;
		ret = cachefiles_inject_write_error();
		if (ret == 0) {
			subdir = vfs_mkdir(&nop_mnt_idmap, d_inode(dir), subdir, 0700, NULL);
		} else {
			end_creating(subdir);
			subdir = ERR_PTR(ret);
		}
		if (IS_ERR(subdir)) {
			ret = PTR_ERR(subdir);
			trace_cachefiles_vfs_error(NULL, d_inode(dir), ret,
						   cachefiles_trace_mkdir_error);
			goto mkdir_error;
		}
		trace_cachefiles_mkdir(dir, subdir);

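		/* vfs_mkdir() may hand back a different dentry than the one
		 * passed in; if what we got is unhashed or negative, redo the
		 * lookup from the top.
		 */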
		if (unlikely(d_unhashed(subdir) || d_is_negative(subdir))) {
			end_creating(subdir);
			goto retry;
		}
		ASSERT(d_backing_inode(subdir));

		_debug("mkdir -> %pd{ino=%lu}",
		       subdir, d_backing_inode(subdir)->i_ino);
		if (_is_new)
			*_is_new = true;
	}

	/* Tell rmdir() it's not allowed to delete the subdir */
	inode_lock(d_inode(subdir));
	end_creating_keep(subdir);

	if (!__cachefiles_mark_inode_in_use(NULL, d_inode(subdir))) {
		pr_notice("cachefiles: Inode already in use: %pd (B=%lx)\n",
			  subdir, d_inode(subdir)->i_ino);
		goto mark_error;
	}

	inode_unlock(d_inode(subdir));

	/* we need to make sure the subdir is a directory */
	ASSERT(d_backing_inode(subdir));

	if (!d_can_lookup(subdir)) {
		pr_err("%s is not a directory\n", dirname);
		ret = -EIO;
		goto check_error;
	}

	ret = -EPERM;
	if (!(d_backing_inode(subdir)->i_opflags & IOP_XATTR) ||
	    !d_backing_inode(subdir)->i_op->lookup ||
	    !d_backing_inode(subdir)->i_op->mkdir ||
	    !d_backing_inode(subdir)->i_op->rename ||
	    !d_backing_inode(subdir)->i_op->rmdir ||
	    !d_backing_inode(subdir)->i_op->unlink)
		goto check_error;

	_leave(" = [%lu]", d_backing_inode(subdir)->i_ino);
	return subdir;

check_error:
	cachefiles_put_directory(subdir);
	_leave(" = %d [check]", ret);
	return ERR_PTR(ret);

mark_error:
	inode_unlock(d_inode(subdir));
	dput(subdir);
	return ERR_PTR(-EBUSY);

mkdir_error:
	end_creating(subdir);
	pr_err("mkdir %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

lookup_error:
	ret = PTR_ERR(subdir);
	pr_err("Lookup %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

nomem_d_alloc:
	_leave(" = -ENOMEM");
	return ERR_PTR(-ENOMEM);
}

/*
 * Put a subdirectory.
 */
void cachefiles_put_directory(struct dentry *dir)
{
	if (dir) {
		cachefiles_do_unmark_inode_in_use(NULL, d_inode(dir));
		dput(dir);
	}
}

/*
 * Remove a regular file from the cache.
 */
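/*
 * The unlink is run past the security hooks and the error injector first; an
 * -EIO from the backing filesystem is reported as a cache I/O error.
 */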
static int cachefiles_unlink(struct cachefiles_cache *cache,
			     struct cachefiles_object *object,
			     struct dentry *dir, struct dentry *dentry,
			     enum fscache_why_object_killed why)
{
	struct path path = {
		.mnt = cache->mnt,
		.dentry = dir,
	};
	int ret;

	trace_cachefiles_unlink(object, d_inode(dentry)->i_ino, why);
	ret = security_path_unlink(&path, dentry);
	if (ret < 0) {
		cachefiles_io_error(cache, "Unlink security error");
		return ret;
	}

	ret = cachefiles_inject_remove_error();
	if (ret == 0) {
		ret = vfs_unlink(&nop_mnt_idmap, d_backing_inode(dir), dentry, NULL);
		if (ret == -EIO)
			cachefiles_io_error(cache, "Unlink failed");
	}
	if (ret != 0)
		trace_cachefiles_vfs_error(object, d_backing_inode(dir), ret,
					   cachefiles_trace_unlink_error);
	return ret;
}

/*
 * Delete an object representation from the cache
 * - File backed objects are unlinked
 * - Directory backed objects are stuffed into the graveyard for userspace to
 *   delete
 * On entry dir must be locked. It will be unlocked on exit.
 * On entry there must be at least 2 refs on rep, one will be dropped on exit.
 */
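/*
 * Directories are renamed into the graveyard under a name made from the
 * current time and a per-cache counter; cachefilesd then deletes them from
 * userspace at its leisure.
 */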
int cachefiles_bury_object(struct cachefiles_cache *cache,
			   struct cachefiles_object *object,
			   struct dentry *dir,
			   struct dentry *rep,
			   enum fscache_why_object_killed why)
{
	struct dentry *grave, *trap;
	struct path path, path_to_graveyard;
	char nbuffer[8 + 8 + 1];
	int ret;

	_enter(",'%pd','%pd'", dir, rep);

	if (rep->d_parent != dir) {
		end_removing(rep);
		_leave(" = -ESTALE");
		return -ESTALE;
	}

	/* non-directories can just be unlinked */
	if (!d_is_dir(rep)) {
		ret = cachefiles_unlink(cache, object, dir, rep, why);
		end_removing(rep);

		_leave(" = %d", ret);
		return ret;
	}

	/* directories have to be moved to the graveyard */
	_debug("move stale object to graveyard");
	end_removing(rep);

try_again:
	/* first step is to make up a grave dentry in the graveyard */
	sprintf(nbuffer, "%08x%08x",
		(uint32_t) ktime_get_real_seconds(),
		(uint32_t) atomic_inc_return(&cache->gravecounter));

	/* do the multiway lock magic */
	trap = lock_rename(cache->graveyard, dir);
	if (IS_ERR(trap))
		return PTR_ERR(trap);

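	/* lock_rename() locks both parents and returns a "trap" dentry where
	 * the two trees meet, if one parent lies beneath the other; renaming
	 * over that point would create a loop, hence the trap == rep and
	 * trap == grave checks below.
	 */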
	/* do some checks before getting the grave dentry */
	if (rep->d_parent != dir || IS_DEADDIR(d_inode(rep))) {
		/* the entry was probably culled when we dropped the parent dir
		 * lock */
		unlock_rename(cache->graveyard, dir);
		_leave(" = 0 [culled?]");
		return 0;
	}

	if (!d_can_lookup(cache->graveyard)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Graveyard no longer a directory");
		return -EIO;
	}

	if (trap == rep) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	if (d_mountpoint(rep)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Mountpoint in cache");
		return -EIO;
	}

	grave = lookup_one(&nop_mnt_idmap, &QSTR(nbuffer), cache->graveyard);
	if (IS_ERR(grave)) {
		unlock_rename(cache->graveyard, dir);
		trace_cachefiles_vfs_error(object, d_inode(cache->graveyard),
					   PTR_ERR(grave),
					   cachefiles_trace_lookup_error);

		if (PTR_ERR(grave) == -ENOMEM) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		cachefiles_io_error(cache, "Lookup error %ld", PTR_ERR(grave));
		return -EIO;
	}

	if (d_is_positive(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		grave = NULL;
		cond_resched();
		goto try_again;
	}

	if (d_mountpoint(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "Mountpoint in graveyard");
		return -EIO;
	}

	/* target should not be an ancestor of source */
	if (trap == grave) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	/* attempt the rename */
	path.mnt = cache->mnt;
	path.dentry = dir;
	path_to_graveyard.mnt = cache->mnt;
	path_to_graveyard.dentry = cache->graveyard;
	ret = security_path_rename(&path, rep, &path_to_graveyard, grave, 0);
	if (ret < 0) {
		cachefiles_io_error(cache, "Rename security error %d", ret);
	} else {
		struct renamedata rd = {
			.mnt_idmap = &nop_mnt_idmap,
			.old_parent = dir,
			.old_dentry = rep,
			.new_parent = cache->graveyard,
			.new_dentry = grave,
		};
		trace_cachefiles_rename(object, d_inode(rep)->i_ino, why);
		ret = cachefiles_inject_read_error();
		if (ret == 0)
			ret = vfs_rename(&rd);
		if (ret != 0)
			trace_cachefiles_vfs_error(object, d_inode(dir), ret,
						   cachefiles_trace_rename_error);
		if (ret != 0 && ret != -ENOMEM)
			cachefiles_io_error(cache,
					    "Rename failed with error %d", ret);
	}

	__cachefiles_unmark_inode_in_use(object, d_inode(rep));
	unlock_rename(cache->graveyard, dir);
	dput(grave);
	_leave(" = 0");
	return 0;
}

/*
 * Delete a cache file.
 */
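/*
 * The file is found by name under the appropriate fanout subdirectory of the
 * volume (indexed by the low byte of the cookie's key hash) and unlinked
 * there.
 */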
int cachefiles_delete_object(struct cachefiles_object *object,
			     enum fscache_why_object_killed why)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry = object->file->f_path.dentry;
	struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
	int ret;

	_enter(",OBJ%x{%pD}", object->debug_id, object->file);

	dentry = start_removing_dentry(fan, dentry);
	if (IS_ERR(dentry))
		ret = PTR_ERR(dentry);
	else
		ret = cachefiles_unlink(volume->cache, object, fan, dentry, why);
	end_removing(dentry);
	return ret;
}

/*
 * Create a temporary file and leave it unattached and un-xattr'd until the
 * time comes to discard the object from memory.
 */
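/*
 * The tmpfile is created in the object's fanout directory and opened for
 * direct I/O, marked in use and, if the netfs already knows the object size,
 * expanded to that size (rounded up to a DIO block).  It only becomes visible
 * in the cache tree when cachefiles_commit_tmpfile() links it into place.
 */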
struct file *cachefiles_create_tmpfile(struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct cachefiles_cache *cache = volume->cache;
	const struct cred *saved_cred;
	struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
	struct file *file;
	const struct path parentpath = { .mnt = cache->mnt, .dentry = fan };
	uint64_t ni_size;
	long ret;

	cachefiles_begin_secure(cache, &saved_cred);

	ret = cachefiles_inject_write_error();
	if (ret == 0) {
		file = kernel_tmpfile_open(&nop_mnt_idmap, &parentpath,
					   S_IFREG | 0600,
					   O_RDWR | O_LARGEFILE | O_DIRECT,
					   cache->cache_cred);
		ret = PTR_ERR_OR_ZERO(file);
	}
	if (ret) {
		trace_cachefiles_vfs_error(object, d_inode(fan), ret,
					   cachefiles_trace_tmpfile_error);
		if (ret == -EIO)
			cachefiles_io_error_obj(object, "Failed to create tmpfile");
		goto err;
	}

	trace_cachefiles_tmpfile(object, file_inode(file));

	/* This is a newly created file with no other possible user */
	if (!cachefiles_mark_inode_in_use(object, file_inode(file)))
		WARN_ON(1);

	ret = cachefiles_ondemand_init_object(object);
	if (ret < 0)
		goto err_unuse;

	ni_size = object->cookie->object_size;
	ni_size = round_up(ni_size, CACHEFILES_DIO_BLOCK_SIZE);

	if (ni_size > 0) {
		trace_cachefiles_trunc(object, file_inode(file), 0, ni_size,
				       cachefiles_trunc_expand_tmpfile);
		ret = cachefiles_inject_write_error();
		if (ret == 0)
			ret = vfs_truncate(&file->f_path, ni_size);
		if (ret < 0) {
			trace_cachefiles_vfs_error(
				object, file_inode(file), ret,
				cachefiles_trace_trunc_error);
			goto err_unuse;
		}
	}

	ret = -EINVAL;
	if (unlikely(!file->f_op->read_iter) ||
	    unlikely(!file->f_op->write_iter)) {
		pr_notice("Cache does not support read_iter and write_iter\n");
		goto err_unuse;
	}
out:
	cachefiles_end_secure(cache, saved_cred);
	return file;

err_unuse:
	cachefiles_do_unmark_inode_in_use(object, file_inode(file));
	fput(file);
err:
	file = ERR_PTR(ret);
	goto out;
}

/*
 * Create a new file.
 */
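/*
 * Check that the cache has room for another file, then back the object with a
 * fresh tmpfile.  CACHEFILES_OBJECT_USING_TMPFILE notes that the file hasn't
 * been linked into the cache tree yet and the cookie is flagged as needing an
 * update.
 */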
static bool cachefiles_create_file(struct cachefiles_object *object)
{
	struct file *file;
	int ret;

	ret = cachefiles_has_space(object->volume->cache, 1, 0,
				   cachefiles_has_space_for_create);
	if (ret < 0)
		return false;

	file = cachefiles_create_tmpfile(object);
	if (IS_ERR(file))
		return false;

	set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &object->cookie->flags);
	set_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
	_debug("create -> %pD{ino=%lu}", file, file_inode(file)->i_ino);
	object->file = file;
	return true;
}

/*
 * Open an existing file, checking its attributes and replacing it if it is
 * stale.
 */
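/*
 * The inode is marked in use before the file is opened so that cull can't
 * steal it from under us; the coherency xattr is then checked and, if the
 * object turns out to be stale, the file is replaced with a new tmpfile.
 */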
static bool cachefiles_open_file(struct cachefiles_object *object,
				 struct dentry *dentry)
{
	struct cachefiles_cache *cache = object->volume->cache;
	struct file *file;
	struct path path;
	int ret;

	_enter("%pd", dentry);

	if (!cachefiles_mark_inode_in_use(object, d_inode(dentry))) {
		pr_notice("cachefiles: Inode already in use: %pd (B=%lx)\n",
			  dentry, d_inode(dentry)->i_ino);
		return false;
	}

	/* We need to open a file interface onto a data file now as we can't do
	 * it on demand because writeback called from do_exit() sees
	 * current->fs == NULL - which breaks d_path() called from ext4 open.
	 */
	path.mnt = cache->mnt;
	path.dentry = dentry;
	file = kernel_file_open(&path, O_RDWR | O_LARGEFILE | O_DIRECT, cache->cache_cred);
	if (IS_ERR(file)) {
		trace_cachefiles_vfs_error(object, d_backing_inode(dentry),
					   PTR_ERR(file),
					   cachefiles_trace_open_error);
		goto error;
	}

	if (unlikely(!file->f_op->read_iter) ||
	    unlikely(!file->f_op->write_iter)) {
		pr_notice("Cache does not support read_iter and write_iter\n");
		goto error_fput;
	}
	_debug("file -> %pd positive", dentry);

	ret = cachefiles_ondemand_init_object(object);
	if (ret < 0)
		goto error_fput;

	ret = cachefiles_check_auxdata(object, file);
	if (ret < 0)
		goto check_failed;

	clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &object->cookie->flags);

	object->file = file;

	/* Always update the atime on an object we've just looked up (this is
	 * used to keep track of culling, and atimes are only updated by read,
	 * write and readdir but not lookup or open).
	 */
	touch_atime(&file->f_path);
	return true;

check_failed:
	fscache_cookie_lookup_negative(object->cookie);
	cachefiles_unmark_inode_in_use(object, file);
	fput(file);
	if (ret == -ESTALE)
		return cachefiles_create_file(object);
	return false;

error_fput:
	fput(file);
error:
	cachefiles_do_unmark_inode_in_use(object, d_inode(dentry));
	return false;
}

/*
 * walk from the parent object to the child object through the backing
 * filesystem, creating directories as we go
 */
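/*
 * Look up the file backing this object under its fanout directory.  -ENOENT
 * means a new backing file is needed; anything that isn't a regular file is
 * disposed of via cachefiles_bury_object() and replaced; otherwise the file
 * is opened and its coherency data checked.
 */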
bool cachefiles_look_up_object(struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
	int ret;

	_enter("OBJ%x,%s,", object->debug_id, object->d_name);

	/* Look up path "cache/vol/fanout/file". */
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		dentry = lookup_one_positive_unlocked(&nop_mnt_idmap,
						      &QSTR(object->d_name), fan);
	else
		dentry = ERR_PTR(ret);
	trace_cachefiles_lookup(object, fan, dentry);
	if (IS_ERR(dentry)) {
		if (dentry == ERR_PTR(-ENOENT))
			goto new_file;
		if (dentry == ERR_PTR(-EIO))
			cachefiles_io_error_obj(object, "Lookup failed");
		return false;
	}

	if (!d_is_reg(dentry)) {
		struct dentry *de;

		pr_err("%pd is not a file\n", dentry);
		de = start_removing_dentry(fan, dentry);
		if (IS_ERR(de))
			ret = PTR_ERR(de);
		else
			ret = cachefiles_bury_object(volume->cache, object,
						     fan, de,
						     FSCACHE_OBJECT_IS_WEIRD);
		dput(dentry);
		if (ret < 0)
			return false;
		goto new_file;
	}

	ret = cachefiles_open_file(object, dentry);
	dput(dentry);
	if (!ret)
		return false;

	_leave(" = t [%lu]", file_inode(object->file)->i_ino);
	return true;

new_file:
	fscache_cookie_lookup_negative(object->cookie);
	return cachefiles_create_file(object);
}

/*
 * Attempt to link a temporary file into its rightful place in the cache.
 */
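/*
 * If a dentry by the right name already exists (another copy of the object
 * raced us into the cache, or a stale file is in the way), it is unlinked
 * first; the loop repeats until the name is free, then the tmpfile is linked
 * in and the USING_TMPFILE flag dropped.
 */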
bool cachefiles_commit_tmpfile(struct cachefiles_cache *cache,
			       struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
	bool success = false;
	int ret;

	_enter(",%pD", object->file);

	ret = cachefiles_inject_read_error();
	if (ret == 0)
		dentry = start_creating(&nop_mnt_idmap, fan, &QSTR(object->d_name));
	else
		dentry = ERR_PTR(ret);
	if (IS_ERR(dentry)) {
		trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
					   cachefiles_trace_lookup_error);
		_debug("lookup fail %ld", PTR_ERR(dentry));
		goto out;
	}

	/*
	 * This loop will only execute more than once if some other thread
	 * races to create the object we are trying to create.
	 */
	while (!d_is_negative(dentry)) {
		ret = cachefiles_unlink(volume->cache, object, fan, dentry,
					FSCACHE_OBJECT_IS_STALE);
		if (ret < 0)
			goto out_end;

		end_creating(dentry);

		ret = cachefiles_inject_read_error();
		if (ret == 0)
			dentry = start_creating(&nop_mnt_idmap, fan,
						&QSTR(object->d_name));
		else
			dentry = ERR_PTR(ret);
		if (IS_ERR(dentry)) {
			trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
						   cachefiles_trace_lookup_error);
			_debug("lookup fail %ld", PTR_ERR(dentry));
			goto out;
		}
	}

	ret = cachefiles_inject_read_error();
	if (ret == 0)
		ret = vfs_link(object->file->f_path.dentry, &nop_mnt_idmap,
			       d_inode(fan), dentry, NULL);
	if (ret < 0) {
		trace_cachefiles_vfs_error(object, d_inode(fan), ret,
					   cachefiles_trace_link_error);
		_debug("link fail %d", ret);
	} else {
		trace_cachefiles_link(object, file_inode(object->file));
		spin_lock(&object->lock);
		/* TODO: Do we want to switch the file pointer to the new dentry? */
		clear_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
		spin_unlock(&object->lock);
		success = true;
	}

out_end:
	end_creating(dentry);
out:
	_leave(" = %u", success);
	return success;
}

/*
 * Look up an inode to be checked or culled. Return -EBUSY if the inode is
 * marked in use.
 */
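/*
 * The S_KERNEL_FILE flag on the victim's inode is what the cache sets on
 * everything it currently has open or pinned, so finding it set here means
 * the file can't be culled yet.
 */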
static struct dentry *cachefiles_lookup_for_cull(struct cachefiles_cache *cache,
						 struct dentry *dir,
						 char *filename)
{
	struct dentry *victim;
	int ret = -ENOENT;

	victim = start_removing(&nop_mnt_idmap, dir, &QSTR(filename));

	if (IS_ERR(victim))
		goto lookup_error;
	if (d_inode(victim)->i_flags & S_KERNEL_FILE)
		goto lookup_busy;
	return victim;

lookup_busy:
	ret = -EBUSY;
	end_removing(victim);
	return ERR_PTR(ret);

lookup_error:
	ret = PTR_ERR(victim);
	if (ret == -ENOENT)
		return ERR_PTR(-ESTALE); /* Probably got retired by the netfs */

	if (ret == -EIO) {
		cachefiles_io_error(cache, "Lookup failed");
	} else if (ret != -ENOMEM) {
		pr_err("Internal error: %d\n", ret);
		ret = -EIO;
	}

	return ERR_PTR(ret);
}

/*
 * Cull an object if it's not in use
 * - called only by cache manager daemon
 */
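/*
 * Having looked the victim up, the in-use flag is re-checked under the inode
 * lock and then set by us so that the cache can't start using the file while
 * it's on its way to the graveyard.
 */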
int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
		    char *filename)
{
	struct dentry *victim;
	struct inode *inode;
	int ret;

	_enter(",%pd/,%s", dir, filename);

	victim = cachefiles_lookup_for_cull(cache, dir, filename);
	if (IS_ERR(victim))
		return PTR_ERR(victim);

	/* check to see if someone is using this object */
	inode = d_inode(victim);
	inode_lock(inode);
	if (inode->i_flags & S_KERNEL_FILE) {
		ret = -EBUSY;
	} else {
		/* Stop the cache from picking it back up */
		inode->i_flags |= S_KERNEL_FILE;
		ret = 0;
	}
	inode_unlock(inode);
	if (ret < 0)
		goto error_unlock;

	ret = cachefiles_bury_object(cache, NULL, dir, victim,
				     FSCACHE_OBJECT_WAS_CULLED);
	dput(victim);
	if (ret < 0)
		goto error;

	fscache_count_culled();
	_leave(" = 0");
	return 0;

error_unlock:
	end_removing(victim);
error:
	if (ret == -ENOENT)
		return -ESTALE; /* Probably got retired by the netfs */

	if (ret != -ENOMEM) {
		pr_err("Internal error: %d\n", ret);
		ret = -EIO;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * Find out if an object is in use or not
 * - called only by cache manager daemon
 * - returns -EBUSY or 0 to indicate whether an object is in use or not
 */
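/*
 * All the checking is done by cachefiles_lookup_for_cull(); if it returns a
 * victim, the object isn't in use and the lookup just needs to be unwound.
 */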
int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
			    char *filename)
{
	struct dentry *victim;
	int ret = 0;

	victim = cachefiles_lookup_for_cull(cache, dir, filename);
	if (IS_ERR(victim))
		return PTR_ERR(victim);

	inode_unlock(d_inode(dir));
	dput(victim);
	return ret;
}