Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
12#include "xfs_mount.h"
13#include "xfs_inode.h"
14#include "xfs_trans.h"
15#include "xfs_trans_priv.h"
16#include "xfs_inode_item.h"
17#include "xfs_quota.h"
18#include "xfs_trace.h"
19#include "xfs_icache.h"
20#include "xfs_bmap_util.h"
21#include "xfs_dquot_item.h"
22#include "xfs_dquot.h"
23#include "xfs_reflink.h"
24#include "xfs_ialloc.h"
25#include "xfs_ag.h"
26#include "xfs_log_priv.h"
27#include "xfs_health.h"
28
29#include <linux/iversion.h>
30
31/* Radix tree tags for incore inode tree. */
32
33/* inode is to be reclaimed */
34#define XFS_ICI_RECLAIM_TAG 0
35/* Inode has speculative preallocations (posteof or cow) to clean. */
36#define XFS_ICI_BLOCKGC_TAG 1
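/*
 * These tags are set on individual inodes in the per-AG inode radix tree
 * (pag->pag_ici_root) and then propagated to the AG's entry in the mount's
 * perag radix tree (mp->m_perag_tree), so that cache walks can skip AGs
 * which have no tagged inodes at all. See xfs_perag_set_inode_tag() below.
 */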
37
38/*
39 * The goal for walking incore inodes. These can correspond with incore inode
40 * radix tree tags when convenient. Avoid existing XFS_IWALK namespace.
41 */
42enum xfs_icwalk_goal {
43 /* Goals directly associated with tagged inodes. */
44 XFS_ICWALK_BLOCKGC = XFS_ICI_BLOCKGC_TAG,
45 XFS_ICWALK_RECLAIM = XFS_ICI_RECLAIM_TAG,
46};
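/*
 * Because the goal values equal the radix tree tag values, a goal can be
 * passed directly as the tag argument to radix_tree_gang_lookup_tag() in
 * xfs_icwalk_ag() below.
 */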
47
48static int xfs_icwalk(struct xfs_mount *mp,
49 enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
50static int xfs_icwalk_ag(struct xfs_perag *pag,
51 enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
52
53/*
54 * Private inode cache walk flags for struct xfs_icwalk. Must not
55 * coincide with XFS_ICWALK_FLAGS_VALID.
56 */
57
58/* Stop scanning after icw_scan_limit inodes. */
59#define XFS_ICWALK_FLAG_SCAN_LIMIT (1U << 28)
60
61#define XFS_ICWALK_FLAG_RECLAIM_SICK (1U << 27)
62#define XFS_ICWALK_FLAG_UNION (1U << 26) /* union filter algorithm */
63
64#define XFS_ICWALK_PRIVATE_FLAGS (XFS_ICWALK_FLAG_SCAN_LIMIT | \
65 XFS_ICWALK_FLAG_RECLAIM_SICK | \
66 XFS_ICWALK_FLAG_UNION)
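/*
 * The non-overlap requirement above is enforced at compile time by the
 * BUILD_BUG_ON() at the end of xfs_icwalk().
 */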
67
68/*
69 * Allocate and initialise an xfs_inode.
70 */
71struct xfs_inode *
72xfs_inode_alloc(
73 struct xfs_mount *mp,
74 xfs_ino_t ino)
75{
76 struct xfs_inode *ip;
77
78 /*
79 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
80 * and return NULL here on ENOMEM.
81 */
82 ip = alloc_inode_sb(mp->m_super, xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL);
83
84 if (inode_init_always(mp->m_super, VFS_I(ip))) {
85 kmem_cache_free(xfs_inode_cache, ip);
86 return NULL;
87 }
88
89 /* VFS doesn't initialise i_mode! */
90 VFS_I(ip)->i_mode = 0;
91 mapping_set_large_folios(VFS_I(ip)->i_mapping);
92
93 XFS_STATS_INC(mp, vn_active);
94 ASSERT(atomic_read(&ip->i_pincount) == 0);
95 ASSERT(ip->i_ino == 0);
96
97 /* initialise the xfs inode */
98 ip->i_ino = ino;
99 ip->i_mount = mp;
100 memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
101 ip->i_cowfp = NULL;
102 memset(&ip->i_af, 0, sizeof(ip->i_af));
103 ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS;
104 memset(&ip->i_df, 0, sizeof(ip->i_df));
105 ip->i_flags = 0;
106 ip->i_delayed_blks = 0;
107 ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
108 ip->i_nblocks = 0;
109 ip->i_forkoff = 0;
110 ip->i_sick = 0;
111 ip->i_checked = 0;
112 INIT_WORK(&ip->i_ioend_work, xfs_end_io);
113 INIT_LIST_HEAD(&ip->i_ioend_list);
114 spin_lock_init(&ip->i_ioend_lock);
115 ip->i_next_unlinked = NULLAGINO;
116 ip->i_prev_unlinked = 0;
117
118 return ip;
119}
120
121STATIC void
122xfs_inode_free_callback(
123 struct rcu_head *head)
124{
125 struct inode *inode = container_of(head, struct inode, i_rcu);
126 struct xfs_inode *ip = XFS_I(inode);
127
128 switch (VFS_I(ip)->i_mode & S_IFMT) {
129 case S_IFREG:
130 case S_IFDIR:
131 case S_IFLNK:
132 xfs_idestroy_fork(&ip->i_df);
133 break;
134 }
135
136 xfs_ifork_zap_attr(ip);
137
138 if (ip->i_cowfp) {
139 xfs_idestroy_fork(ip->i_cowfp);
140 kmem_cache_free(xfs_ifork_cache, ip->i_cowfp);
141 }
142 if (ip->i_itemp) {
143 ASSERT(!test_bit(XFS_LI_IN_AIL,
144 &ip->i_itemp->ili_item.li_flags));
145 xfs_inode_item_destroy(ip);
146 ip->i_itemp = NULL;
147 }
148
149 kmem_cache_free(xfs_inode_cache, ip);
150}
151
152static void
153__xfs_inode_free(
154 struct xfs_inode *ip)
155{
156 /* asserts to verify all state is correct here */
157 ASSERT(atomic_read(&ip->i_pincount) == 0);
158 ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
159 XFS_STATS_DEC(ip->i_mount, vn_active);
160
161 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
162}
163
164void
165xfs_inode_free(
166 struct xfs_inode *ip)
167{
168 ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));
169
170 /*
171 * Because we use RCU freeing we need to ensure the inode always
172 * appears to be reclaimed with an invalid inode number when in the
173 * free state. The ip->i_flags_lock provides the barrier against lookup
174 * races.
175 */
176 spin_lock(&ip->i_flags_lock);
177 ip->i_flags = XFS_IRECLAIM;
178 ip->i_ino = 0;
179 spin_unlock(&ip->i_flags_lock);
180
181 __xfs_inode_free(ip);
182}
183
184/*
185 * Queue background inode reclaim work if there are reclaimable inodes and there
186 * isn't reclaim work already scheduled or in progress.
187 */
188static void
189xfs_reclaim_work_queue(
190 struct xfs_mount *mp)
191{
192
193 rcu_read_lock();
194 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
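		/*
		 * xfs_syncd_centisecs is in centiseconds: the multiply by 10
		 * converts it to milliseconds, and the divide by 6 runs the
		 * background reclaimer at one sixth of the sync period.
		 */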
195 queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
196 msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
197 }
198 rcu_read_unlock();
199}
200
201/*
202 * Background scanning to trim preallocated space. This is queued based on the
203 * 'speculative_prealloc_lifetime' tunable (5m by default).
204 */
205static inline void
206xfs_blockgc_queue(
207 struct xfs_perag *pag)
208{
209 struct xfs_mount *mp = pag->pag_mount;
210
211 if (!xfs_is_blockgc_enabled(mp))
212 return;
213
214 rcu_read_lock();
215 if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
216 queue_delayed_work(pag->pag_mount->m_blockgc_wq,
217 &pag->pag_blockgc_work,
218 msecs_to_jiffies(xfs_blockgc_secs * 1000));
219 rcu_read_unlock();
220}
221
222/* Set a tag on both the AG incore inode tree and the AG radix tree. */
223static void
224xfs_perag_set_inode_tag(
225 struct xfs_perag *pag,
226 xfs_agino_t agino,
227 unsigned int tag)
228{
229 struct xfs_mount *mp = pag->pag_mount;
230 bool was_tagged;
231
232 lockdep_assert_held(&pag->pag_ici_lock);
233
234 was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
235 radix_tree_tag_set(&pag->pag_ici_root, agino, tag);
236
237 if (tag == XFS_ICI_RECLAIM_TAG)
238 pag->pag_ici_reclaimable++;
239
240 if (was_tagged)
241 return;
242
243 /* propagate the tag up into the perag radix tree */
244 spin_lock(&mp->m_perag_lock);
245 radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
246 spin_unlock(&mp->m_perag_lock);
247
248 /* start background work */
249 switch (tag) {
250 case XFS_ICI_RECLAIM_TAG:
251 xfs_reclaim_work_queue(mp);
252 break;
253 case XFS_ICI_BLOCKGC_TAG:
254 xfs_blockgc_queue(pag);
255 break;
256 }
257
258 trace_xfs_perag_set_inode_tag(pag, _RET_IP_);
259}
260
261/* Clear a tag on both the AG incore inode tree and the AG radix tree. */
262static void
263xfs_perag_clear_inode_tag(
264 struct xfs_perag *pag,
265 xfs_agino_t agino,
266 unsigned int tag)
267{
268 struct xfs_mount *mp = pag->pag_mount;
269
270 lockdep_assert_held(&pag->pag_ici_lock);
271
272 /*
273 * Reclaim can signal (with a null agino) that it cleared its own tag
274 * by removing the inode from the radix tree.
275 */
276 if (agino != NULLAGINO)
277 radix_tree_tag_clear(&pag->pag_ici_root, agino, tag);
278 else
279 ASSERT(tag == XFS_ICI_RECLAIM_TAG);
280
281 if (tag == XFS_ICI_RECLAIM_TAG)
282 pag->pag_ici_reclaimable--;
283
284 if (radix_tree_tagged(&pag->pag_ici_root, tag))
285 return;
286
287 /* clear the tag from the perag radix tree */
288 spin_lock(&mp->m_perag_lock);
289 radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag);
290 spin_unlock(&mp->m_perag_lock);
291
292 trace_xfs_perag_clear_inode_tag(pag, _RET_IP_);
293}
294
295/*
296 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
297 * part of the structure. This is made more complex by the fact we store
298 * information about the on-disk values in the VFS inode and so we can't just
299 * overwrite the values unconditionally. Hence we save the parameters we
300 * need to retain across reinitialisation, and rewrite them into the VFS inode
301 * after reinitialisation even if it fails.
302 */
303static int
304xfs_reinit_inode(
305 struct xfs_mount *mp,
306 struct inode *inode)
307{
308 int error;
309 uint32_t nlink = inode->i_nlink;
310 uint32_t generation = inode->i_generation;
311 uint64_t version = inode_peek_iversion(inode);
312 umode_t mode = inode->i_mode;
313 dev_t dev = inode->i_rdev;
314 kuid_t uid = inode->i_uid;
315 kgid_t gid = inode->i_gid;
316 unsigned long state = inode->i_state;
317
318 error = inode_init_always(mp->m_super, inode);
319
320 set_nlink(inode, nlink);
321 inode->i_generation = generation;
322 inode_set_iversion_queried(inode, version);
323 inode->i_mode = mode;
324 inode->i_rdev = dev;
325 inode->i_uid = uid;
326 inode->i_gid = gid;
327 inode->i_state = state;
328 mapping_set_large_folios(inode->i_mapping);
329 return error;
330}
331
332/*
333 * Carefully nudge an inode whose VFS state has been torn down back into a
334 * usable state. Drops the i_flags_lock and the rcu read lock.
335 */
336static int
337xfs_iget_recycle(
338 struct xfs_perag *pag,
339 struct xfs_inode *ip) __releases(&ip->i_flags_lock)
340{
341 struct xfs_mount *mp = ip->i_mount;
342 struct inode *inode = VFS_I(ip);
343 int error;
344
345 trace_xfs_iget_recycle(ip);
346
347 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
348 return -EAGAIN;
349
350 /*
351 * We need to make it look like the inode is being reclaimed to prevent
352 * the actual reclaim workers from stomping over us while we recycle
353 * the inode. We can't clear the radix tree tag yet as it requires
354 * pag_ici_lock to be held exclusive.
355 */
356 ip->i_flags |= XFS_IRECLAIM;
357
358 spin_unlock(&ip->i_flags_lock);
359 rcu_read_unlock();
360
361 ASSERT(!rwsem_is_locked(&inode->i_rwsem));
362 error = xfs_reinit_inode(mp, inode);
363 xfs_iunlock(ip, XFS_ILOCK_EXCL);
364 if (error) {
365 /*
366 * Re-initializing the inode failed, and we are in deep
367 * trouble. Try to re-add it to the reclaim list.
368 */
369 rcu_read_lock();
370 spin_lock(&ip->i_flags_lock);
371 ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
372 ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
373 spin_unlock(&ip->i_flags_lock);
374 rcu_read_unlock();
375
376 trace_xfs_iget_recycle_fail(ip);
377 return error;
378 }
379
380 spin_lock(&pag->pag_ici_lock);
381 spin_lock(&ip->i_flags_lock);
382
383 /*
384 * Clear the per-lifetime state in the inode as we are now effectively
385 * a new inode and need to return to the initial state before reuse
386 * occurs.
387 */
388 ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
389 ip->i_flags |= XFS_INEW;
390 xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
391 XFS_ICI_RECLAIM_TAG);
392 inode->i_state = I_NEW;
393 spin_unlock(&ip->i_flags_lock);
394 spin_unlock(&pag->pag_ici_lock);
395
396 return 0;
397}
398
399/*
400 * If we are allocating a new inode, then check what was returned is
401 * actually a free, empty inode. If we are not allocating an inode,
402 * then check we didn't find a free inode.
403 *
404 * Returns:
405 * 0 if the inode free state matches the lookup context
406 * -ENOENT if the inode is free and we are not allocating
407 * -EFSCORRUPTED if there is any state mismatch at all
408 */
409static int
410xfs_iget_check_free_state(
411 struct xfs_inode *ip,
412 int flags)
413{
414 if (flags & XFS_IGET_CREATE) {
415 /* should be a free inode */
416 if (VFS_I(ip)->i_mode != 0) {
417 xfs_warn(ip->i_mount,
418"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
419 ip->i_ino, VFS_I(ip)->i_mode);
420 xfs_agno_mark_sick(ip->i_mount,
421 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
422 XFS_SICK_AG_INOBT);
423 return -EFSCORRUPTED;
424 }
425
426 if (ip->i_nblocks != 0) {
427 xfs_warn(ip->i_mount,
428"Corruption detected! Free inode 0x%llx has blocks allocated!",
429 ip->i_ino);
430 xfs_agno_mark_sick(ip->i_mount,
431 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
432 XFS_SICK_AG_INOBT);
433 return -EFSCORRUPTED;
434 }
435 return 0;
436 }
437
438 /* should be an allocated inode */
439 if (VFS_I(ip)->i_mode == 0)
440 return -ENOENT;
441
442 return 0;
443}
444
445/* Make all pending inactivation work start immediately. */
446static bool
447xfs_inodegc_queue_all(
448 struct xfs_mount *mp)
449{
450 struct xfs_inodegc *gc;
451 int cpu;
452 bool ret = false;
453
454 for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
455 gc = per_cpu_ptr(mp->m_inodegc, cpu);
456 if (!llist_empty(&gc->list)) {
457 mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
458 ret = true;
459 }
460 }
461
462 return ret;
463}
464
465/* Wait for all queued work and collect errors */
466static int
467xfs_inodegc_wait_all(
468 struct xfs_mount *mp)
469{
470 int cpu;
471 int error = 0;
472
473 flush_workqueue(mp->m_inodegc_wq);
474 for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
475 struct xfs_inodegc *gc;
476
477 gc = per_cpu_ptr(mp->m_inodegc, cpu);
478 if (gc->error && !error)
479 error = gc->error;
480 gc->error = 0;
481 }
482
483 return error;
484}
485
486/*
487 * Check the validity of the inode we just found in the cache
488 */
489static int
490xfs_iget_cache_hit(
491 struct xfs_perag *pag,
492 struct xfs_inode *ip,
493 xfs_ino_t ino,
494 int flags,
495 int lock_flags) __releases(RCU)
496{
497 struct inode *inode = VFS_I(ip);
498 struct xfs_mount *mp = ip->i_mount;
499 int error;
500
501 /*
502 * check for re-use of an inode within an RCU grace period due to the
503 * radix tree nodes not being updated yet. We monitor for this by
504 * setting the inode number to zero before freeing the inode structure.
505 * If the inode has been reallocated and set up, then the inode number
506 * will not match, so check for that, too.
507 */
508 spin_lock(&ip->i_flags_lock);
509 if (ip->i_ino != ino)
510 goto out_skip;
511
512 /*
513 * If we are racing with another cache hit that is currently
514 * instantiating this inode or currently recycling it out of
515 * reclaimable state, wait for the initialisation to complete
516 * before continuing.
517 *
518 * If we're racing with the inactivation worker we also want to wait.
519 * If we're creating a new file, it's possible that the worker
520 * previously marked the inode as free on disk but hasn't finished
521 * updating the incore state yet. The AGI buffer will be dirty and
522 * locked to the icreate transaction, so a synchronous push of the
523 * inodegc workers would result in deadlock. For a regular iget, the
524 * worker is running already, so we might as well wait.
525 *
526 * XXX(hch): eventually we should do something equivalent to
527 * wait_on_inode to wait for these flags to be cleared
528 * instead of polling for it.
529 */
530 if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING))
531 goto out_skip;
532
533 if (ip->i_flags & XFS_NEED_INACTIVE) {
534 /* Unlinked inodes cannot be re-grabbed. */
535 if (VFS_I(ip)->i_nlink == 0) {
536 error = -ENOENT;
537 goto out_error;
538 }
539 goto out_inodegc_flush;
540 }
541
542 /*
543 * Check the inode free state is valid. This also detects lookup
544 * racing with unlinks.
545 */
546 error = xfs_iget_check_free_state(ip, flags);
547 if (error)
548 goto out_error;
549
550 /* Skip inodes that have no vfs state. */
551 if ((flags & XFS_IGET_INCORE) &&
552 (ip->i_flags & XFS_IRECLAIMABLE))
553 goto out_skip;
554
555 /* The inode fits the selection criteria; process it. */
556 if (ip->i_flags & XFS_IRECLAIMABLE) {
557 /* Drops i_flags_lock and RCU read lock. */
558 error = xfs_iget_recycle(pag, ip);
559 if (error == -EAGAIN)
560 goto out_skip;
561 if (error)
562 return error;
563 } else {
564 /* If the VFS inode is being torn down, pause and try again. */
565 if (!igrab(inode))
566 goto out_skip;
567
568 /* We've got a live one. */
569 spin_unlock(&ip->i_flags_lock);
570 rcu_read_unlock();
571 trace_xfs_iget_hit(ip);
572 }
573
574 if (lock_flags != 0)
575 xfs_ilock(ip, lock_flags);
576
577 if (!(flags & XFS_IGET_INCORE))
578 xfs_iflags_clear(ip, XFS_ISTALE);
579 XFS_STATS_INC(mp, xs_ig_found);
580
581 return 0;
582
583out_skip:
584 trace_xfs_iget_skip(ip);
585 XFS_STATS_INC(mp, xs_ig_frecycle);
586 error = -EAGAIN;
587out_error:
588 spin_unlock(&ip->i_flags_lock);
589 rcu_read_unlock();
590 return error;
591
592out_inodegc_flush:
593 spin_unlock(&ip->i_flags_lock);
594 rcu_read_unlock();
595 /*
596 * Do not wait for the workers, because the caller could hold an AGI
597 * buffer lock. We're just going to sleep in a loop anyway.
598 */
599 if (xfs_is_inodegc_enabled(mp))
600 xfs_inodegc_queue_all(mp);
601 return -EAGAIN;
602}
603
604static int
605xfs_iget_cache_miss(
606 struct xfs_mount *mp,
607 struct xfs_perag *pag,
608 xfs_trans_t *tp,
609 xfs_ino_t ino,
610 struct xfs_inode **ipp,
611 int flags,
612 int lock_flags)
613{
614 struct xfs_inode *ip;
615 int error;
616 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino);
617
618 ip = xfs_inode_alloc(mp, ino);
619 if (!ip)
620 return -ENOMEM;
621
622 error = xfs_imap(pag, tp, ip->i_ino, &ip->i_imap, flags);
623 if (error)
624 goto out_destroy;
625
626 /*
627 * For version 5 superblocks, if we are initialising a new inode and we
628 * are not utilising the XFS_FEAT_IKEEP inode cluster mode, we can
629 * simply build the new inode core with a random generation number.
630 *
631 * For version 4 (and older) superblocks, log recovery is dependent on
632 * the i_flushiter field being initialised from the current on-disk
633 * value and hence we must also read the inode off disk even when
634 * initializing new inodes.
635 */
636 if (xfs_has_v3inodes(mp) &&
637 (flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) {
638 VFS_I(ip)->i_generation = get_random_u32();
639 } else {
640 struct xfs_buf *bp;
641
642 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
643 if (error)
644 goto out_destroy;
645
646 error = xfs_inode_from_disk(ip,
647 xfs_buf_offset(bp, ip->i_imap.im_boffset));
648 if (!error)
649 xfs_buf_set_ref(bp, XFS_INO_REF);
650 else
651 xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
652 xfs_trans_brelse(tp, bp);
653
654 if (error)
655 goto out_destroy;
656 }
657
658 trace_xfs_iget_miss(ip);
659
660 /*
661 * Check the inode free state is valid. This also detects lookup
662 * racing with unlinks.
663 */
664 error = xfs_iget_check_free_state(ip, flags);
665 if (error)
666 goto out_destroy;
667
668 /*
669 * Preload the radix tree so we can insert safely under the
670 * write spinlock. Note that we cannot sleep inside the preload
671 * region.
672 */
673 if (radix_tree_preload(GFP_KERNEL | __GFP_NOLOCKDEP)) {
674 error = -EAGAIN;
675 goto out_destroy;
676 }
677
678 /*
679 * Because the inode hasn't been added to the radix-tree yet it can't
680 * be found by another thread, so we can do the non-sleeping lock here.
681 */
682 if (lock_flags) {
683 if (!xfs_ilock_nowait(ip, lock_flags))
684 BUG();
685 }
686
687 /*
688 * These values must be set before inserting the inode into the radix
689 * tree as the moment it is inserted a concurrent lookup (allowed by the
690 * RCU locking mechanism) can find it and that lookup must see that this
691 * is an inode currently under construction (i.e. that XFS_INEW is set).
692 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
693 * memory barrier that ensures this detection works correctly at lookup
694 * time.
695 */
696 if (flags & XFS_IGET_DONTCACHE)
697 d_mark_dontcache(VFS_I(ip));
698 ip->i_udquot = NULL;
699 ip->i_gdquot = NULL;
700 ip->i_pdquot = NULL;
701 xfs_iflags_set(ip, XFS_INEW);
702
703 /* insert the new inode */
704 spin_lock(&pag->pag_ici_lock);
705 error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
706 if (unlikely(error)) {
707 WARN_ON(error != -EEXIST);
708 XFS_STATS_INC(mp, xs_ig_dup);
709 error = -EAGAIN;
710 goto out_preload_end;
711 }
712 spin_unlock(&pag->pag_ici_lock);
713 radix_tree_preload_end();
714
715 *ipp = ip;
716 return 0;
717
718out_preload_end:
719 spin_unlock(&pag->pag_ici_lock);
720 radix_tree_preload_end();
721 if (lock_flags)
722 xfs_iunlock(ip, lock_flags);
723out_destroy:
724 __destroy_inode(VFS_I(ip));
725 xfs_inode_free(ip);
726 return error;
727}
728
729/*
730 * Look up an inode by number in the given file system. The inode is looked up
731 * in the cache held in each AG. If the inode is found in the cache, initialise
732 * the vfs inode if necessary.
733 *
734 * If it is not in core, read it in from the file system's device, add it to the
735 * cache and initialise the vfs inode.
736 *
737 * The inode is locked according to the value of the lock_flags parameter.
738 * Inode lookup is only done during metadata operations and not as part of the
739 * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup.
740 */
741int
742xfs_iget(
743 struct xfs_mount *mp,
744 struct xfs_trans *tp,
745 xfs_ino_t ino,
746 uint flags,
747 uint lock_flags,
748 struct xfs_inode **ipp)
749{
750 struct xfs_inode *ip;
751 struct xfs_perag *pag;
752 xfs_agino_t agino;
753 int error;
754
755 ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
756
757 /* reject inode numbers outside existing AGs */
758 if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
759 return -EINVAL;
760
761 XFS_STATS_INC(mp, xs_ig_attempts);
762
763 /* get the perag structure and ensure that it's inode capable */
764 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
765 agino = XFS_INO_TO_AGINO(mp, ino);
766
767again:
768 error = 0;
769 rcu_read_lock();
770 ip = radix_tree_lookup(&pag->pag_ici_root, agino);
771
772 if (ip) {
773 error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
774 if (error)
775 goto out_error_or_again;
776 } else {
777 rcu_read_unlock();
778 if (flags & XFS_IGET_INCORE) {
779 error = -ENODATA;
780 goto out_error_or_again;
781 }
782 XFS_STATS_INC(mp, xs_ig_missed);
783
784 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
785 flags, lock_flags);
786 if (error)
787 goto out_error_or_again;
788 }
789 xfs_perag_put(pag);
790
791 *ipp = ip;
792
793 /*
794 * If we have a real type for an on-disk inode, we can setup the inode
795 * now. If it's a new inode being created, xfs_init_new_inode will
796 * handle it.
797 */
798 if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
799 xfs_setup_existing_inode(ip);
800 return 0;
801
802out_error_or_again:
803 if (!(flags & (XFS_IGET_INCORE | XFS_IGET_NORETRY)) &&
804 error == -EAGAIN) {
805 delay(1);
806 goto again;
807 }
808 xfs_perag_put(pag);
809 return error;
810}
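/*
 * A sketch of a typical metadata-path caller, for illustration only (the
 * enclosing transaction setup and error handling are elided):
 *
 *	error = xfs_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL, &ip);
 *	if (error)
 *		return error;
 *	...operate on the locked inode...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *	xfs_irele(ip);
 */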
811
812/*
813 * Grab the inode for reclaim exclusively.
814 *
815 * We have found this inode via a lookup under RCU, so the inode may have
816 * already been freed, or it may be in the process of being recycled by
817 * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
818 * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
819 * will not be set. Hence we need to check for both these flag conditions to
820 * avoid inodes that are no longer reclaim candidates.
821 *
822 * Note: checking for other state flags here, under the i_flags_lock or not, is
823 * racy and should be avoided. Those races should be resolved only after we have
824 * ensured that we are able to reclaim this inode and the world can see that we
825 * are going to reclaim it.
826 *
827 * Return true if we grabbed it, false otherwise.
828 */
829static bool
830xfs_reclaim_igrab(
831 struct xfs_inode *ip,
832 struct xfs_icwalk *icw)
833{
834 ASSERT(rcu_read_lock_held());
835
836 spin_lock(&ip->i_flags_lock);
837 if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
838 __xfs_iflags_test(ip, XFS_IRECLAIM)) {
839 /* not a reclaim candidate. */
840 spin_unlock(&ip->i_flags_lock);
841 return false;
842 }
843
844 /* Don't reclaim a sick inode unless the caller asked for it. */
845 if (ip->i_sick &&
846 (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) {
847 spin_unlock(&ip->i_flags_lock);
848 return false;
849 }
850
851 __xfs_iflags_set(ip, XFS_IRECLAIM);
852 spin_unlock(&ip->i_flags_lock);
853 return true;
854}
855
856/*
857 * Inode reclaim is non-blocking, so the default action if progress cannot be
858 * made is to "requeue" the inode for reclaim by unlocking it and clearing the
859 * XFS_IRECLAIM flag. If we are in a shutdown state, we no longer care
860 * about blocking, and hence we can wait on the inode until we are able
861 * to reclaim it.
862 *
863 * We do no IO here - if callers require inodes to be cleaned they must push the
864 * AIL first to trigger writeback of dirty inodes. This enables writeback to be
865 * done in the background in a non-blocking manner, and enables memory reclaim
866 * to make progress without blocking.
867 */
868static void
869xfs_reclaim_inode(
870 struct xfs_inode *ip,
871 struct xfs_perag *pag)
872{
873 xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */
874
875 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
876 goto out;
877 if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
878 goto out_iunlock;
879
880 /*
881 * Check for log shutdown because aborting the inode can move the log
882 * tail and corrupt in memory state. This is fine if the log is shut
883 * down, but if the log is still active and only the mount is shut down
884 * then the in-memory log tail movement caused by the abort can be
885 * incorrectly propagated to disk.
886 */
887 if (xlog_is_shutdown(ip->i_mount->m_log)) {
888 xfs_iunpin_wait(ip);
889 xfs_iflush_shutdown_abort(ip);
890 goto reclaim;
891 }
892 if (xfs_ipincount(ip))
893 goto out_clear_flush;
894 if (!xfs_inode_clean(ip))
895 goto out_clear_flush;
896
897 xfs_iflags_clear(ip, XFS_IFLUSHING);
898reclaim:
899 trace_xfs_inode_reclaiming(ip);
900
901 /*
902 * Because we use RCU freeing we need to ensure the inode always appears
903 * to be reclaimed with an invalid inode number when in the free state.
904 * We do this as early as possible under the ILOCK so that
905 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
906 * detect races with us here. By doing this, we guarantee that once
907 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
908 * it will see either a valid inode that will serialise correctly, or it
909 * will see an invalid inode that it can skip.
910 */
911 spin_lock(&ip->i_flags_lock);
912 ip->i_flags = XFS_IRECLAIM;
913 ip->i_ino = 0;
914 ip->i_sick = 0;
915 ip->i_checked = 0;
916 spin_unlock(&ip->i_flags_lock);
917
918 ASSERT(!ip->i_itemp || ip->i_itemp->ili_item.li_buf == NULL);
919 xfs_iunlock(ip, XFS_ILOCK_EXCL);
920
921 XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
922 /*
923 * Remove the inode from the per-AG radix tree.
924 *
925 * Because radix_tree_delete won't complain even if the item was never
926 * added to the tree, assert that it was there beforehand to catch
927 * problems with the inode lifetime early on.
928 */
929 spin_lock(&pag->pag_ici_lock);
930 if (!radix_tree_delete(&pag->pag_ici_root,
931 XFS_INO_TO_AGINO(ip->i_mount, ino)))
932 ASSERT(0);
933 xfs_perag_clear_inode_tag(pag, NULLAGINO, XFS_ICI_RECLAIM_TAG);
934 spin_unlock(&pag->pag_ici_lock);
935
936 /*
937 * Here we do an (almost) spurious inode lock in order to coordinate
938 * with inode cache radix tree lookups. This is because the lookup
939 * can reference the inodes in the cache without taking references.
940 *
941 * We make that OK here by ensuring that we wait until the inode is
942 * unlocked after the lookup before we go ahead and free it.
943 */
944 xfs_ilock(ip, XFS_ILOCK_EXCL);
945 ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
946 xfs_iunlock(ip, XFS_ILOCK_EXCL);
947 ASSERT(xfs_inode_clean(ip));
948
949 __xfs_inode_free(ip);
950 return;
951
952out_clear_flush:
953 xfs_iflags_clear(ip, XFS_IFLUSHING);
954out_iunlock:
955 xfs_iunlock(ip, XFS_ILOCK_EXCL);
956out:
957 xfs_iflags_clear(ip, XFS_IRECLAIM);
958}
959
960/* Reclaim sick inodes if we're unmounting or the fs went down. */
961static inline bool
962xfs_want_reclaim_sick(
963 struct xfs_mount *mp)
964{
965 return xfs_is_unmounting(mp) || xfs_has_norecovery(mp) ||
966 xfs_is_shutdown(mp);
967}
968
969void
970xfs_reclaim_inodes(
971 struct xfs_mount *mp)
972{
973 struct xfs_icwalk icw = {
974 .icw_flags = 0,
975 };
976
977 if (xfs_want_reclaim_sick(mp))
978 icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;
979
980 while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
981 xfs_ail_push_all_sync(mp->m_ail);
982 xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
983 }
984}
985
986/*
987 * The shrinker infrastructure determines how many inodes we should scan for
988 * reclaim. We want as many clean inodes ready to reclaim as possible, so we
989 * push the AIL here. We also want to proactively free up memory if we can to
990 * minimise the amount of work memory reclaim has to do so we kick the
991 * background reclaim if it isn't already scheduled.
992 */
993long
994xfs_reclaim_inodes_nr(
995 struct xfs_mount *mp,
996 unsigned long nr_to_scan)
997{
998 struct xfs_icwalk icw = {
999 .icw_flags = XFS_ICWALK_FLAG_SCAN_LIMIT,
1000 .icw_scan_limit = min_t(unsigned long, LONG_MAX, nr_to_scan),
1001 };
1002
1003 if (xfs_want_reclaim_sick(mp))
1004 icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;
1005
1006 /* kick background reclaimer and push the AIL */
1007 xfs_reclaim_work_queue(mp);
1008 xfs_ail_push_all(mp->m_ail);
1009
1010 xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
1011 return 0;
1012}
1013
1014/*
1015 * Return the number of reclaimable inodes in the filesystem for
1016 * the shrinker to determine how much to reclaim.
1017 */
1018long
1019xfs_reclaim_inodes_count(
1020 struct xfs_mount *mp)
1021{
1022 struct xfs_perag *pag;
1023 xfs_agnumber_t ag = 0;
1024 long reclaimable = 0;
1025
1026 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1027 ag = pag->pag_agno + 1;
1028 reclaimable += pag->pag_ici_reclaimable;
1029 xfs_perag_put(pag);
1030 }
1031 return reclaimable;
1032}
1033
1034STATIC bool
1035xfs_icwalk_match_id(
1036 struct xfs_inode *ip,
1037 struct xfs_icwalk *icw)
1038{
1039 if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
1040 !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
1041 return false;
1042
1043 if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
1044 !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
1045 return false;
1046
1047 if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
1048 ip->i_projid != icw->icw_prid)
1049 return false;
1050
1051 return true;
1052}
1053
1054/*
1055 * A union-based inode filtering algorithm. Process the inode if any of the
1056 * criteria match. This is for global/internal scans only.
1057 */
1058STATIC bool
1059xfs_icwalk_match_id_union(
1060 struct xfs_inode *ip,
1061 struct xfs_icwalk *icw)
1062{
1063 if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
1064 uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
1065 return true;
1066
1067 if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
1068 gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
1069 return true;
1070
1071 if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
1072 ip->i_projid == icw->icw_prid)
1073 return true;
1074
1075 return false;
1076}
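/*
 * For example, with both XFS_ICWALK_FLAG_UID and XFS_ICWALK_FLAG_GID set,
 * the default xfs_icwalk_match_id() filter only matches inodes whose uid
 * and gid both match, whereas this union variant matches inodes where
 * either one does.
 */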
1077
1078/*
1079 * Is this inode @ip eligible for eof/cow block reclamation, given some
1080 * filtering parameters @icw? The inode is eligible if @icw is null or
1081 * if the predicate functions match.
1082 */
1083static bool
1084xfs_icwalk_match(
1085 struct xfs_inode *ip,
1086 struct xfs_icwalk *icw)
1087{
1088 bool match;
1089
1090 if (!icw)
1091 return true;
1092
1093 if (icw->icw_flags & XFS_ICWALK_FLAG_UNION)
1094 match = xfs_icwalk_match_id_union(ip, icw);
1095 else
1096 match = xfs_icwalk_match_id(ip, icw);
1097 if (!match)
1098 return false;
1099
1100 /* skip the inode if the file size is too small */
1101 if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) &&
1102 XFS_ISIZE(ip) < icw->icw_min_file_size)
1103 return false;
1104
1105 return true;
1106}
1107
1108/*
1109 * This is a fast pass over the inode cache to try to get reclaim moving on as
1110 * many inodes as possible in a short period of time. It kicks itself every few
1111 * seconds, as well as being kicked by the inode cache shrinker when memory
1112 * goes low.
1113 */
1114void
1115xfs_reclaim_worker(
1116 struct work_struct *work)
1117{
1118 struct xfs_mount *mp = container_of(to_delayed_work(work),
1119 struct xfs_mount, m_reclaim_work);
1120
1121 xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
1122 xfs_reclaim_work_queue(mp);
1123}
1124
1125STATIC int
1126xfs_inode_free_eofblocks(
1127 struct xfs_inode *ip,
1128 struct xfs_icwalk *icw,
1129 unsigned int *lockflags)
1130{
1131 bool wait;
1132
1133 wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
1134
1135 if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
1136 return 0;
1137
1138 /*
1139 * If the mapping is dirty the operation can block and wait for some
1140 * time. Unless we are waiting, skip it.
1141 */
1142 if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
1143 return 0;
1144
1145 if (!xfs_icwalk_match(ip, icw))
1146 return 0;
1147
1148 /*
1149 * If the caller is waiting, return -EAGAIN to keep the background
1150 * scanner moving and revisit the inode in a subsequent pass.
1151 */
1152 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1153 if (wait)
1154 return -EAGAIN;
1155 return 0;
1156 }
1157 *lockflags |= XFS_IOLOCK_EXCL;
1158
1159 if (xfs_can_free_eofblocks(ip))
1160 return xfs_free_eofblocks(ip);
1161
1162 /* inode could be preallocated or append-only */
1163 trace_xfs_inode_free_eofblocks_invalid(ip);
1164 xfs_inode_clear_eofblocks_tag(ip);
1165 return 0;
1166}
1167
1168static void
1169xfs_blockgc_set_iflag(
1170 struct xfs_inode *ip,
1171 unsigned long iflag)
1172{
1173 struct xfs_mount *mp = ip->i_mount;
1174 struct xfs_perag *pag;
1175
1176 ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
1177
1178 /*
1179 * Don't bother locking the AG and looking up in the radix trees
1180 * if we already know that we have the tag set.
1181 */
1182 if (ip->i_flags & iflag)
1183 return;
1184 spin_lock(&ip->i_flags_lock);
1185 ip->i_flags |= iflag;
1186 spin_unlock(&ip->i_flags_lock);
1187
1188 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1189 spin_lock(&pag->pag_ici_lock);
1190
1191 xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1192 XFS_ICI_BLOCKGC_TAG);
1193
1194 spin_unlock(&pag->pag_ici_lock);
1195 xfs_perag_put(pag);
1196}
1197
1198void
1199xfs_inode_set_eofblocks_tag(
1200 xfs_inode_t *ip)
1201{
1202 trace_xfs_inode_set_eofblocks_tag(ip);
1203 return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
1204}
1205
1206static void
1207xfs_blockgc_clear_iflag(
1208 struct xfs_inode *ip,
1209 unsigned long iflag)
1210{
1211 struct xfs_mount *mp = ip->i_mount;
1212 struct xfs_perag *pag;
1213 bool clear_tag;
1214
1215 ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
1216
1217 spin_lock(&ip->i_flags_lock);
1218 ip->i_flags &= ~iflag;
1219 clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
1220 spin_unlock(&ip->i_flags_lock);
1221
1222 if (!clear_tag)
1223 return;
1224
1225 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1226 spin_lock(&pag->pag_ici_lock);
1227
1228 xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1229 XFS_ICI_BLOCKGC_TAG);
1230
1231 spin_unlock(&pag->pag_ici_lock);
1232 xfs_perag_put(pag);
1233}
1234
1235void
1236xfs_inode_clear_eofblocks_tag(
1237 xfs_inode_t *ip)
1238{
1239 trace_xfs_inode_clear_eofblocks_tag(ip);
1240 return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
1241}
1242
1243/*
1244 * Set ourselves up to free CoW blocks from this file. If it's already clean
1245 * then we can bail out quickly, but otherwise we must back off if the file
1246 * is undergoing some kind of write.
1247 */
1248static bool
1249xfs_prep_free_cowblocks(
1250 struct xfs_inode *ip)
1251{
1252 /*
1253 * Just clear the tag if we have an empty cow fork or none at all. It's
1254 * possible the inode was fully unshared since it was originally tagged.
1255 */
1256 if (!xfs_inode_has_cow_data(ip)) {
1257 trace_xfs_inode_free_cowblocks_invalid(ip);
1258 xfs_inode_clear_cowblocks_tag(ip);
1259 return false;
1260 }
1261
1262 /*
1263 * If the mapping is dirty or under writeback we cannot touch the
1264 * CoW fork. Leave it alone if we're in the midst of a directio.
1265 */
1266 if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
1267 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
1268 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
1269 atomic_read(&VFS_I(ip)->i_dio_count))
1270 return false;
1271
1272 return true;
1273}
1274
1275/*
1276 * Automatic CoW Reservation Freeing
1277 *
1278 * These functions automatically garbage collect leftover CoW reservations
1279 * that were made on behalf of a cowextsize hint when we start to run out
1280 * of quota or when the reservations sit around for too long. If the file
1281 * has dirty pages or is undergoing writeback, its CoW reservations will
1282 * be retained.
1283 *
1284 * The actual garbage collection piggybacks off the same code that runs
1285 * the speculative EOF preallocation garbage collector.
1286 */
1287STATIC int
1288xfs_inode_free_cowblocks(
1289 struct xfs_inode *ip,
1290 struct xfs_icwalk *icw,
1291 unsigned int *lockflags)
1292{
1293 bool wait;
1294 int ret = 0;
1295
1296 wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
1297
1298 if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
1299 return 0;
1300
1301 if (!xfs_prep_free_cowblocks(ip))
1302 return 0;
1303
1304 if (!xfs_icwalk_match(ip, icw))
1305 return 0;
1306
1307 /*
1308 * If the caller is waiting, return -EAGAIN to keep the background
1309 * scanner moving and revisit the inode in a subsequent pass.
1310 */
1311 if (!(*lockflags & XFS_IOLOCK_EXCL) &&
1312 !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1313 if (wait)
1314 return -EAGAIN;
1315 return 0;
1316 }
1317 *lockflags |= XFS_IOLOCK_EXCL;
1318
1319 if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
1320 if (wait)
1321 return -EAGAIN;
1322 return 0;
1323 }
1324 *lockflags |= XFS_MMAPLOCK_EXCL;
1325
1326 /*
1327 * Check again, nobody else should be able to dirty blocks or change
1328 * the reflink iflag now that we have the first two locks held.
1329 */
1330 if (xfs_prep_free_cowblocks(ip))
1331 ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
1332 return ret;
1333}
1334
1335void
1336xfs_inode_set_cowblocks_tag(
1337 xfs_inode_t *ip)
1338{
1339 trace_xfs_inode_set_cowblocks_tag(ip);
1340 return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
1341}
1342
1343void
1344xfs_inode_clear_cowblocks_tag(
1345 xfs_inode_t *ip)
1346{
1347 trace_xfs_inode_clear_cowblocks_tag(ip);
1348 return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
1349}
1350
1351/* Disable post-EOF and CoW block auto-reclamation. */
1352void
1353xfs_blockgc_stop(
1354 struct xfs_mount *mp)
1355{
1356 struct xfs_perag *pag;
1357 xfs_agnumber_t agno;
1358
1359 if (!xfs_clear_blockgc_enabled(mp))
1360 return;
1361
1362 for_each_perag(mp, agno, pag)
1363 cancel_delayed_work_sync(&pag->pag_blockgc_work);
1364 trace_xfs_blockgc_stop(mp, __return_address);
1365}
1366
1367/* Enable post-EOF and CoW block auto-reclamation. */
1368void
1369xfs_blockgc_start(
1370 struct xfs_mount *mp)
1371{
1372 struct xfs_perag *pag;
1373 xfs_agnumber_t agno;
1374
1375 if (xfs_set_blockgc_enabled(mp))
1376 return;
1377
1378 trace_xfs_blockgc_start(mp, __return_address);
1379 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1380 xfs_blockgc_queue(pag);
1381}
1382
1383/* Don't try to run block gc on an inode that's in any of these states. */
1384#define XFS_BLOCKGC_NOGRAB_IFLAGS (XFS_INEW | \
1385 XFS_NEED_INACTIVE | \
1386 XFS_INACTIVATING | \
1387 XFS_IRECLAIMABLE | \
1388 XFS_IRECLAIM)
1389/*
1390 * Decide if the given @ip is eligible for garbage collection of speculative
1391 * preallocations, and grab it if so. Returns true if it's ready to go or
1392 * false if we should just ignore it.
1393 */
1394static bool
1395xfs_blockgc_igrab(
1396 struct xfs_inode *ip)
1397{
1398 struct inode *inode = VFS_I(ip);
1399
1400 ASSERT(rcu_read_lock_held());
1401
1402 /* Check for stale RCU freed inode */
1403 spin_lock(&ip->i_flags_lock);
1404 if (!ip->i_ino)
1405 goto out_unlock_noent;
1406
1407 if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
1408 goto out_unlock_noent;
1409 spin_unlock(&ip->i_flags_lock);
1410
1411 /* nothing to sync during shutdown */
1412 if (xfs_is_shutdown(ip->i_mount))
1413 return false;
1414
1415 /* If we can't grab the inode, it must be on its way to reclaim. */
1416 if (!igrab(inode))
1417 return false;
1418
1419 /* inode is valid */
1420 return true;
1421
1422out_unlock_noent:
1423 spin_unlock(&ip->i_flags_lock);
1424 return false;
1425}
1426
1427/* Scan one incore inode for block preallocations that we can remove. */
1428static int
1429xfs_blockgc_scan_inode(
1430 struct xfs_inode *ip,
1431 struct xfs_icwalk *icw)
1432{
1433 unsigned int lockflags = 0;
1434 int error;
1435
1436 error = xfs_inode_free_eofblocks(ip, icw, &lockflags);
1437 if (error)
1438 goto unlock;
1439
1440 error = xfs_inode_free_cowblocks(ip, icw, &lockflags);
1441unlock:
1442 if (lockflags)
1443 xfs_iunlock(ip, lockflags);
1444 xfs_irele(ip);
1445 return error;
1446}
1447
1448/* Background worker that trims preallocated space. */
1449void
1450xfs_blockgc_worker(
1451 struct work_struct *work)
1452{
1453 struct xfs_perag *pag = container_of(to_delayed_work(work),
1454 struct xfs_perag, pag_blockgc_work);
1455 struct xfs_mount *mp = pag->pag_mount;
1456 int error;
1457
1458 trace_xfs_blockgc_worker(mp, __return_address);
1459
1460 error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
1461 if (error)
1462 xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
1463 pag->pag_agno, error);
1464 xfs_blockgc_queue(pag);
1465}
1466
1467/*
1468 * Try to free space in the filesystem by purging inactive inodes, eofblocks
1469 * and cowblocks.
1470 */
1471int
1472xfs_blockgc_free_space(
1473 struct xfs_mount *mp,
1474 struct xfs_icwalk *icw)
1475{
1476 int error;
1477
1478 trace_xfs_blockgc_free_space(mp, icw, _RET_IP_);
1479
1480 error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw);
1481 if (error)
1482 return error;
1483
1484 return xfs_inodegc_flush(mp);
1485}
1486
1487/*
1488 * Reclaim all the free space that we can by scheduling the background blockgc
1489 * and inodegc workers immediately and waiting for them all to clear.
1490 */
1491int
1492xfs_blockgc_flush_all(
1493 struct xfs_mount *mp)
1494{
1495 struct xfs_perag *pag;
1496 xfs_agnumber_t agno;
1497
1498 trace_xfs_blockgc_flush_all(mp, __return_address);
1499
1500 /*
1501 * For each blockgc worker, move its queue time up to now. If it
1502 * wasn't queued, it will not be requeued. Then flush whatever's
1503 * left.
1504 */
1505 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1506 mod_delayed_work(pag->pag_mount->m_blockgc_wq,
1507 &pag->pag_blockgc_work, 0);
1508
1509 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1510 flush_delayed_work(&pag->pag_blockgc_work);
1511
1512 return xfs_inodegc_flush(mp);
1513}
1514
1515/*
1516 * Run cow/eofblocks scans on the supplied dquots. We don't know exactly which
1517 * quota caused an allocation failure, so we make a best effort by including
1518 * each quota under low free space conditions (less than 1% free space) in the
1519 * scan.
1520 *
1521 * Callers must not hold any inode's ILOCK. If requesting a synchronous scan
1522 * (XFS_ICWALK_FLAG_SYNC), the caller also must not hold any inode's IOLOCK or
1523 * MMAPLOCK.
1524 */
1525int
1526xfs_blockgc_free_dquots(
1527 struct xfs_mount *mp,
1528 struct xfs_dquot *udqp,
1529 struct xfs_dquot *gdqp,
1530 struct xfs_dquot *pdqp,
1531 unsigned int iwalk_flags)
1532{
1533 struct xfs_icwalk icw = {0};
1534 bool do_work = false;
1535
1536 if (!udqp && !gdqp && !pdqp)
1537 return 0;
1538
1539 /*
1540 * Run a scan to free blocks using the union filter to cover all
1541 * applicable quotas in a single scan.
1542 */
1543 icw.icw_flags = XFS_ICWALK_FLAG_UNION | iwalk_flags;
1544
1545 if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
1546 icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
1547 icw.icw_flags |= XFS_ICWALK_FLAG_UID;
1548 do_work = true;
1549 }
1550
1551 if (XFS_IS_GQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
1552 icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
1553 icw.icw_flags |= XFS_ICWALK_FLAG_GID;
1554 do_work = true;
1555 }
1556
1557 if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
1558 icw.icw_prid = pdqp->q_id;
1559 icw.icw_flags |= XFS_ICWALK_FLAG_PRID;
1560 do_work = true;
1561 }
1562
1563 if (!do_work)
1564 return 0;
1565
1566 return xfs_blockgc_free_space(mp, &icw);
1567}
1568
1569/* Run cow/eofblocks scans on the quotas attached to the inode. */
1570int
1571xfs_blockgc_free_quota(
1572 struct xfs_inode *ip,
1573 unsigned int iwalk_flags)
1574{
1575 return xfs_blockgc_free_dquots(ip->i_mount,
1576 xfs_inode_dquot(ip, XFS_DQTYPE_USER),
1577 xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
1578 xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags);
1579}
1580
1581/* XFS Inode Cache Walking Code */
1582
1583/*
1584 * The inode lookup is done in batches to keep the amount of lock traffic and
1585 * radix tree lookups to a minimum. The batch size is a trade off between
1586 * lookup reduction and stack usage. This is in the reclaim path, so we can't
1587 * be too greedy.
1588 */
1589#define XFS_LOOKUP_BATCH 32
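/*
 * The lookup batch is a stack-local array of inode pointers, so a batch of
 * 32 costs 256 bytes of stack on 64-bit systems.
 */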
1590
1591
1592/*
1593 * Decide if we want to grab this inode in anticipation of doing work towards
1594 * the goal.
1595 */
1596static inline bool
1597xfs_icwalk_igrab(
1598 enum xfs_icwalk_goal goal,
1599 struct xfs_inode *ip,
1600 struct xfs_icwalk *icw)
1601{
1602 switch (goal) {
1603 case XFS_ICWALK_BLOCKGC:
1604 return xfs_blockgc_igrab(ip);
1605 case XFS_ICWALK_RECLAIM:
1606 return xfs_reclaim_igrab(ip, icw);
1607 default:
1608 return false;
1609 }
1610}
1611
1612/*
1613 * Process an inode. Each processing function must handle any state changes
1614 * made by the icwalk igrab function. Return -EAGAIN to skip an inode.
1615 */
1616static inline int
1617xfs_icwalk_process_inode(
1618 enum xfs_icwalk_goal goal,
1619 struct xfs_inode *ip,
1620 struct xfs_perag *pag,
1621 struct xfs_icwalk *icw)
1622{
1623 int error = 0;
1624
1625 switch (goal) {
1626 case XFS_ICWALK_BLOCKGC:
1627 error = xfs_blockgc_scan_inode(ip, icw);
1628 break;
1629 case XFS_ICWALK_RECLAIM:
1630 xfs_reclaim_inode(ip, pag);
1631 break;
1632 }
1633 return error;
1634}
1635
1636/*
1637 * For a given per-AG structure @pag and a goal, grab qualifying inodes and
1638 * process them in some manner.
1639 */
1640static int
1641xfs_icwalk_ag(
1642 struct xfs_perag *pag,
1643 enum xfs_icwalk_goal goal,
1644 struct xfs_icwalk *icw)
1645{
1646 struct xfs_mount *mp = pag->pag_mount;
1647 uint32_t first_index;
1648 int last_error = 0;
1649 int skipped;
1650 bool done;
1651 int nr_found;
1652
1653restart:
1654 done = false;
1655 skipped = 0;
1656 if (goal == XFS_ICWALK_RECLAIM)
1657 first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
1658 else
1659 first_index = 0;
1660 nr_found = 0;
1661 do {
1662 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1663 int error = 0;
1664 int i;
1665
1666 rcu_read_lock();
1667
1668 nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
1669 (void **) batch, first_index,
1670 XFS_LOOKUP_BATCH, goal);
1671 if (!nr_found) {
1672 done = true;
1673 rcu_read_unlock();
1674 break;
1675 }
1676
1677 /*
1678 * Grab the inodes before we drop the lock. If we found
1679 * nothing, nr == 0 and the loop will be skipped.
1680 */
1681 for (i = 0; i < nr_found; i++) {
1682 struct xfs_inode *ip = batch[i];
1683
1684 if (done || !xfs_icwalk_igrab(goal, ip, icw))
1685 batch[i] = NULL;
1686
1687 /*
1688 * Update the index for the next lookup. Catch
1689 * overflows into the next AG range which can occur if
1690 * we have inodes in the last block of the AG and we
1691 * are currently pointing to the last inode.
1692 *
1693 * Because we may see inodes that are from the wrong AG
1694 * due to RCU freeing and reallocation, only update the
1695 * index if it lies in this AG. It was a race that led
1696 * us to see this inode, so another lookup from the
1697 * same index will not find it again.
1698 */
1699 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
1700 continue;
1701 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1702 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1703 done = true;
1704 }
1705
1706 /* unlock now we've grabbed the inodes. */
1707 rcu_read_unlock();
1708
1709 for (i = 0; i < nr_found; i++) {
1710 if (!batch[i])
1711 continue;
1712 error = xfs_icwalk_process_inode(goal, batch[i], pag,
1713 icw);
1714 if (error == -EAGAIN) {
1715 skipped++;
1716 continue;
1717 }
1718 if (error && last_error != -EFSCORRUPTED)
1719 last_error = error;
1720 }
1721
1722 /* bail out if the filesystem is corrupted. */
1723 if (error == -EFSCORRUPTED)
1724 break;
1725
1726 cond_resched();
1727
1728 if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) {
1729 icw->icw_scan_limit -= XFS_LOOKUP_BATCH;
1730 if (icw->icw_scan_limit <= 0)
1731 break;
1732 }
1733 } while (nr_found && !done);
1734
1735 if (goal == XFS_ICWALK_RECLAIM) {
1736 if (done)
1737 first_index = 0;
1738 WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
1739 }
1740
1741 if (skipped) {
1742 delay(1);
1743 goto restart;
1744 }
1745 return last_error;
1746}
1747
1748/* Walk all incore inodes to achieve a given goal. */
1749static int
1750xfs_icwalk(
1751 struct xfs_mount *mp,
1752 enum xfs_icwalk_goal goal,
1753 struct xfs_icwalk *icw)
1754{
1755 struct xfs_perag *pag;
1756 int error = 0;
1757 int last_error = 0;
1758 xfs_agnumber_t agno;
1759
1760 for_each_perag_tag(mp, agno, pag, goal) {
1761 error = xfs_icwalk_ag(pag, goal, icw);
1762 if (error) {
1763 last_error = error;
1764 if (error == -EFSCORRUPTED) {
1765 xfs_perag_rele(pag);
1766 break;
1767 }
1768 }
1769 }
1770 return last_error;
1771 BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID);
1772}
1773
1774#ifdef DEBUG
1775static void
1776xfs_check_delalloc(
1777 struct xfs_inode *ip,
1778 int whichfork)
1779{
1780 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
1781 struct xfs_bmbt_irec got;
1782 struct xfs_iext_cursor icur;
1783
1784 if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
1785 return;
1786 do {
1787 if (isnullstartblock(got.br_startblock)) {
1788 xfs_warn(ip->i_mount,
1789 "ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
1790 ip->i_ino,
1791 whichfork == XFS_DATA_FORK ? "data" : "cow",
1792 got.br_startoff, got.br_blockcount);
1793 }
1794 } while (xfs_iext_next_extent(ifp, &icur, &got));
1795}
1796#else
1797#define xfs_check_delalloc(ip, whichfork) do { } while (0)
1798#endif
1799
1800/* Schedule the inode for reclaim. */
1801static void
1802xfs_inodegc_set_reclaimable(
1803 struct xfs_inode *ip)
1804{
1805 struct xfs_mount *mp = ip->i_mount;
1806 struct xfs_perag *pag;
1807
1808 if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) {
1809 xfs_check_delalloc(ip, XFS_DATA_FORK);
1810 xfs_check_delalloc(ip, XFS_COW_FORK);
1811 ASSERT(0);
1812 }
1813
1814 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1815 spin_lock(&pag->pag_ici_lock);
1816 spin_lock(&ip->i_flags_lock);
1817
1818 trace_xfs_inode_set_reclaimable(ip);
1819 ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING);
1820 ip->i_flags |= XFS_IRECLAIMABLE;
1821 xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1822 XFS_ICI_RECLAIM_TAG);
1823
1824 spin_unlock(&ip->i_flags_lock);
1825 spin_unlock(&pag->pag_ici_lock);
1826 xfs_perag_put(pag);
1827}
1828
1829/*
1830 * Free all speculative preallocations and possibly even the inode itself.
1831 * This is the last chance to make changes to an otherwise unreferenced file
1832 * before incore reclamation happens.
1833 */
1834static int
1835xfs_inodegc_inactivate(
1836 struct xfs_inode *ip)
1837{
1838 int error;
1839
1840 trace_xfs_inode_inactivating(ip);
1841 error = xfs_inactive(ip);
1842 xfs_inodegc_set_reclaimable(ip);
1843 return error;
1844
1845}
1846
1847void
1848xfs_inodegc_worker(
1849 struct work_struct *work)
1850{
1851 struct xfs_inodegc *gc = container_of(to_delayed_work(work),
1852 struct xfs_inodegc, work);
1853 struct llist_node *node = llist_del_all(&gc->list);
1854 struct xfs_inode *ip, *n;
1855 struct xfs_mount *mp = gc->mp;
1856 unsigned int nofs_flag;
1857
1858 /*
1859 * Clear the cpu mask bit and ensure that we have seen the latest
1860 * update of the gc structure associated with this CPU. This matches
1861 * with the release semantics used when setting the cpumask bit in
1862 * xfs_inodegc_queue.
1863 */
1864 cpumask_clear_cpu(gc->cpu, &mp->m_inodegc_cpumask);
1865 smp_mb__after_atomic();
1866
1867 WRITE_ONCE(gc->items, 0);
1868
1869 if (!node)
1870 return;
1871
1872 /*
1873 * We can allocate memory here while doing writeback on behalf of
1874 * memory reclaim. To avoid memory allocation deadlocks set the
1875 * task-wide nofs context for the following operations.
1876 */
1877 nofs_flag = memalloc_nofs_save();
1878
1879 ip = llist_entry(node, struct xfs_inode, i_gclist);
1880 trace_xfs_inodegc_worker(mp, READ_ONCE(gc->shrinker_hits));
1881
1882 WRITE_ONCE(gc->shrinker_hits, 0);
1883 llist_for_each_entry_safe(ip, n, node, i_gclist) {
1884 int error;
1885
1886 xfs_iflags_set(ip, XFS_INACTIVATING);
1887 error = xfs_inodegc_inactivate(ip);
1888 if (error && !gc->error)
1889 gc->error = error;
1890 }
1891
1892 memalloc_nofs_restore(nofs_flag);
1893}
1894
1895/*
1896 * Expedite all pending inodegc work to run immediately. This does not wait for
1897 * completion of the work.
1898 */
1899void
1900xfs_inodegc_push(
1901 struct xfs_mount *mp)
1902{
1903 if (!xfs_is_inodegc_enabled(mp))
1904 return;
1905 trace_xfs_inodegc_push(mp, __return_address);
1906 xfs_inodegc_queue_all(mp);
1907}
1908
1909/*
1910 * Force all currently queued inode inactivation work to run immediately and
1911 * wait for the work to finish.
1912 */
1913int
1914xfs_inodegc_flush(
1915 struct xfs_mount *mp)
1916{
1917 xfs_inodegc_push(mp);
1918 trace_xfs_inodegc_flush(mp, __return_address);
1919 return xfs_inodegc_wait_all(mp);
1920}
1921
1922/*
1923 * Flush all the pending work and then disable the inode inactivation background
1924 * workers and wait for them to stop. Caller must hold sb->s_umount to
1925 * coordinate changes in the inodegc_enabled state.
1926 */
1927void
1928xfs_inodegc_stop(
1929 struct xfs_mount *mp)
1930{
1931 bool rerun;
1932
1933 if (!xfs_clear_inodegc_enabled(mp))
1934 return;
1935
1936 /*
1937 * Drain all pending inodegc work, including inodes that could be
1938 * queued by racing xfs_inodegc_queue or xfs_inodegc_shrinker_scan
1939 * threads that sample the inodegc state just prior to us clearing it.
1940 * The inodegc flag state prevents new threads from queuing more
1941 * inodes, so we queue pending work items and flush the workqueue until
1942 * all inodegc lists are empty. IOWs, we cannot use drain_workqueue
1943 * here because it does not allow other unserialized mechanisms to
1944 * reschedule inodegc work while this draining is in progress.
1945 */
1946 xfs_inodegc_queue_all(mp);
1947 do {
1948 flush_workqueue(mp->m_inodegc_wq);
1949 rerun = xfs_inodegc_queue_all(mp);
1950 } while (rerun);
1951
1952 trace_xfs_inodegc_stop(mp, __return_address);
1953}
1954
1955/*
1956 * Enable the inode inactivation background workers and schedule deferred inode
1957 * inactivation work if there is any. Caller must hold sb->s_umount to
1958 * coordinate changes in the inodegc_enabled state.
1959 */
1960void
1961xfs_inodegc_start(
1962 struct xfs_mount *mp)
1963{
1964 if (xfs_set_inodegc_enabled(mp))
1965 return;
1966
1967 trace_xfs_inodegc_start(mp, __return_address);
1968 xfs_inodegc_queue_all(mp);
1969}
1970
1971#ifdef CONFIG_XFS_RT
1972static inline bool
1973xfs_inodegc_want_queue_rt_file(
1974 struct xfs_inode *ip)
1975{
1976 struct xfs_mount *mp = ip->i_mount;
1977
1978 if (!XFS_IS_REALTIME_INODE(ip))
1979 return false;
1980
1981 if (__percpu_counter_compare(&mp->m_frextents,
1982 mp->m_low_rtexts[XFS_LOWSP_5_PCNT],
1983 XFS_FDBLOCKS_BATCH) < 0)
1984 return true;
1985
1986 return false;
1987}
1988#else
1989# define xfs_inodegc_want_queue_rt_file(ip) (false)
1990#endif /* CONFIG_XFS_RT */
1991
1992/*
1993 * Schedule the inactivation worker when:
1994 *
1995 * - We've accumulated more than one inode cluster buffer's worth of inodes.
1996 * - There is less than 5% free space left.
1997 * - Any of the quotas for this inode are near an enforcement limit.
1998 */
1999static inline bool
2000xfs_inodegc_want_queue_work(
2001 struct xfs_inode *ip,
2002 unsigned int items)
2003{
2004 struct xfs_mount *mp = ip->i_mount;
2005
2006 if (items > mp->m_ino_geo.inodes_per_cluster)
2007 return true;
2008
2009 if (__percpu_counter_compare(&mp->m_fdblocks,
2010 mp->m_low_space[XFS_LOWSP_5_PCNT],
2011 XFS_FDBLOCKS_BATCH) < 0)
2012 return true;
2013
2014 if (xfs_inodegc_want_queue_rt_file(ip))
2015 return true;
2016
2017 if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_USER))
2018 return true;
2019
2020 if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_GROUP))
2021 return true;
2022
2023 if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_PROJ))
2024 return true;
2025
2026 return false;
2027}
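/*
 * Worked example (figures are illustrative, not taken from this file):
 * with 256-byte inodes and an 8 KiB inode cluster buffer,
 * inodes_per_cluster is 8192 / 256 = 32.  Once a per-cpu queue holds more
 * than 32 inodes, xfs_inodegc_queue() drops its delay and schedules the
 * worker immediately; below that, batching behind a short delay lets a
 * single inode cluster buffer I/O serve several inactivations.
 */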
2028
2029/*
2030 * Upper bound on the number of inodes that each per-CPU queue may hold for
2031 * inactivation at any given time, to avoid monopolizing the workqueue.
2032 */
2033#define XFS_INODEGC_MAX_BACKLOG (4 * XFS_INODES_PER_CHUNK)
2034
2035/*
2036 * Make the frontend wait for inactivations when:
2037 *
2038 * - Memory shrinkers queued the inactivation worker and it hasn't finished.
2039 * - The queue depth exceeds the maximum allowable percpu backlog.
2040 *
2041 * Note: If we are in a NOFS context here (e.g. the current thread is running
2042 * a transaction), then we don't want to block, as making inodegc progress may
2043 * require filesystem resources that we already hold, and that could result in
2044 * a deadlock.  Hence we skip the throttle if we are in a scoped NOFS context.
2045 */
2046static inline bool
2047xfs_inodegc_want_flush_work(
2048 struct xfs_inode *ip,
2049 unsigned int items,
2050 unsigned int shrinker_hits)
2051{
2052 if (current->flags & PF_MEMALLOC_NOFS)
2053 return false;
2054
2055 if (shrinker_hits > 0)
2056 return true;
2057
2058 if (items > XFS_INODEGC_MAX_BACKLOG)
2059 return true;
2060
2061 return false;
2062}
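/*
 * Illustrative sketch of why the PF_MEMALLOC_NOFS check above matters (the
 * caller shown is assumed): a thread inside a transaction runs under a
 * scoped NOFS context, e.g.:
 *
 *	nofs_flag = memalloc_nofs_save();	// sets PF_MEMALLOC_NOFS
 *	... hold log and buffer resources ...
 *	xfs_irele(ip);		// may reach xfs_inodegc_queue()
 *	memalloc_nofs_restore(nofs_flag);
 *
 * Blocking such a thread in flush_delayed_work() could deadlock if the
 * worker needs resources the thread already holds, so throttling is simply
 * skipped for NOFS contexts.
 */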
2063
2064/*
2065 * Queue a background inactivation worker if there are inodes that need to be
2066 * inactivated and higher level xfs code hasn't disabled the background
2067 * workers.
2068 */
2069static void
2070xfs_inodegc_queue(
2071 struct xfs_inode *ip)
2072{
2073 struct xfs_mount *mp = ip->i_mount;
2074 struct xfs_inodegc *gc;
2075 int items;
2076 unsigned int shrinker_hits;
2077 unsigned int cpu_nr;
2078 unsigned long queue_delay = 1;
2079
2080 trace_xfs_inode_set_need_inactive(ip);
2081 spin_lock(&ip->i_flags_lock);
2082 ip->i_flags |= XFS_NEED_INACTIVE;
2083 spin_unlock(&ip->i_flags_lock);
2084
2085 cpu_nr = get_cpu();
2086 gc = this_cpu_ptr(mp->m_inodegc);
2087 llist_add(&ip->i_gclist, &gc->list);
2088 items = READ_ONCE(gc->items);
2089 WRITE_ONCE(gc->items, items + 1);
2090 shrinker_hits = READ_ONCE(gc->shrinker_hits);
2091
2092 /*
2093 * Ensure the list add is always seen by anyone who finds the cpumask
2094 * bit set. This effectively gives the cpumask bit set operation
2095 * release ordering semantics.
2096 */
2097 smp_mb__before_atomic();
2098 if (!cpumask_test_cpu(cpu_nr, &mp->m_inodegc_cpumask))
2099 cpumask_test_and_set_cpu(cpu_nr, &mp->m_inodegc_cpumask);
2100
2101 /*
2102 * We queue the work while holding the current CPU so that the work
2103 * is scheduled to run on this CPU.
2104 */
2105 if (!xfs_is_inodegc_enabled(mp)) {
2106 put_cpu();
2107 return;
2108 }
2109
2110 if (xfs_inodegc_want_queue_work(ip, items))
2111 queue_delay = 0;
2112
2113 trace_xfs_inodegc_queue(mp, __return_address);
2114 mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
2115 queue_delay);
2116 put_cpu();
2117
2118 if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
2119 trace_xfs_inodegc_throttle(mp, __return_address);
2120 flush_delayed_work(&gc->work);
2121 }
2122}
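/*
 * Memory-ordering note (editorial; the reading side lives elsewhere in
 * this file): the smp_mb__before_atomic() above orders the llist_add()
 * before the cpumask bit store, so a thread that later observes the bit
 * set, e.g. xfs_inodegc_queue_all() or the shrinker iterating
 * for_each_cpu(cpu, &mp->m_inodegc_cpumask), should also observe a
 * non-empty gc->list for that CPU.  Without the barrier a queue_all pass
 * could see the bit but an apparently empty list and wrongly conclude
 * there is no work to schedule.
 */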
2123
2124/*
2125 * We set the inode flag atomically with the radix tree tag. Once we get tag
2126 * lookups on the radix tree, this inode flag can go away.
2127 *
2128 * We always use background reclaim here because even if the inode is clean, it
2129 * still may be under IO and hence we have to wait for IO completion to occur
2130 * before we can reclaim the inode. The background reclaim path handles this
2131 * more efficiently than we can here, so simply let background reclaim tear down
2132 * all inodes.
2133 */
2134void
2135xfs_inode_mark_reclaimable(
2136 struct xfs_inode *ip)
2137{
2138 struct xfs_mount *mp = ip->i_mount;
2139 bool need_inactive;
2140
2141 XFS_STATS_INC(mp, vn_reclaim);
2142
2143 /*
2144 * We should never get here with any of the reclaim flags already set.
2145 */
2146 ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_ALL_IRECLAIM_FLAGS));
2147
2148 need_inactive = xfs_inode_needs_inactive(ip);
2149 if (need_inactive) {
2150 xfs_inodegc_queue(ip);
2151 return;
2152 }
2153
2154 /* Going straight to reclaim, so drop the dquots. */
2155 xfs_qm_dqdetach(ip);
2156 xfs_inodegc_set_reclaimable(ip);
2157}
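/*
 * Editorial summary of the two disposal paths above: when the VFS evicts
 * an XFS inode it arrives here and takes one of two routes:
 *
 *	needs inactivation -> xfs_inodegc_queue()          (deferred work)
 *	already clean      -> xfs_inodegc_set_reclaimable() (straight to reclaim)
 *
 * Either way the in-core inode is only freed later by the background
 * reclaim walk, never synchronously in this call chain.
 */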
2158
2159/*
2160 * Register a phony shrinker so that we can run background inodegc sooner when
2161 * there's memory pressure. Inactivation does not itself free any memory but
2162 * it does make inodes reclaimable, which eventually frees memory.
2163 *
2164 * The count function, seek value, and batch value are crafted to trigger the
2165 * scan function during the second round of scanning. Hopefully this means
2166 * that we reclaimed enough memory that initiating metadata transactions won't
2167 * make things worse.
2168 */
2169#define XFS_INODEGC_SHRINKER_COUNT (1UL << DEF_PRIORITY)
2170#define XFS_INODEGC_SHRINKER_BATCH ((XFS_INODEGC_SHRINKER_COUNT / 2) + 1)
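/*
 * Rough arithmetic behind the "second round" claim above (illustrative;
 * the precise trigger point depends on the mm shrinker heuristics of the
 * running kernel): DEF_PRIORITY is 12, so COUNT is 4096 and BATCH is 2049.
 * With ->seeks set to 0 the core mm proposes scanning roughly COUNT / 2 =
 * 2048 objects per pass, which stays just below BATCH on the first pass
 * and is deferred; the carried-over deferral pushes a later pass over
 * BATCH, at which point ->scan_objects finally runs.
 */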
2171
2172static unsigned long
2173xfs_inodegc_shrinker_count(
2174 struct shrinker *shrink,
2175 struct shrink_control *sc)
2176{
2177 struct xfs_mount *mp = shrink->private_data;
2178 struct xfs_inodegc *gc;
2179 int cpu;
2180
2181 if (!xfs_is_inodegc_enabled(mp))
2182 return 0;
2183
2184 for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
2185 gc = per_cpu_ptr(mp->m_inodegc, cpu);
2186 if (!llist_empty(&gc->list))
2187 return XFS_INODEGC_SHRINKER_COUNT;
2188 }
2189
2190 return 0;
2191}
2192
2193static unsigned long
2194xfs_inodegc_shrinker_scan(
2195 struct shrinker *shrink,
2196 struct shrink_control *sc)
2197{
2198 struct xfs_mount *mp = shrink->private_data;
2199 struct xfs_inodegc *gc;
2200 int cpu;
2201 bool no_items = true;
2202
2203 if (!xfs_is_inodegc_enabled(mp))
2204 return SHRINK_STOP;
2205
2206 trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address);
2207
2208 for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
2209 gc = per_cpu_ptr(mp->m_inodegc, cpu);
2210 if (!llist_empty(&gc->list)) {
2211 unsigned int h = READ_ONCE(gc->shrinker_hits);
2212
2213 WRITE_ONCE(gc->shrinker_hits, h + 1);
2214 mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
2215 no_items = false;
2216 }
2217 }
2218
2219 /*
2220 * If there are no inodes to inactivate, we don't want the shrinker
2221 * to think there's deferred work to call us back about.
2222 */
2223 if (no_items)
2224 return LONG_MAX;
2225
2226 return SHRINK_STOP;
2227}
2228
2229/* Register a shrinker so we can accelerate inodegc and throttle queuing. */
2230int
2231xfs_inodegc_register_shrinker(
2232 struct xfs_mount *mp)
2233{
2234 mp->m_inodegc_shrinker = shrinker_alloc(SHRINKER_NONSLAB,
2235 "xfs-inodegc:%s",
2236 mp->m_super->s_id);
2237 if (!mp->m_inodegc_shrinker)
2238 return -ENOMEM;
2239
2240 mp->m_inodegc_shrinker->count_objects = xfs_inodegc_shrinker_count;
2241 mp->m_inodegc_shrinker->scan_objects = xfs_inodegc_shrinker_scan;
2242 mp->m_inodegc_shrinker->seeks = 0;
2243 mp->m_inodegc_shrinker->batch = XFS_INODEGC_SHRINKER_BATCH;
2244 mp->m_inodegc_shrinker->private_data = mp;
2245
2246 shrinker_register(mp->m_inodegc_shrinker);
2247
2248 return 0;
2249}
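/*
 * Presumed teardown counterpart (not part of this excerpt): a shrinker
 * obtained from shrinker_alloc() is normally released with
 * shrinker_free(mp->m_inodegc_shrinker) at unmount time, after inodegc has
 * been stopped so the callbacks can no longer find queued inodes.
 */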