1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
12#include "xfs_bit.h"
13#include "xfs_sb.h"
14#include "xfs_mount.h"
15#include "xfs_inode.h"
16#include "xfs_iwalk.h"
17#include "xfs_quota.h"
18#include "xfs_bmap.h"
19#include "xfs_bmap_util.h"
20#include "xfs_trans.h"
21#include "xfs_trans_space.h"
22#include "xfs_qm.h"
23#include "xfs_trace.h"
24#include "xfs_icache.h"
25#include "xfs_error.h"
26#include "xfs_ag.h"
27#include "xfs_ialloc.h"
28#include "xfs_log_priv.h"
29#include "xfs_health.h"
30
31/*
32 * The global quota manager. There is only one of these for the entire
33 * system, _not_ one per file system. XQM keeps track of the overall
34 * quota functionality, including maintaining the freelist and hash
35 * tables of dquots.
36 */
37STATIC int xfs_qm_init_quotainos(struct xfs_mount *mp);
38STATIC int xfs_qm_init_quotainfo(struct xfs_mount *mp);
39
40STATIC void xfs_qm_destroy_quotainos(struct xfs_quotainfo *qi);
41STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
42/*
43 * We use the batch lookup interface to iterate over the dquots as it
44 * currently is the only interface into the radix tree code that allows
45 * fuzzy lookups instead of exact matches. Holding the lock over multiple
46 * operations is fine as all callers run only during mount/umount
47 * or quotaoff.
48 */
49#define XFS_DQ_LOOKUP_BATCH 32
50
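/*
 * Walk all dquots of @type in the quota radix tree in batches, calling
 * @execute on each one. Dquots that return -EAGAIN are retried after a
 * short delay; the walk aborts early if the filesystem is found corrupt.
 */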
51STATIC int
52xfs_qm_dquot_walk(
53 struct xfs_mount *mp,
54 xfs_dqtype_t type,
55 int (*execute)(struct xfs_dquot *dqp, void *data),
56 void *data)
57{
58 struct xfs_quotainfo *qi = mp->m_quotainfo;
59 struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
60 uint32_t next_index;
61 int last_error = 0;
62 int skipped;
63 int nr_found;
64
65restart:
66 skipped = 0;
67 next_index = 0;
68 nr_found = 0;
69
70 while (1) {
71 struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
72 int error;
73 int i;
74
75 mutex_lock(&qi->qi_tree_lock);
76 nr_found = radix_tree_gang_lookup(tree, (void **)batch,
77 next_index, XFS_DQ_LOOKUP_BATCH);
78 if (!nr_found) {
79 mutex_unlock(&qi->qi_tree_lock);
80 break;
81 }
82
83 for (i = 0; i < nr_found; i++) {
84 struct xfs_dquot *dqp = batch[i];
85
86 next_index = dqp->q_id + 1;
87
88 error = execute(batch[i], data);
89 if (error == -EAGAIN) {
90 skipped++;
91 continue;
92 }
93 if (error && last_error != -EFSCORRUPTED)
94 last_error = error;
95 }
96
97 mutex_unlock(&qi->qi_tree_lock);
98
99 /* bail out if the filesystem is corrupted. */
100 if (last_error == -EFSCORRUPTED) {
101 skipped = 0;
102 break;
103 }
104 /* we're done if id overflows back to zero */
105 if (!next_index)
106 break;
107 }
108
109 if (skipped) {
110 delay(1);
111 goto restart;
112 }
113
114 return last_error;
115}
116
117
118/*
119 * Purge a dquot from all tracking data structures and free it.
120 */
121STATIC int
122xfs_qm_dqpurge(
123 struct xfs_dquot *dqp,
124 void *data)
125{
126 struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo;
127 int error = -EAGAIN;
128
129 xfs_dqlock(dqp);
130 if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0)
131 goto out_unlock;
132
133 dqp->q_flags |= XFS_DQFLAG_FREEING;
134
135 xfs_dqflock(dqp);
136
137 /*
138 * If we are turning this type of quotas off, we don't care
139 * about the dirty metadata sitting in this dquot. OTOH, if
140 * we're unmounting, we do care, so we flush it and wait.
141 */
142 if (XFS_DQ_IS_DIRTY(dqp)) {
143 struct xfs_buf *bp = NULL;
144
145 /*
146 * We don't care about getting disk errors here. We need
147 * to purge this dquot anyway, so we go ahead regardless.
148 */
149 error = xfs_qm_dqflush(dqp, &bp);
150 if (!error) {
151 error = xfs_bwrite(bp);
152 xfs_buf_relse(bp);
153 } else if (error == -EAGAIN) {
154 dqp->q_flags &= ~XFS_DQFLAG_FREEING;
155 goto out_unlock;
156 }
157 xfs_dqflock(dqp);
158 }
159
160 ASSERT(atomic_read(&dqp->q_pincount) == 0);
161 ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) ||
162 !test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags));
163
164 xfs_dqfunlock(dqp);
165 xfs_dqunlock(dqp);
166
167 radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
168 qi->qi_dquots--;
169
170 /*
171 * We move dquots to the freelist as soon as their reference count
172 * hits zero, so it really should be on the freelist here.
173 */
174 ASSERT(!list_empty(&dqp->q_lru));
175 list_lru_del_obj(&qi->qi_lru, &dqp->q_lru);
176 XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
177
178 xfs_qm_dqdestroy(dqp);
179 return 0;
180
181out_unlock:
182 xfs_dqunlock(dqp);
183 return error;
184}
185
186/*
187 * Purge the dquot cache.
188 */
189static void
190xfs_qm_dqpurge_all(
191 struct xfs_mount *mp)
192{
193 xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL);
194 xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL);
195 xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL);
196}
197
198/*
199 * Purge all cached dquots and destroy the quotainfo structure.
200 */
201void
202xfs_qm_unmount(
203 struct xfs_mount *mp)
204{
205 if (mp->m_quotainfo) {
206 xfs_qm_dqpurge_all(mp);
207 xfs_qm_destroy_quotainfo(mp);
208 }
209}
210
211/*
212 * Called from the vfsops layer.
213 */
214void
215xfs_qm_unmount_quotas(
216 xfs_mount_t *mp)
217{
218 /*
219 * Release the dquots that root inode, et al might be holding,
220 * before we flush quotas and blow away the quotainfo structure.
221 */
222 ASSERT(mp->m_rootip);
223 xfs_qm_dqdetach(mp->m_rootip);
224 if (mp->m_rbmip)
225 xfs_qm_dqdetach(mp->m_rbmip);
226 if (mp->m_rsumip)
227 xfs_qm_dqdetach(mp->m_rsumip);
228
229 /*
230 * Release the quota inodes.
231 */
232 if (mp->m_quotainfo) {
233 if (mp->m_quotainfo->qi_uquotaip) {
234 xfs_irele(mp->m_quotainfo->qi_uquotaip);
235 mp->m_quotainfo->qi_uquotaip = NULL;
236 }
237 if (mp->m_quotainfo->qi_gquotaip) {
238 xfs_irele(mp->m_quotainfo->qi_gquotaip);
239 mp->m_quotainfo->qi_gquotaip = NULL;
240 }
241 if (mp->m_quotainfo->qi_pquotaip) {
242 xfs_irele(mp->m_quotainfo->qi_pquotaip);
243 mp->m_quotainfo->qi_pquotaip = NULL;
244 }
245 }
246}
247
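/*
 * Attach a dquot of the given quota type to the inode. If one is already
 * cached in *IO_idqpp we are done; otherwise look it up (allocating it on
 * disk if @doalloc is set) and store the result in *IO_idqpp.
 */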
248STATIC int
249xfs_qm_dqattach_one(
250 struct xfs_inode *ip,
251 xfs_dqtype_t type,
252 bool doalloc,
253 struct xfs_dquot **IO_idqpp)
254{
255 struct xfs_dquot *dqp;
256 int error;
257
258 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
259 error = 0;
260
261 /*
262 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
263 * or &i_gdquot. This made the code look weird, but made the logic a lot
264 * simpler.
265 */
266 dqp = *IO_idqpp;
267 if (dqp) {
268 trace_xfs_dqattach_found(dqp);
269 return 0;
270 }
271
272 /*
273 * Find the dquot from somewhere. This bumps the reference count of
274 * the dquot and returns it locked. This can return ENOENT if the dquot didn't
275 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
276 * turned off suddenly.
277 */
278 error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp);
279 if (error)
280 return error;
281
282 trace_xfs_dqattach_get(dqp);
283
284 /*
285 * dqget may have dropped and re-acquired the ilock, but it guarantees
286 * that the dquot returned is the one that should go in the inode.
287 */
288 *IO_idqpp = dqp;
289 xfs_dqunlock(dqp);
290 return 0;
291}
292
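/*
 * Decide whether this inode needs dquots attached: quotas must be enabled,
 * the inode must be missing at least one applicable dquot, and it must not
 * itself be one of the quota inodes.
 */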
293static bool
294xfs_qm_need_dqattach(
295 struct xfs_inode *ip)
296{
297 struct xfs_mount *mp = ip->i_mount;
298
299 if (!XFS_IS_QUOTA_ON(mp))
300 return false;
301 if (!XFS_NOT_DQATTACHED(mp, ip))
302 return false;
303 if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
304 return false;
305 return true;
306}
307
308/*
309 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
310 * into account.
311 * If @doalloc is true, the dquot(s) will be allocated if needed.
312 * Inode may get unlocked and relocked in here, and the caller must deal with
313 * the consequences.
314 */
315int
316xfs_qm_dqattach_locked(
317 xfs_inode_t *ip,
318 bool doalloc)
319{
320 xfs_mount_t *mp = ip->i_mount;
321 int error = 0;
322
323 if (!xfs_qm_need_dqattach(ip))
324 return 0;
325
326 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
327
328 if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
329 error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER,
330 doalloc, &ip->i_udquot);
331 if (error)
332 goto done;
333 ASSERT(ip->i_udquot);
334 }
335
336 if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
337 error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP,
338 doalloc, &ip->i_gdquot);
339 if (error)
340 goto done;
341 ASSERT(ip->i_gdquot);
342 }
343
344 if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
345 error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ,
346 doalloc, &ip->i_pdquot);
347 if (error)
348 goto done;
349 ASSERT(ip->i_pdquot);
350 }
351
352done:
353 /*
354 * Don't worry about the dquots that we may have attached before any
355 * error - they'll get detached later if it has not already been done.
356 */
357 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
358 return error;
359}
360
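/*
 * Lock the inode and attach its dquots, without allocating missing dquots
 * on disk.
 */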
361int
362xfs_qm_dqattach(
363 struct xfs_inode *ip)
364{
365 int error;
366
367 if (!xfs_qm_need_dqattach(ip))
368 return 0;
369
370 xfs_ilock(ip, XFS_ILOCK_EXCL);
371 error = xfs_qm_dqattach_locked(ip, false);
372 xfs_iunlock(ip, XFS_ILOCK_EXCL);
373
374 return error;
375}
376
377/*
378 * Release dquots (and their references) if any.
379 * The inode should be locked EXCL except when this is called by
380 * xfs_ireclaim.
381 */
382void
383xfs_qm_dqdetach(
384 xfs_inode_t *ip)
385{
386 if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
387 return;
388
389 trace_xfs_dquot_dqdetach(ip);
390
391 ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
392 if (ip->i_udquot) {
393 xfs_qm_dqrele(ip->i_udquot);
394 ip->i_udquot = NULL;
395 }
396 if (ip->i_gdquot) {
397 xfs_qm_dqrele(ip->i_gdquot);
398 ip->i_gdquot = NULL;
399 }
400 if (ip->i_pdquot) {
401 xfs_qm_dqrele(ip->i_pdquot);
402 ip->i_pdquot = NULL;
403 }
404}
405
406struct xfs_qm_isolate {
407 struct list_head buffers;
408 struct list_head dispose;
409};
410
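/*
 * list_lru isolation callback for the dquot shrinker. Decide whether an
 * unreferenced dquot on the LRU can be reclaimed: dirty dquots are flushed
 * and revisited later, while clean, unused dquots are moved to the dispose
 * list for freeing by the caller.
 */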
411static enum lru_status
412xfs_qm_dquot_isolate(
413 struct list_head *item,
414 struct list_lru_one *lru,
415 spinlock_t *lru_lock,
416 void *arg)
417 __releases(lru_lock) __acquires(lru_lock)
418{
419 struct xfs_dquot *dqp = container_of(item,
420 struct xfs_dquot, q_lru);
421 struct xfs_qm_isolate *isol = arg;
422
423 if (!xfs_dqlock_nowait(dqp))
424 goto out_miss_busy;
425
426 /*
427 * If something else is freeing this dquot and hasn't yet removed it
428 * from the LRU, leave it for the freeing task to complete the freeing
429 * process rather than risk it being freed from under us here.
430 */
431 if (dqp->q_flags & XFS_DQFLAG_FREEING)
432 goto out_miss_unlock;
433
434 /*
435 * This dquot has acquired a reference in the meantime; remove it from
436 * the freelist and try again.
437 */
438 if (dqp->q_nrefs) {
439 xfs_dqunlock(dqp);
440 XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
441
442 trace_xfs_dqreclaim_want(dqp);
443 list_lru_isolate(lru, &dqp->q_lru);
444 XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
445 return LRU_REMOVED;
446 }
447
448 /*
449 * If the dquot is dirty, flush it. If it's already being flushed, just
450 * skip it so there is time for the IO to complete before we try to
451 * reclaim it again on the next LRU pass.
452 */
453 if (!xfs_dqflock_nowait(dqp))
454 goto out_miss_unlock;
455
456 if (XFS_DQ_IS_DIRTY(dqp)) {
457 struct xfs_buf *bp = NULL;
458 int error;
459
460 trace_xfs_dqreclaim_dirty(dqp);
461
462 /* we have to drop the LRU lock to flush the dquot */
463 spin_unlock(lru_lock);
464
465 error = xfs_qm_dqflush(dqp, &bp);
466 if (error)
467 goto out_unlock_dirty;
468
469 xfs_buf_delwri_queue(bp, &isol->buffers);
470 xfs_buf_relse(bp);
471 goto out_unlock_dirty;
472 }
473 xfs_dqfunlock(dqp);
474
475 /*
476 * Prevent lookups now that we are past the point of no return.
477 */
478 dqp->q_flags |= XFS_DQFLAG_FREEING;
479 xfs_dqunlock(dqp);
480
481 ASSERT(dqp->q_nrefs == 0);
482 list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
483 XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
484 trace_xfs_dqreclaim_done(dqp);
485 XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
486 return LRU_REMOVED;
487
488out_miss_unlock:
489 xfs_dqunlock(dqp);
490out_miss_busy:
491 trace_xfs_dqreclaim_busy(dqp);
492 XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
493 return LRU_SKIP;
494
495out_unlock_dirty:
496 trace_xfs_dqreclaim_busy(dqp);
497 XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
498 xfs_dqunlock(dqp);
499 spin_lock(lru_lock);
500 return LRU_RETRY;
501}
502
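/*
 * Shrinker scan callback: walk the dquot LRU, submit any buffers queued by
 * the isolation pass, then free the dquots that were moved to the dispose
 * list. Returns the number of objects removed from the LRU.
 */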
503static unsigned long
504xfs_qm_shrink_scan(
505 struct shrinker *shrink,
506 struct shrink_control *sc)
507{
508 struct xfs_quotainfo *qi = shrink->private_data;
509 struct xfs_qm_isolate isol;
510 unsigned long freed;
511 int error;
512
513 if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
514 return 0;
515
516 INIT_LIST_HEAD(&isol.buffers);
517 INIT_LIST_HEAD(&isol.dispose);
518
519 freed = list_lru_shrink_walk(&qi->qi_lru, sc,
520 xfs_qm_dquot_isolate, &isol);
521
522 error = xfs_buf_delwri_submit(&isol.buffers);
523 if (error)
524 xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
525
526 while (!list_empty(&isol.dispose)) {
527 struct xfs_dquot *dqp;
528
529 dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
530 list_del_init(&dqp->q_lru);
531 xfs_qm_dqfree_one(dqp);
532 }
533
534 return freed;
535}
536
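/*
 * Shrinker count callback: report how many dquots currently sit on the LRU
 * and are therefore candidates for reclaim.
 */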
537static unsigned long
538xfs_qm_shrink_count(
539 struct shrinker *shrink,
540 struct shrink_control *sc)
541{
542 struct xfs_quotainfo *qi = shrink->private_data;
543
544 return list_lru_shrink_count(&qi->qi_lru, sc);
545}
546
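/*
 * Read the id-zero dquot of @type directly from disk and copy its limits
 * into the in-core default limits for that quota type.
 */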
547STATIC void
548xfs_qm_set_defquota(
549 struct xfs_mount *mp,
550 xfs_dqtype_t type,
551 struct xfs_quotainfo *qinf)
552{
553 struct xfs_dquot *dqp;
554 struct xfs_def_quota *defq;
555 int error;
556
557 error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
558 if (error)
559 return;
560
561 defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));
562
563 /*
564 * Timers and warnings have been already set, let's just set the
565 * default limits for this quota type
566 */
567 defq->blk.hard = dqp->q_blk.hardlimit;
568 defq->blk.soft = dqp->q_blk.softlimit;
569 defq->ino.hard = dqp->q_ino.hardlimit;
570 defq->ino.soft = dqp->q_ino.softlimit;
571 defq->rtb.hard = dqp->q_rtb.hardlimit;
572 defq->rtb.soft = dqp->q_rtb.softlimit;
573 xfs_qm_dqdestroy(dqp);
574}
575
576/* Initialize quota time limits from the root dquot. */
577static void
578xfs_qm_init_timelimits(
579 struct xfs_mount *mp,
580 xfs_dqtype_t type)
581{
582 struct xfs_quotainfo *qinf = mp->m_quotainfo;
583 struct xfs_def_quota *defq;
584 struct xfs_dquot *dqp;
585 int error;
586
587 defq = xfs_get_defquota(qinf, type);
588
589 defq->blk.time = XFS_QM_BTIMELIMIT;
590 defq->ino.time = XFS_QM_ITIMELIMIT;
591 defq->rtb.time = XFS_QM_RTBTIMELIMIT;
592
593 /*
594 * We try to get the limits from the superuser's limits fields.
595 * This is quite hacky, but it is standard quota practice.
596 *
597 * Since we may not have done a quotacheck by this point, just read
598 * the dquot without attaching it to any hashtables or lists.
599 */
600 error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
601 if (error)
602 return;
603
604 /*
605 * The warnings and timers set the grace period given to a user or
606 * group before he or she can no longer perform any writes. If it
607 * is zero, a default is used.
608 */
609 if (dqp->q_blk.timer)
610 defq->blk.time = dqp->q_blk.timer;
611 if (dqp->q_ino.timer)
612 defq->ino.time = dqp->q_ino.timer;
613 if (dqp->q_rtb.timer)
614 defq->rtb.time = dqp->q_rtb.timer;
615
616 xfs_qm_dqdestroy(dqp);
617}
618
619/*
620 * This initializes all the quota information that's kept in the
621 * mount structure
622 */
623STATIC int
624xfs_qm_init_quotainfo(
625 struct xfs_mount *mp)
626{
627 struct xfs_quotainfo *qinf;
628 int error;
629
630 ASSERT(XFS_IS_QUOTA_ON(mp));
631
632 qinf = mp->m_quotainfo = kzalloc(sizeof(struct xfs_quotainfo),
633 GFP_KERNEL | __GFP_NOFAIL);
634
635 error = list_lru_init(&qinf->qi_lru);
636 if (error)
637 goto out_free_qinf;
638
639 /*
640 * See if quotainodes are setup, and if not, allocate them,
641 * and change the superblock accordingly.
642 */
643 error = xfs_qm_init_quotainos(mp);
644 if (error)
645 goto out_free_lru;
646
647 INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_KERNEL);
648 INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_KERNEL);
649 INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_KERNEL);
650 mutex_init(&qinf->qi_tree_lock);
651
652 /* mutex used to serialize quotaoffs */
653 mutex_init(&qinf->qi_quotaofflock);
654
655 /* Precalc some constants */
656 qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
657 qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
658 if (xfs_has_bigtime(mp)) {
659 qinf->qi_expiry_min =
660 xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN);
661 qinf->qi_expiry_max =
662 xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX);
663 } else {
664 qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN;
665 qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX;
666 }
667 trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min,
668 qinf->qi_expiry_max);
669
670 mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
671
672 xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER);
673 xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP);
674 xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ);
675
676 if (XFS_IS_UQUOTA_ON(mp))
677 xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf);
678 if (XFS_IS_GQUOTA_ON(mp))
679 xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf);
680 if (XFS_IS_PQUOTA_ON(mp))
681 xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);
682
683 qinf->qi_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "xfs-qm:%s",
684 mp->m_super->s_id);
685 if (!qinf->qi_shrinker) {
686 error = -ENOMEM;
687 goto out_free_inos;
688 }
689
690 qinf->qi_shrinker->count_objects = xfs_qm_shrink_count;
691 qinf->qi_shrinker->scan_objects = xfs_qm_shrink_scan;
692 qinf->qi_shrinker->private_data = qinf;
693
694 shrinker_register(qinf->qi_shrinker);
695
696 xfs_hooks_init(&qinf->qi_mod_ino_dqtrx_hooks);
697 xfs_hooks_init(&qinf->qi_apply_dqtrx_hooks);
698
699 return 0;
700
701out_free_inos:
702 mutex_destroy(&qinf->qi_quotaofflock);
703 mutex_destroy(&qinf->qi_tree_lock);
704 xfs_qm_destroy_quotainos(qinf);
705out_free_lru:
706 list_lru_destroy(&qinf->qi_lru);
707out_free_qinf:
708 kfree(qinf);
709 mp->m_quotainfo = NULL;
710 return error;
711}
712
713/*
714 * Gets called when unmounting a filesystem or when all quotas get
715 * turned off.
716 * This purges the quota inodes, destroys locks and frees itself.
717 */
718void
719xfs_qm_destroy_quotainfo(
720 struct xfs_mount *mp)
721{
722 struct xfs_quotainfo *qi;
723
724 qi = mp->m_quotainfo;
725 ASSERT(qi != NULL);
726
727 shrinker_free(qi->qi_shrinker);
728 list_lru_destroy(&qi->qi_lru);
729 xfs_qm_destroy_quotainos(qi);
730 mutex_destroy(&qi->qi_tree_lock);
731 mutex_destroy(&qi->qi_quotaofflock);
732 kfree(qi);
733 mp->m_quotainfo = NULL;
734}
735
736/*
737 * Create an inode and return with a reference already taken, but unlocked.
738 * This is how we create quota inodes.
739 */
740STATIC int
741xfs_qm_qino_alloc(
742 struct xfs_mount *mp,
743 struct xfs_inode **ipp,
744 unsigned int flags)
745{
746 struct xfs_trans *tp;
747 int error;
748 bool need_alloc = true;
749
750 *ipp = NULL;
751 /*
752 * With superblock that doesn't have separate pquotino, we
753 * share an inode between gquota and pquota. If the on-disk
754 * superblock has GQUOTA and the filesystem is now mounted
755 * with PQUOTA, just use sb_gquotino for sb_pquotino and
756 * vice-versa.
757 */
758 if (!xfs_has_pquotino(mp) &&
759 (flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
760 xfs_ino_t ino = NULLFSINO;
761
762 if ((flags & XFS_QMOPT_PQUOTA) &&
763 (mp->m_sb.sb_gquotino != NULLFSINO)) {
764 ino = mp->m_sb.sb_gquotino;
765 if (XFS_IS_CORRUPT(mp,
766 mp->m_sb.sb_pquotino != NULLFSINO)) {
767 xfs_fs_mark_sick(mp, XFS_SICK_FS_PQUOTA);
768 return -EFSCORRUPTED;
769 }
770 } else if ((flags & XFS_QMOPT_GQUOTA) &&
771 (mp->m_sb.sb_pquotino != NULLFSINO)) {
772 ino = mp->m_sb.sb_pquotino;
773 if (XFS_IS_CORRUPT(mp,
774 mp->m_sb.sb_gquotino != NULLFSINO)) {
775 xfs_fs_mark_sick(mp, XFS_SICK_FS_GQUOTA);
776 return -EFSCORRUPTED;
777 }
778 }
779 if (ino != NULLFSINO) {
780 error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
781 if (error)
782 return error;
783 mp->m_sb.sb_gquotino = NULLFSINO;
784 mp->m_sb.sb_pquotino = NULLFSINO;
785 need_alloc = false;
786 }
787 }
788
789 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
790 need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
791 0, 0, &tp);
792 if (error)
793 return error;
794
795 if (need_alloc) {
796 struct xfs_icreate_args args = {
797 .mode = S_IFREG,
798 .flags = XFS_ICREATE_UNLINKABLE,
799 };
800 xfs_ino_t ino;
801
802 error = xfs_dialloc(&tp, 0, S_IFREG, &ino);
803 if (!error)
804 error = xfs_icreate(tp, ino, &args, ipp);
805 if (error) {
806 xfs_trans_cancel(tp);
807 return error;
808 }
809 }
810
811 /*
812 * Make the changes in the superblock, and log those too.
813 * sbfields arg may contain fields other than *QUOTINO;
814 * VERSIONNUM for example.
815 */
816 spin_lock(&mp->m_sb_lock);
817 if (flags & XFS_QMOPT_SBVERSION) {
818 ASSERT(!xfs_has_quota(mp));
819
820 xfs_add_quota(mp);
821 mp->m_sb.sb_uquotino = NULLFSINO;
822 mp->m_sb.sb_gquotino = NULLFSINO;
823 mp->m_sb.sb_pquotino = NULLFSINO;
824
825 /* qflags will get updated fully _after_ quotacheck */
826 mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
827 }
828 if (flags & XFS_QMOPT_UQUOTA)
829 mp->m_sb.sb_uquotino = (*ipp)->i_ino;
830 else if (flags & XFS_QMOPT_GQUOTA)
831 mp->m_sb.sb_gquotino = (*ipp)->i_ino;
832 else
833 mp->m_sb.sb_pquotino = (*ipp)->i_ino;
834 spin_unlock(&mp->m_sb_lock);
835 xfs_log_sb(tp);
836
837 error = xfs_trans_commit(tp);
838 if (error) {
839 ASSERT(xfs_is_shutdown(mp));
840 xfs_alert(mp, "%s failed (error %d)!", __func__, error);
841 }
842 if (need_alloc) {
843 xfs_iunlock(*ipp, XFS_ILOCK_EXCL);
844 xfs_finish_inode_setup(*ipp);
845 }
846 return error;
847}
848
849
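/*
 * Reset the counters and timers of every dquot in buffer @bp, repairing any
 * dquot blocks that fail verification, so that quotacheck can rebuild the
 * usage figures from scratch.
 */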
850STATIC void
851xfs_qm_reset_dqcounts(
852 struct xfs_mount *mp,
853 struct xfs_buf *bp,
854 xfs_dqid_t id,
855 xfs_dqtype_t type)
856{
857 struct xfs_dqblk *dqb;
858 int j;
859
860 trace_xfs_reset_dqcounts(bp, _RET_IP_);
861
862 /*
863 * Reset all counters and timers. They'll be
864 * started afresh by xfs_qm_quotacheck.
865 */
866#ifdef DEBUG
867 j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
868 sizeof(struct xfs_dqblk);
869 ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
870#endif
871 dqb = bp->b_addr;
872 for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
873 struct xfs_disk_dquot *ddq;
874
875 ddq = (struct xfs_disk_dquot *)&dqb[j];
876
877 /*
878 * Do a sanity check, and if needed, repair the dqblk. Don't
879 * output any warnings because it's perfectly possible to
880 * find uninitialised dquot blks. See comment in
881 * xfs_dquot_verify.
882 */
883 if (xfs_dqblk_verify(mp, &dqb[j], id + j) ||
884 (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type)
885 xfs_dqblk_repair(mp, &dqb[j], id + j, type);
886
887 /*
888 * Reset type in case we are reusing group quota file for
889 * project quotas or vice versa
890 */
891 ddq->d_type = type;
892 ddq->d_bcount = 0;
893 ddq->d_icount = 0;
894 ddq->d_rtbcount = 0;
895
896 /*
897 * dquot id 0 stores the default grace period and the maximum
898 * warning limit that were set by the administrator, so we
899 * should not reset them.
900 */
901 if (ddq->d_id != 0) {
902 ddq->d_btimer = 0;
903 ddq->d_itimer = 0;
904 ddq->d_rtbtimer = 0;
905 ddq->d_bwarns = 0;
906 ddq->d_iwarns = 0;
907 ddq->d_rtbwarns = 0;
908 if (xfs_has_bigtime(mp))
909 ddq->d_type |= XFS_DQTYPE_BIGTIME;
910 }
911
912 if (xfs_has_crc(mp)) {
913 xfs_update_cksum((char *)&dqb[j],
914 sizeof(struct xfs_dqblk),
915 XFS_DQUOT_CRC_OFF);
916 }
917 }
918}
919
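/*
 * Read each dquot buffer in the @blkcnt blocks starting at @bno, reset the
 * dquots it contains and queue the buffer on @buffer_list for delayed write.
 */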
920STATIC int
921xfs_qm_reset_dqcounts_all(
922 struct xfs_mount *mp,
923 xfs_dqid_t firstid,
924 xfs_fsblock_t bno,
925 xfs_filblks_t blkcnt,
926 xfs_dqtype_t type,
927 struct list_head *buffer_list)
928{
929 struct xfs_buf *bp;
930 int error = 0;
931
932 ASSERT(blkcnt > 0);
933
934 /*
935 * Blkcnt arg can be a very big number, and might even be
936 * larger than the log itself. So, we have to break it up into
937 * manageable-sized transactions.
938 * Note that we don't start a permanent transaction here; we might
939 * not be able to get a log reservation for the whole thing up front,
940 * and we don't really care to either, because we just discard
941 * everything if we were to crash in the middle of this loop.
942 */
943 while (blkcnt--) {
944 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
945 XFS_FSB_TO_DADDR(mp, bno),
946 mp->m_quotainfo->qi_dqchunklen, 0, &bp,
947 &xfs_dquot_buf_ops);
948
949 /*
950 * CRC and validation errors will return an EFSCORRUPTED here. If
951 * this occurs, re-read without CRC validation so that we can
952 * repair the damage via xfs_qm_reset_dqcounts(). This process
953 * will leave a trace in the log indicating corruption has
954 * been detected.
955 */
956 if (error == -EFSCORRUPTED) {
957 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
958 XFS_FSB_TO_DADDR(mp, bno),
959 mp->m_quotainfo->qi_dqchunklen, 0, &bp,
960 NULL);
961 }
962
963 if (error)
964 break;
965
966 /*
967 * A corrupt buffer might not have a verifier attached, so
968 * make sure we have the correct one attached before writeback
969 * occurs.
970 */
971 bp->b_ops = &xfs_dquot_buf_ops;
972 xfs_qm_reset_dqcounts(mp, bp, firstid, type);
973 xfs_buf_delwri_queue(bp, buffer_list);
974 xfs_buf_relse(bp);
975
976 /* goto the next block. */
977 bno++;
978 firstid += mp->m_quotainfo->qi_dqperchunk;
979 }
980
981 return error;
982}
983
984/*
985 * Iterate over all allocated dquot blocks in this quota inode, zeroing all
986 * counters for every chunk of dquots that we find.
987 */
988STATIC int
989xfs_qm_reset_dqcounts_buf(
990 struct xfs_mount *mp,
991 struct xfs_inode *qip,
992 xfs_dqtype_t type,
993 struct list_head *buffer_list)
994{
995 struct xfs_bmbt_irec *map;
996 int i, nmaps; /* number of map entries */
997 int error; /* return value */
998 xfs_fileoff_t lblkno;
999 xfs_filblks_t maxlblkcnt;
1000 xfs_dqid_t firstid;
1001 xfs_fsblock_t rablkno;
1002 xfs_filblks_t rablkcnt;
1003
1004 error = 0;
1005 /*
1006 * This looks racy, but we can't keep an inode lock across a
1007 * trans_reserve. But, this gets called during quotacheck, and that
1008 * happens only at mount time which is single threaded.
1009 */
1010 if (qip->i_nblocks == 0)
1011 return 0;
1012
1013 map = kmalloc(XFS_DQITER_MAP_SIZE * sizeof(*map),
1014 GFP_KERNEL | __GFP_NOFAIL);
1015
1016 lblkno = 0;
1017 maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1018 do {
1019 uint lock_mode;
1020
1021 nmaps = XFS_DQITER_MAP_SIZE;
1022 /*
1023 * We aren't changing the inode itself. Just changing
1024 * some of its data. No new blocks are added here, and
1025 * the inode is never added to the transaction.
1026 */
1027 lock_mode = xfs_ilock_data_map_shared(qip);
1028 error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1029 map, &nmaps, 0);
1030 xfs_iunlock(qip, lock_mode);
1031 if (error)
1032 break;
1033
1034 ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1035 for (i = 0; i < nmaps; i++) {
1036 ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1037 ASSERT(map[i].br_blockcount);
1038
1039
1040 lblkno += map[i].br_blockcount;
1041
1042 if (map[i].br_startblock == HOLESTARTBLOCK)
1043 continue;
1044
1045 firstid = (xfs_dqid_t) map[i].br_startoff *
1046 mp->m_quotainfo->qi_dqperchunk;
1047 /*
1048 * Do a read-ahead on the next extent.
1049 */
1050 if ((i+1 < nmaps) &&
1051 (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1052 rablkcnt = map[i+1].br_blockcount;
1053 rablkno = map[i+1].br_startblock;
1054 while (rablkcnt--) {
1055 xfs_buf_readahead(mp->m_ddev_targp,
1056 XFS_FSB_TO_DADDR(mp, rablkno),
1057 mp->m_quotainfo->qi_dqchunklen,
1058 &xfs_dquot_buf_ops);
1059 rablkno++;
1060 }
1061 }
1062 /*
1063 * Iterate thru all the blks in the extent and
1064 * reset the counters of all the dquots inside them.
1065 */
1066 error = xfs_qm_reset_dqcounts_all(mp, firstid,
1067 map[i].br_startblock,
1068 map[i].br_blockcount,
1069 type, buffer_list);
1070 if (error)
1071 goto out;
1072 }
1073 } while (nmaps > 0);
1074
1075out:
1076 kfree(map);
1077 return error;
1078}
1079
1080/*
1081 * Called by dqusage_adjust in doing a quotacheck.
1082 *
1083 * Given the inode and a dquot id, this updates both the incore dquot as well
1084 * as the buffer copy. This is so that once the quotacheck is done, we can
1085 * just log all the buffers, as opposed to logging numerous updates to
1086 * individual dquots.
1087 */
1088STATIC int
1089xfs_qm_quotacheck_dqadjust(
1090 struct xfs_inode *ip,
1091 xfs_dqtype_t type,
1092 xfs_qcnt_t nblks,
1093 xfs_qcnt_t rtblks)
1094{
1095 struct xfs_mount *mp = ip->i_mount;
1096 struct xfs_dquot *dqp;
1097 xfs_dqid_t id;
1098 int error;
1099
1100 id = xfs_qm_id_for_quotatype(ip, type);
1101 error = xfs_qm_dqget(mp, id, type, true, &dqp);
1102 if (error) {
1103 /*
1104 * Shouldn't be able to turn off quotas here.
1105 */
1106 ASSERT(error != -ESRCH);
1107 ASSERT(error != -ENOENT);
1108 return error;
1109 }
1110
1111 trace_xfs_dqadjust(dqp);
1112
1113 /*
1114 * Adjust the inode count and the block count to reflect this inode's
1115 * resource usage.
1116 */
1117 dqp->q_ino.count++;
1118 dqp->q_ino.reserved++;
1119 if (nblks) {
1120 dqp->q_blk.count += nblks;
1121 dqp->q_blk.reserved += nblks;
1122 }
1123 if (rtblks) {
1124 dqp->q_rtb.count += rtblks;
1125 dqp->q_rtb.reserved += rtblks;
1126 }
1127
1128 /*
1129 * Set default limits, adjust timers (since we changed usages)
1130 *
1131 * There are no timers for the default values set in the root dquot.
1132 */
1133 if (dqp->q_id) {
1134 xfs_qm_adjust_dqlimits(dqp);
1135 xfs_qm_adjust_dqtimers(dqp);
1136 }
1137
1138 dqp->q_flags |= XFS_DQFLAG_DIRTY;
1139 xfs_qm_dqput(dqp);
1140 return 0;
1141}
1142
1143/*
1144 * callback routine supplied to bulkstat(). Given an inumber, find its
1145 * dquots and update them to account for resources taken by that inode.
1146 */
1147/* ARGSUSED */
1148STATIC int
1149xfs_qm_dqusage_adjust(
1150 struct xfs_mount *mp,
1151 struct xfs_trans *tp,
1152 xfs_ino_t ino,
1153 void *data)
1154{
1155 struct xfs_inode *ip;
1156 xfs_qcnt_t nblks;
1157 xfs_filblks_t rtblks = 0; /* total rt blks */
1158 int error;
1159
1160 ASSERT(XFS_IS_QUOTA_ON(mp));
1161
1162 /*
1163 * rootino must have its resources accounted for, not so with the quota
1164 * inodes.
1165 */
1166 if (xfs_is_quota_inode(&mp->m_sb, ino))
1167 return 0;
1168
1169 /*
1170 * We don't _need_ to take the ilock EXCL here because quotacheck runs
1171 * at mount time and therefore nobody will be racing chown/chproj.
1172 */
1173 error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip);
1174 if (error == -EINVAL || error == -ENOENT)
1175 return 0;
1176 if (error)
1177 return error;
1178
1179 /*
1180 * Reload the incore unlinked list to avoid failure in inodegc.
1181 * Use an unlocked check here because unrecovered unlinked inodes
1182 * should be somewhat rare.
1183 */
1184 if (xfs_inode_unlinked_incomplete(ip)) {
1185 error = xfs_inode_reload_unlinked(ip);
1186 if (error) {
1187 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1188 goto error0;
1189 }
1190 }
1191
1192 ASSERT(ip->i_delayed_blks == 0);
1193
1194 if (XFS_IS_REALTIME_INODE(ip)) {
1195 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
1196
1197 error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
1198 if (error)
1199 goto error0;
1200
1201 xfs_bmap_count_leaves(ifp, &rtblks);
1202 }
1203
1204 nblks = (xfs_qcnt_t)ip->i_nblocks - rtblks;
1205 xfs_iflags_clear(ip, XFS_IQUOTAUNCHECKED);
1206
1207 /*
1208 * Add the (disk blocks and inode) resources occupied by this
1209 * inode to its dquots. We do this adjustment in the incore dquot,
1210 * and also copy the changes to its buffer.
1211 * We don't care about putting these changes in a transaction
1212 * envelope because if we crash in the middle of a 'quotacheck'
1213 * we have to start from the beginning anyway.
1214 * Once we're done, we'll log all the dquot bufs.
1215 *
1216 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1217 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1218 */
1219 if (XFS_IS_UQUOTA_ON(mp)) {
1220 error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks,
1221 rtblks);
1222 if (error)
1223 goto error0;
1224 }
1225
1226 if (XFS_IS_GQUOTA_ON(mp)) {
1227 error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks,
1228 rtblks);
1229 if (error)
1230 goto error0;
1231 }
1232
1233 if (XFS_IS_PQUOTA_ON(mp)) {
1234 error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks,
1235 rtblks);
1236 if (error)
1237 goto error0;
1238 }
1239
1240error0:
1241 xfs_irele(ip);
1242 return error;
1243}
1244
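/*
 * Write back a single dirty dquot during quotacheck, adding its backing
 * buffer to the caller's delwri list.
 */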
1245STATIC int
1246xfs_qm_flush_one(
1247 struct xfs_dquot *dqp,
1248 void *data)
1249{
1250 struct xfs_mount *mp = dqp->q_mount;
1251 struct list_head *buffer_list = data;
1252 struct xfs_buf *bp = NULL;
1253 int error = 0;
1254
1255 xfs_dqlock(dqp);
1256 if (dqp->q_flags & XFS_DQFLAG_FREEING)
1257 goto out_unlock;
1258 if (!XFS_DQ_IS_DIRTY(dqp))
1259 goto out_unlock;
1260
1261 /*
1262 * The only way the dquot is already flush locked by the time quotacheck
1263 * gets here is if reclaim flushed it before the dqadjust walk dirtied
1264 * it for the final time. Quotacheck collects all dquot bufs in the
1265 * local delwri queue before dquots are dirtied, so reclaim can't have
1266 * possibly queued it for I/O. The only way out is to push the buffer to
1267 * cycle the flush lock.
1268 */
1269 if (!xfs_dqflock_nowait(dqp)) {
1270 /* buf is pinned in-core by delwri list */
1271 error = xfs_buf_incore(mp->m_ddev_targp, dqp->q_blkno,
1272 mp->m_quotainfo->qi_dqchunklen, 0, &bp);
1273 if (error)
1274 goto out_unlock;
1275
1276 if (!(bp->b_flags & _XBF_DELWRI_Q)) {
1277 error = -EAGAIN;
1278 xfs_buf_relse(bp);
1279 goto out_unlock;
1280 }
1281 xfs_buf_unlock(bp);
1282
1283 xfs_buf_delwri_pushbuf(bp, buffer_list);
1284 xfs_buf_rele(bp);
1285
1286 error = -EAGAIN;
1287 goto out_unlock;
1288 }
1289
1290 error = xfs_qm_dqflush(dqp, &bp);
1291 if (error)
1292 goto out_unlock;
1293
1294 xfs_buf_delwri_queue(bp, buffer_list);
1295 xfs_buf_relse(bp);
1296out_unlock:
1297 xfs_dqunlock(dqp);
1298 return error;
1299}
1300
1301/*
1302 * Walk thru all the filesystem inodes and construct a consistent view
1303 * of the disk quota world. If the quotacheck fails, disable quotas.
1304 */
1305STATIC int
1306xfs_qm_quotacheck(
1307 xfs_mount_t *mp)
1308{
1309 int error, error2;
1310 uint flags;
1311 LIST_HEAD(buffer_list);
1312 struct xfs_inode *uip = mp->m_quotainfo->qi_uquotaip;
1313 struct xfs_inode *gip = mp->m_quotainfo->qi_gquotaip;
1314 struct xfs_inode *pip = mp->m_quotainfo->qi_pquotaip;
1315
1316 flags = 0;
1317
1318 ASSERT(uip || gip || pip);
1319 ASSERT(XFS_IS_QUOTA_ON(mp));
1320
1321 xfs_notice(mp, "Quotacheck needed: Please wait.");
1322
1323 /*
1324 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
1325 * their counters to zero. We need a clean slate.
1326 * We don't log our changes till later.
1327 */
1328 if (uip) {
1329 error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER,
1330 &buffer_list);
1331 if (error)
1332 goto error_return;
1333 flags |= XFS_UQUOTA_CHKD;
1334 }
1335
1336 if (gip) {
1337 error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP,
1338 &buffer_list);
1339 if (error)
1340 goto error_return;
1341 flags |= XFS_GQUOTA_CHKD;
1342 }
1343
1344 if (pip) {
1345 error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ,
1346 &buffer_list);
1347 if (error)
1348 goto error_return;
1349 flags |= XFS_PQUOTA_CHKD;
1350 }
1351
1352 xfs_set_quotacheck_running(mp);
1353 error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
1354 NULL);
1355 xfs_clear_quotacheck_running(mp);
1356
1357 /*
1358 * On error, the inode walk may have partially populated the dquot
1359 * caches. We must purge them before disabling quota and tearing down
1360 * the quotainfo, or else the dquots will leak.
1361 */
1362 if (error)
1363 goto error_purge;
1364
1365 /*
1366 * We've made all the changes that we need to make incore. Flush them
1367 * down to disk buffers if everything was updated successfully.
1368 */
1369 if (XFS_IS_UQUOTA_ON(mp)) {
1370 error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one,
1371 &buffer_list);
1372 }
1373 if (XFS_IS_GQUOTA_ON(mp)) {
1374 error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one,
1375 &buffer_list);
1376 if (!error)
1377 error = error2;
1378 }
1379 if (XFS_IS_PQUOTA_ON(mp)) {
1380 error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one,
1381 &buffer_list);
1382 if (!error)
1383 error = error2;
1384 }
1385
1386 error2 = xfs_buf_delwri_submit(&buffer_list);
1387 if (!error)
1388 error = error2;
1389
1390 /*
1391 * We can get this error if we couldn't do a dquot allocation inside
1392 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
1393 * dirty dquots that might be cached, we just want to get rid of them
1394 * and turn quotaoff. The dquots won't be attached to any of the inodes
1395 * at this point (because we intentionally didn't in dqget_noattach).
1396 */
1397 if (error)
1398 goto error_purge;
1399
1400 /*
1401 * If one type of quotas is off, then it will lose its
1402 * quotachecked status, since we won't be doing accounting for
1403 * that type anymore.
1404 */
1405 mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1406 mp->m_qflags |= flags;
1407
1408error_return:
1409 xfs_buf_delwri_cancel(&buffer_list);
1410
1411 if (error) {
1412 xfs_warn(mp,
1413 "Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1414 error);
1415 /*
1416 * We must turn off quotas.
1417 */
1418 ASSERT(mp->m_quotainfo != NULL);
1419 xfs_qm_destroy_quotainfo(mp);
1420 if (xfs_mount_reset_sbqflags(mp)) {
1421 xfs_warn(mp,
1422 "Quotacheck: Failed to reset quota flags.");
1423 }
1424 xfs_fs_mark_sick(mp, XFS_SICK_FS_QUOTACHECK);
1425 } else {
1426 xfs_notice(mp, "Quotacheck: Done.");
1427 xfs_fs_mark_healthy(mp, XFS_SICK_FS_QUOTACHECK);
1428 }
1429
1430 return error;
1431
1432error_purge:
1433 /*
1434 * On error, we may have inodes queued for inactivation. This may try
1435 * to attach dquots to the inode before running cleanup operations on
1436 * the inode and this can race with the xfs_qm_destroy_quotainfo() call
1437 * below that frees mp->m_quotainfo. To avoid this race, flush all the
1438 * pending inodegc operations before we purge the dquots from memory,
1439 * ensuring that background inactivation is idle whilst we turn off
1440 * quotas.
1441 */
1442 xfs_inodegc_flush(mp);
1443 xfs_qm_dqpurge_all(mp);
1444 goto error_return;
1445
1446}
1447
1448/*
1449 * This is called from xfs_mountfs to start quotas and initialize all
1450 * necessary data structures like quotainfo. This is also responsible for
1451 * running a quotacheck as necessary. We are guaranteed that the superblock
1452 * is consistently read in at this point.
1453 *
1454 * If we fail here, the mount will continue with quota turned off. We don't
1455 * need to indicate success or failure at all.
1456 */
1457void
1458xfs_qm_mount_quotas(
1459 struct xfs_mount *mp)
1460{
1461 int error = 0;
1462 uint sbf;
1463
1464 /*
1465 * If quotas on realtime volumes are not supported, we disable
1466 * quotas immediately.
1467 */
1468 if (mp->m_sb.sb_rextents) {
1469 xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
1470 mp->m_qflags = 0;
1471 goto write_changes;
1472 }
1473
1474 ASSERT(XFS_IS_QUOTA_ON(mp));
1475
1476 /*
1477 * Allocate the quotainfo structure inside the mount struct, and
1478 * create quotainode(s), and change/rev superblock if necessary.
1479 */
1480 error = xfs_qm_init_quotainfo(mp);
1481 if (error) {
1482 /*
1483 * We must turn off quotas.
1484 */
1485 ASSERT(mp->m_quotainfo == NULL);
1486 mp->m_qflags = 0;
1487 goto write_changes;
1488 }
1489 /*
1490 * If any of the quotas are not consistent, do a quotacheck.
1491 */
1492 if (XFS_QM_NEED_QUOTACHECK(mp)) {
1493 error = xfs_qm_quotacheck(mp);
1494 if (error) {
1495 /* Quotacheck failed and disabled quotas. */
1496 return;
1497 }
1498 }
1499 /*
1500 * If one type of quotas is off, then it will lose its
1501 * quotachecked status, since we won't be doing accounting for
1502 * that type anymore.
1503 */
1504 if (!XFS_IS_UQUOTA_ON(mp))
1505 mp->m_qflags &= ~XFS_UQUOTA_CHKD;
1506 if (!XFS_IS_GQUOTA_ON(mp))
1507 mp->m_qflags &= ~XFS_GQUOTA_CHKD;
1508 if (!XFS_IS_PQUOTA_ON(mp))
1509 mp->m_qflags &= ~XFS_PQUOTA_CHKD;
1510
1511 write_changes:
1512 /*
1513 * We actually don't have to acquire the m_sb_lock at all.
1514 * This can only be called from mount, and that's single threaded. XXX
1515 */
1516 spin_lock(&mp->m_sb_lock);
1517 sbf = mp->m_sb.sb_qflags;
1518 mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
1519 spin_unlock(&mp->m_sb_lock);
1520
1521 if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
1522 if (xfs_sync_sb(mp, false)) {
1523 /*
1524 * We could only have been turning quotas off.
1525 * We aren't in very good shape actually because
1526 * the incore structures are convinced that quotas are
1527 * off, but the on-disk superblock doesn't know that!
1528 */
1529 ASSERT(!(XFS_IS_QUOTA_ON(mp)));
1530 xfs_alert(mp, "%s: Superblock update failed!",
1531 __func__);
1532 }
1533 }
1534
1535 if (error) {
1536 xfs_warn(mp, "Failed to initialize disk quotas.");
1537 return;
1538 }
1539}
1540
1541/*
1542 * This is called after the superblock has been read in and we're ready to
1543 * iget the quota inodes.
1544 */
1545STATIC int
1546xfs_qm_init_quotainos(
1547 xfs_mount_t *mp)
1548{
1549 struct xfs_inode *uip = NULL;
1550 struct xfs_inode *gip = NULL;
1551 struct xfs_inode *pip = NULL;
1552 int error;
1553 uint flags = 0;
1554
1555 ASSERT(mp->m_quotainfo);
1556
1557 /*
1558 * Get the uquota and gquota inodes
1559 */
1560 if (xfs_has_quota(mp)) {
1561 if (XFS_IS_UQUOTA_ON(mp) &&
1562 mp->m_sb.sb_uquotino != NULLFSINO) {
1563 ASSERT(mp->m_sb.sb_uquotino > 0);
1564 error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1565 0, 0, &uip);
1566 if (error)
1567 return error;
1568 }
1569 if (XFS_IS_GQUOTA_ON(mp) &&
1570 mp->m_sb.sb_gquotino != NULLFSINO) {
1571 ASSERT(mp->m_sb.sb_gquotino > 0);
1572 error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1573 0, 0, &gip);
1574 if (error)
1575 goto error_rele;
1576 }
1577 if (XFS_IS_PQUOTA_ON(mp) &&
1578 mp->m_sb.sb_pquotino != NULLFSINO) {
1579 ASSERT(mp->m_sb.sb_pquotino > 0);
1580 error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
1581 0, 0, &pip);
1582 if (error)
1583 goto error_rele;
1584 }
1585 } else {
1586 flags |= XFS_QMOPT_SBVERSION;
1587 }
1588
1589 /*
1590 * Create the three inodes, if they don't exist already. The changes
1591 * made above will get added to a transaction and logged in one of
1592 * the qino_alloc calls below. If the device is readonly,
1593 * temporarily switch to read-write to do this.
1594 */
1595 if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1596 error = xfs_qm_qino_alloc(mp, &uip,
1597 flags | XFS_QMOPT_UQUOTA);
1598 if (error)
1599 goto error_rele;
1600
1601 flags &= ~XFS_QMOPT_SBVERSION;
1602 }
1603 if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1604 error = xfs_qm_qino_alloc(mp, &gip,
1605 flags | XFS_QMOPT_GQUOTA);
1606 if (error)
1607 goto error_rele;
1608
1609 flags &= ~XFS_QMOPT_SBVERSION;
1610 }
1611 if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1612 error = xfs_qm_qino_alloc(mp, &pip,
1613 flags | XFS_QMOPT_PQUOTA);
1614 if (error)
1615 goto error_rele;
1616 }
1617
1618 mp->m_quotainfo->qi_uquotaip = uip;
1619 mp->m_quotainfo->qi_gquotaip = gip;
1620 mp->m_quotainfo->qi_pquotaip = pip;
1621
1622 return 0;
1623
1624error_rele:
1625 if (uip)
1626 xfs_irele(uip);
1627 if (gip)
1628 xfs_irele(gip);
1629 if (pip)
1630 xfs_irele(pip);
1631 return error;
1632}
1633
1634STATIC void
1635xfs_qm_destroy_quotainos(
1636 struct xfs_quotainfo *qi)
1637{
1638 if (qi->qi_uquotaip) {
1639 xfs_irele(qi->qi_uquotaip);
1640 qi->qi_uquotaip = NULL; /* paranoia */
1641 }
1642 if (qi->qi_gquotaip) {
1643 xfs_irele(qi->qi_gquotaip);
1644 qi->qi_gquotaip = NULL;
1645 }
1646 if (qi->qi_pquotaip) {
1647 xfs_irele(qi->qi_pquotaip);
1648 qi->qi_pquotaip = NULL;
1649 }
1650}
1651
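/*
 * Remove a dquot from the quota radix tree and free it. Called for dquots
 * that the shrinker has isolated for disposal.
 */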
1652STATIC void
1653xfs_qm_dqfree_one(
1654 struct xfs_dquot *dqp)
1655{
1656 struct xfs_mount *mp = dqp->q_mount;
1657 struct xfs_quotainfo *qi = mp->m_quotainfo;
1658
1659 mutex_lock(&qi->qi_tree_lock);
1660 radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id);
1661
1662 qi->qi_dquots--;
1663 mutex_unlock(&qi->qi_tree_lock);
1664
1665 xfs_qm_dqdestroy(dqp);
1666}
1667
1668/* --------------- utility functions for vnodeops ---------------- */
1669
1670
1671/*
1672 * Given an inode, a uid, gid and prid, make sure that we have
1673 * allocated relevant dquot(s) on disk, and that we won't exceed inode
1674 * quotas by creating this file.
1675 * This also attaches dquot(s) to the given inode after locking it,
1676 * and returns the dquots corresponding to the uid and/or gid.
1677 *
1678 * in : inode (unlocked)
1679 * out : udquot, gdquot with references taken and unlocked
1680 */
1681int
1682xfs_qm_vop_dqalloc(
1683 struct xfs_inode *ip,
1684 kuid_t uid,
1685 kgid_t gid,
1686 prid_t prid,
1687 uint flags,
1688 struct xfs_dquot **O_udqpp,
1689 struct xfs_dquot **O_gdqpp,
1690 struct xfs_dquot **O_pdqpp)
1691{
1692 struct xfs_mount *mp = ip->i_mount;
1693 struct inode *inode = VFS_I(ip);
1694 struct user_namespace *user_ns = inode->i_sb->s_user_ns;
1695 struct xfs_dquot *uq = NULL;
1696 struct xfs_dquot *gq = NULL;
1697 struct xfs_dquot *pq = NULL;
1698 int error;
1699 uint lockflags;
1700
1701 if (!XFS_IS_QUOTA_ON(mp))
1702 return 0;
1703
1704 lockflags = XFS_ILOCK_EXCL;
1705 xfs_ilock(ip, lockflags);
1706
1707 if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1708 gid = inode->i_gid;
1709
1710 /*
1711 * Attach the dquot(s) to this inode, doing a dquot allocation
1712 * if necessary. The dquot(s) will not be locked.
1713 */
1714 if (XFS_NOT_DQATTACHED(mp, ip)) {
1715 error = xfs_qm_dqattach_locked(ip, true);
1716 if (error) {
1717 xfs_iunlock(ip, lockflags);
1718 return error;
1719 }
1720 }
1721
1722 if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1723 ASSERT(O_udqpp);
1724 if (!uid_eq(inode->i_uid, uid)) {
1725 /*
1726 * What we need is the dquot that has this uid, and
1727 * if we send the inode to dqget, the uid of the inode
1728 * takes priority over what's sent in the uid argument.
1729 * We must unlock inode here before calling dqget if
1730 * we're not sending the inode, because otherwise
1731 * we'll deadlock by doing trans_reserve while
1732 * holding ilock.
1733 */
1734 xfs_iunlock(ip, lockflags);
1735 error = xfs_qm_dqget(mp, from_kuid(user_ns, uid),
1736 XFS_DQTYPE_USER, true, &uq);
1737 if (error) {
1738 ASSERT(error != -ENOENT);
1739 return error;
1740 }
1741 /*
1742 * Get the ilock in the right order.
1743 */
1744 xfs_dqunlock(uq);
1745 lockflags = XFS_ILOCK_SHARED;
1746 xfs_ilock(ip, lockflags);
1747 } else {
1748 /*
1749 * Take an extra reference, because we'll return
1750 * this to caller
1751 */
1752 ASSERT(ip->i_udquot);
1753 uq = xfs_qm_dqhold(ip->i_udquot);
1754 }
1755 }
1756 if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1757 ASSERT(O_gdqpp);
1758 if (!gid_eq(inode->i_gid, gid)) {
1759 xfs_iunlock(ip, lockflags);
1760 error = xfs_qm_dqget(mp, from_kgid(user_ns, gid),
1761 XFS_DQTYPE_GROUP, true, &gq);
1762 if (error) {
1763 ASSERT(error != -ENOENT);
1764 goto error_rele;
1765 }
1766 xfs_dqunlock(gq);
1767 lockflags = XFS_ILOCK_SHARED;
1768 xfs_ilock(ip, lockflags);
1769 } else {
1770 ASSERT(ip->i_gdquot);
1771 gq = xfs_qm_dqhold(ip->i_gdquot);
1772 }
1773 }
1774 if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1775 ASSERT(O_pdqpp);
1776 if (ip->i_projid != prid) {
1777 xfs_iunlock(ip, lockflags);
1778 error = xfs_qm_dqget(mp, prid,
1779 XFS_DQTYPE_PROJ, true, &pq);
1780 if (error) {
1781 ASSERT(error != -ENOENT);
1782 goto error_rele;
1783 }
1784 xfs_dqunlock(pq);
1785 lockflags = XFS_ILOCK_SHARED;
1786 xfs_ilock(ip, lockflags);
1787 } else {
1788 ASSERT(ip->i_pdquot);
1789 pq = xfs_qm_dqhold(ip->i_pdquot);
1790 }
1791 }
1792 trace_xfs_dquot_dqalloc(ip);
1793
1794 xfs_iunlock(ip, lockflags);
1795 if (O_udqpp)
1796 *O_udqpp = uq;
1797 else
1798 xfs_qm_dqrele(uq);
1799 if (O_gdqpp)
1800 *O_gdqpp = gq;
1801 else
1802 xfs_qm_dqrele(gq);
1803 if (O_pdqpp)
1804 *O_pdqpp = pq;
1805 else
1806 xfs_qm_dqrele(pq);
1807 return 0;
1808
1809error_rele:
1810 xfs_qm_dqrele(gq);
1811 xfs_qm_dqrele(uq);
1812 return error;
1813}
1814
1815/*
1816 * Actually transfer ownership, and do dquot modifications.
1817 * These were already reserved.
1818 */
1819struct xfs_dquot *
1820xfs_qm_vop_chown(
1821 struct xfs_trans *tp,
1822 struct xfs_inode *ip,
1823 struct xfs_dquot **IO_olddq,
1824 struct xfs_dquot *newdq)
1825{
1826 struct xfs_dquot *prevdq;
1827 uint bfield = XFS_IS_REALTIME_INODE(ip) ?
1828 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1829
1830
1831 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
1832 ASSERT(XFS_IS_QUOTA_ON(ip->i_mount));
1833
1834 /* old dquot */
1835 prevdq = *IO_olddq;
1836 ASSERT(prevdq);
1837 ASSERT(prevdq != newdq);
1838
1839 xfs_trans_mod_ino_dquot(tp, ip, prevdq, bfield, -(ip->i_nblocks));
1840 xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1841
1842 /* the sparkling new dquot */
1843 xfs_trans_mod_ino_dquot(tp, ip, newdq, bfield, ip->i_nblocks);
1844 xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1845
1846 /*
1847 * Back when we made quota reservations for the chown, we reserved the
1848 * ondisk blocks + delalloc blocks with the new dquot. Now that we've
1849 * switched the dquots, decrease the new dquot's block reservation
1850 * (having already bumped up the real counter) so that we don't have
1851 * any reservation to give back when we commit.
1852 */
1853 xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_RES_BLKS,
1854 -ip->i_delayed_blks);
1855
1856 /*
1857 * Give the incore reservation for delalloc blocks back to the old
1858 * dquot. We don't normally handle delalloc quota reservations
1859 * transactionally, so just lock the dquot and subtract from the
1860 * reservation. Dirty the transaction because it's too late to turn
1861 * back now.
1862 */
1863 tp->t_flags |= XFS_TRANS_DIRTY;
1864 xfs_dqlock(prevdq);
1865 ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks);
1866 prevdq->q_blk.reserved -= ip->i_delayed_blks;
1867 xfs_dqunlock(prevdq);
1868
1869 /*
1870 * Take an extra reference, because the inode is going to keep
1871 * this dquot pointer even after the trans_commit.
1872 */
1873 *IO_olddq = xfs_qm_dqhold(newdq);
1874
1875 return prevdq;
1876}
1877
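/*
 * Ensure that dquots are attached to each distinct inode involved in a
 * rename operation.
 */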
1878int
1879xfs_qm_vop_rename_dqattach(
1880 struct xfs_inode **i_tab)
1881{
1882 struct xfs_mount *mp = i_tab[0]->i_mount;
1883 int i;
1884
1885 if (!XFS_IS_QUOTA_ON(mp))
1886 return 0;
1887
1888 for (i = 0; (i < 4 && i_tab[i]); i++) {
1889 struct xfs_inode *ip = i_tab[i];
1890 int error;
1891
1892 /*
1893 * Watch out for duplicate entries in the table.
1894 */
1895 if (i == 0 || ip != i_tab[i-1]) {
1896 if (XFS_NOT_DQATTACHED(mp, ip)) {
1897 error = xfs_qm_dqattach(ip);
1898 if (error)
1899 return error;
1900 }
1901 }
1902 }
1903 return 0;
1904}
1905
1906void
1907xfs_qm_vop_create_dqattach(
1908 struct xfs_trans *tp,
1909 struct xfs_inode *ip,
1910 struct xfs_dquot *udqp,
1911 struct xfs_dquot *gdqp,
1912 struct xfs_dquot *pdqp)
1913{
1914 struct xfs_mount *mp = tp->t_mountp;
1915
1916 if (!XFS_IS_QUOTA_ON(mp))
1917 return;
1918
1919 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
1920
1921 if (udqp && XFS_IS_UQUOTA_ON(mp)) {
1922 ASSERT(ip->i_udquot == NULL);
1923 ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);
1924
1925 ip->i_udquot = xfs_qm_dqhold(udqp);
1926 }
1927 if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
1928 ASSERT(ip->i_gdquot == NULL);
1929 ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);
1930
1931 ip->i_gdquot = xfs_qm_dqhold(gdqp);
1932 }
1933 if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
1934 ASSERT(ip->i_pdquot == NULL);
1935 ASSERT(ip->i_projid == pdqp->q_id);
1936
1937 ip->i_pdquot = xfs_qm_dqhold(pdqp);
1938 }
1939
1940 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, 1);
1941}
1942
1943/* Decide if this inode's dquot is near an enforcement boundary. */
1944bool
1945xfs_inode_near_dquot_enforcement(
1946 struct xfs_inode *ip,
1947 xfs_dqtype_t type)
1948{
1949 struct xfs_dquot *dqp;
1950 int64_t freesp;
1951
1952 /* We only care for quotas that are enabled and enforced. */
1953 dqp = xfs_inode_dquot(ip, type);
1954 if (!dqp || !xfs_dquot_is_enforced(dqp))
1955 return false;
1956
1957 if (xfs_dquot_res_over_limits(&dqp->q_ino) ||
1958 xfs_dquot_res_over_limits(&dqp->q_rtb))
1959 return true;
1960
1961 /* For space on the data device, check the various thresholds. */
1962 if (!dqp->q_prealloc_hi_wmark)
1963 return false;
1964
1965 if (dqp->q_blk.reserved < dqp->q_prealloc_lo_wmark)
1966 return false;
1967
1968 if (dqp->q_blk.reserved >= dqp->q_prealloc_hi_wmark)
1969 return true;
1970
1971 freesp = dqp->q_prealloc_hi_wmark - dqp->q_blk.reserved;
1972 if (freesp < dqp->q_low_space[XFS_QLOWSP_5_PCNT])
1973 return true;
1974
1975 return false;
1976}