Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at master 87 lines 3.4 kB view raw
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2022-2023 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#ifndef XFS_DRAIN_H_
#define XFS_DRAIN_H_

struct xfs_group;
struct xfs_perag;

#ifdef CONFIG_XFS_DRAIN_INTENTS
/*
 * Passive drain mechanism.  This data structure tracks a count of some items
 * and contains a waitqueue for callers who would like to wake up when the
 * count hits zero.
 */
struct xfs_defer_drain {
	/* Number of items pending in some part of the filesystem. */
	atomic_t		dr_count;

	/* Queue to wait for dr_count to go to zero. */
	struct wait_queue_head	dr_waiters;
};

void xfs_defer_drain_init(struct xfs_defer_drain *dr);
void xfs_defer_drain_free(struct xfs_defer_drain *dr);

void xfs_defer_drain_wait_disable(void);
void xfs_defer_drain_wait_enable(void);

/*
 * Deferred Work Intent Drains
 * ===========================
 *
 * When a writer thread executes a chain of log intent items, the AG header
 * buffer locks will cycle during a transaction roll to get from one intent
 * item to the next in a chain.  Although scrub takes all AG header buffer
 * locks, this isn't sufficient to guard against scrub checking an AG while
 * that writer thread is in the middle of finishing a chain because there's no
 * higher level locking primitive guarding allocation groups.
 *
 * When there's a collision, cross-referencing between data structures (e.g.
 * rmapbt and refcountbt) yields false corruption events; if repair is running,
 * this results in incorrect repairs, which is catastrophic.
 *
 * The solution is to add to the perag structure the count of active intents
 * and make scrub wait until it has both AG header buffer locks and the intent
 * counter reaches zero.  It is therefore critical that deferred work threads
 * hold the AGI or AGF buffers when decrementing the intent counter.
 *
 * Given a list of deferred work items, the deferred work manager will complete
 * a work item and all the sub-items that the parent item creates before moving
 * on to the next work item in the list.  This is also true for all levels of
 * sub-items.  Writer threads are permitted to queue multiple work items
 * targeting the same AG, so a deferred work item (such as a BUI) that creates
 * sub-items (such as RUIs) must bump the intent counter and maintain it until
 * the sub-items can themselves bump the intent counter.
 *
 * Therefore, the intent count tracks entire lifetimes of deferred work items.
 * All functions that create work items must increment the intent counter as
 * soon as the item is added to the transaction and cannot drop the counter
 * until the item is finished or cancelled.
 *
 * The same principles apply to realtime groups because the rt metadata inode
 * ILOCKs are not held across transaction rolls.
 */
struct xfs_group *xfs_group_intent_get(struct xfs_mount *mp,
		xfs_fsblock_t fsbno, enum xfs_group_type type);
void xfs_group_intent_put(struct xfs_group *rtg);

int xfs_group_intent_drain(struct xfs_group *xg);
bool xfs_group_intent_busy(struct xfs_group *xg);

#else
struct xfs_defer_drain { /* empty */ };

/* Without drain support, init/free are no-ops and intent get/put
 * degrade to plain group reference counting. */
#define xfs_defer_drain_free(dr)	((void)0)
#define xfs_defer_drain_init(dr)	((void)0)

#define xfs_group_intent_get(_mp, _fsbno, _type) \
	xfs_group_get_by_fsb((_mp), (_fsbno), (_type))
#define xfs_group_intent_put(xg)	xfs_group_put(xg)

#endif /* CONFIG_XFS_DRAIN_INTENTS */

#endif /* XFS_DRAIN_H_ */