Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2018 Red Hat, Inc.
4 */
5
6#include "xfs.h"
7#include "xfs_shared.h"
8#include "xfs_format.h"
9#include "xfs_trans_resv.h"
10#include "xfs_mount.h"
11#include "xfs_error.h"
12#include "xfs_trace.h"
13#include "xfs_extent_busy.h"
14#include "xfs_group.h"
15
16/*
17 * Groups can have passive and active references.
18 *
19 * For passive references the code freeing a group is responsible for cleaning
20 * up objects that hold the passive references (e.g. cached buffers).
21 * Routines manipulating passive references are xfs_group_get, xfs_group_hold
22 * and xfs_group_put.
23 *
24 * Active references are for short term access to the group for walking trees or
25 * accessing state. If a group is being shrunk or offlined, the lookup will fail
26 * to find that group and return NULL instead.
27 * Routines manipulating active references are xfs_group_grab and
28 * xfs_group_rele.
29 */
30
31struct xfs_group *
32xfs_group_get(
33 struct xfs_mount *mp,
34 uint32_t index,
35 enum xfs_group_type type)
36{
37 struct xfs_group *xg;
38
39 rcu_read_lock();
40 xg = xa_load(&mp->m_groups[type].xa, index);
41 if (xg) {
42 trace_xfs_group_get(xg, _RET_IP_);
43 ASSERT(atomic_read(&xg->xg_ref) >= 0);
44 atomic_inc(&xg->xg_ref);
45 }
46 rcu_read_unlock();
47 return xg;
48}
49
/*
 * Take an additional passive reference on @xg.
 *
 * The caller must already hold a passive or active reference that keeps the
 * group alive across this call.  Returns @xg for call-chaining convenience.
 * Balance with xfs_group_put().
 */
struct xfs_group *
xfs_group_hold(
	struct xfs_group	*xg)
{
	/* the group must be pinned by some reference before we add ours */
	ASSERT(atomic_read(&xg->xg_ref) > 0 ||
	       atomic_read(&xg->xg_active_ref) > 0);

	trace_xfs_group_hold(xg, _RET_IP_);
	atomic_inc(&xg->xg_ref);
	return xg;
}
61
/*
 * Drop a passive reference obtained from xfs_group_get() or
 * xfs_group_hold().
 */
void
xfs_group_put(
	struct xfs_group	*xg)
{
	trace_xfs_group_put(xg, _RET_IP_);

	/* dropping a reference nobody holds indicates a refcount imbalance */
	ASSERT(atomic_read(&xg->xg_ref) > 0);
	atomic_dec(&xg->xg_ref);
}
71
72struct xfs_group *
73xfs_group_grab(
74 struct xfs_mount *mp,
75 uint32_t index,
76 enum xfs_group_type type)
77{
78 struct xfs_group *xg;
79
80 rcu_read_lock();
81 xg = xa_load(&mp->m_groups[type].xa, index);
82 if (xg) {
83 trace_xfs_group_grab(xg, _RET_IP_);
84 if (!atomic_inc_not_zero(&xg->xg_active_ref))
85 xg = NULL;
86 }
87 rcu_read_unlock();
88 return xg;
89}
90
91/*
92 * Iterate to the next group. To start the iteration at @start_index, a %NULL
93 * @xg is passed, else the previous group returned from this function. The
94 * caller should break out of the loop when this returns %NULL. If the caller
95 * wants to break out of a loop that did not finish it needs to release the
96 * active reference to @xg using xfs_group_rele() itself.
97 */
98struct xfs_group *
99xfs_group_next_range(
100 struct xfs_mount *mp,
101 struct xfs_group *xg,
102 uint32_t start_index,
103 uint32_t end_index,
104 enum xfs_group_type type)
105{
106 uint32_t index = start_index;
107
108 if (xg) {
109 index = xg->xg_gno + 1;
110 xfs_group_rele(xg);
111 }
112 if (index > end_index)
113 return NULL;
114 return xfs_group_grab(mp, index, type);
115}
116
117/*
118 * Find the next group after @xg, or the first group if @xg is NULL.
119 */
120struct xfs_group *
121xfs_group_grab_next_mark(
122 struct xfs_mount *mp,
123 struct xfs_group *xg,
124 xa_mark_t mark,
125 enum xfs_group_type type)
126{
127 unsigned long index = 0;
128
129 if (xg) {
130 index = xg->xg_gno + 1;
131 xfs_group_rele(xg);
132 }
133
134 rcu_read_lock();
135 xg = xa_find(&mp->m_groups[type].xa, &index, ULONG_MAX, mark);
136 if (xg) {
137 trace_xfs_group_grab_next_tag(xg, _RET_IP_);
138 if (!atomic_inc_not_zero(&xg->xg_active_ref))
139 xg = NULL;
140 }
141 rcu_read_unlock();
142 return xg;
143}
144
/*
 * Drop an active reference obtained from xfs_group_grab() or one of the
 * iteration helpers built on top of it.
 */
void
xfs_group_rele(
	struct xfs_group	*xg)
{
	trace_xfs_group_rele(xg, _RET_IP_);
	atomic_dec(&xg->xg_active_ref);
}
152
153void
154xfs_group_free(
155 struct xfs_mount *mp,
156 uint32_t index,
157 enum xfs_group_type type,
158 void (*uninit)(struct xfs_group *xg))
159{
160 struct xfs_group *xg = xa_erase(&mp->m_groups[type].xa, index);
161
162 XFS_IS_CORRUPT(mp, atomic_read(&xg->xg_ref) != 0);
163
164 xfs_defer_drain_free(&xg->xg_intents_drain);
165#ifdef __KERNEL__
166 if (xfs_group_has_extent_busy(xg->xg_mount, xg->xg_type))
167 kfree(xg->xg_busy_extents);
168#endif
169
170 if (uninit)
171 uninit(xg);
172
173 /* drop the mount's active reference */
174 xfs_group_rele(xg);
175 XFS_IS_CORRUPT(mp, atomic_read(&xg->xg_active_ref) > 0);
176 XFS_IS_CORRUPT(mp, atomic_read(&xg->xg_active_ref) < 0);
177 kfree_rcu_mightsleep(xg);
178}
179
/*
 * Initialize the generic parts of @xg and publish it in the mount's group
 * xarray at @index.
 *
 * On success the group is online and carries one active reference owned by
 * the mount (dropped later by xfs_group_free()).  On failure everything set
 * up here is unwound again and a negative errno is returned; the caller
 * still owns the @xg allocation itself.
 */
int
xfs_group_insert(
	struct xfs_mount	*mp,
	struct xfs_group	*xg,
	uint32_t		index,
	enum xfs_group_type	type)
{
	int			error;

	xg->xg_mount = mp;
	xg->xg_gno = index;
	xg->xg_type = type;

#ifdef __KERNEL__
	/* only group types that track busy extents need this allocation */
	if (xfs_group_has_extent_busy(mp, type)) {
		xg->xg_busy_extents = xfs_extent_busy_alloc();
		if (!xg->xg_busy_extents)
			return -ENOMEM;
	}
	spin_lock_init(&xg->xg_state_lock);
	xfs_hooks_init(&xg->xg_rmap_update_hooks);
#endif
	xfs_defer_drain_init(&xg->xg_intents_drain);

	/* Active ref owned by mount indicates group is online. */
	atomic_set(&xg->xg_active_ref, 1);

	error = xa_insert(&mp->m_groups[type].xa, index, xg, GFP_KERNEL);
	if (error) {
		/* -EBUSY means another group already lives at @index */
		WARN_ON_ONCE(error == -EBUSY);
		goto out_drain;
	}

	return 0;
out_drain:
	/* unwind in reverse order of setup above */
	xfs_defer_drain_free(&xg->xg_intents_drain);
#ifdef __KERNEL__
	if (xfs_group_has_extent_busy(xg->xg_mount, xg->xg_type))
		kfree(xg->xg_busy_extents);
#endif
	return error;
}
222
223struct xfs_group *
224xfs_group_get_by_fsb(
225 struct xfs_mount *mp,
226 xfs_fsblock_t fsbno,
227 enum xfs_group_type type)
228{
229 return xfs_group_get(mp, xfs_fsb_to_gno(mp, fsbno, type), type);
230}