// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/utils.c
 *
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */

#include "internal.h"
#include <linux/pagevec.h>

struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
{
        struct page *page;

        if (!list_empty(pool)) {
                page = lru_to_page(pool);
                list_del(&page->lru);
        } else {
                page = alloc_pages(gfp | __GFP_NOFAIL, 0);

                BUG_ON(page == NULL);
                BUG_ON(page->mapping != NULL);
        }
        return page;
}
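
/*
 * Typical usage (an illustrative sketch, not part of the original file):
 * callers keep a private list of spare pages and recycle them through
 * the pool, e.g.
 *
 *      LIST_HEAD(pagepool);
 *
 *      page = erofs_allocpage(&pagepool, GFP_KERNEL);
 *      ...
 *      list_add(&page->lru, &pagepool);   <- return the page for reuse
 *
 * Since __GFP_NOFAIL is or'ed in when the pool is empty, this helper
 * never returns NULL.
 */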

/* global shrink count (for all mounted EROFS instances) */
static atomic_long_t erofs_global_shrink_cnt;

#ifdef CONFIG_EROFS_FS_ZIP

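/*
 * Look up a workgroup by its index under RCU. Pointers stored in
 * workstn_tree carry an extra tag bit, which is peeled off (and reported
 * via *tag) before the workgroup is used. If grabbing a reference races
 * with a concurrent free, the RCU read lock is dropped and the lookup is
 * retried from scratch.
 */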
struct erofs_workgroup *erofs_find_workgroup(
        struct super_block *sb, pgoff_t index, bool *tag)
{
        struct erofs_sb_info *sbi = EROFS_SB(sb);
        struct erofs_workgroup *grp;
        int oldcount;

repeat:
        rcu_read_lock();
        grp = radix_tree_lookup(&sbi->workstn_tree, index);
        if (grp != NULL) {
                *tag = xa_pointer_tag(grp);
                grp = xa_untag_pointer(grp);

                if (erofs_workgroup_get(grp, &oldcount)) {
                        /* prefer to relax rcu read side */
                        rcu_read_unlock();
                        goto repeat;
                }

                /* decrease the shrink count added by erofs_workgroup_put */
                if (unlikely(oldcount == 1))
                        atomic_long_dec(&erofs_global_shrink_cnt);
                BUG_ON(index != grp->index);
        }
        rcu_read_unlock();
        return grp;
}

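/*
 * Insert a workgroup into the per-superblock workstation tree. Tree
 * nodes are preloaded with GFP_NOFS before the workstation lock is
 * taken, so the insertion itself never sleeps; on success the tree
 * holds its own reference via __erofs_workgroup_get().
 */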
int erofs_register_workgroup(struct super_block *sb,
                             struct erofs_workgroup *grp,
                             bool tag)
{
        struct erofs_sb_info *sbi;
        int err;

        /* grp->refcount should not be less than 1 */
        BUG_ON(!atomic_read(&grp->refcount));

        err = radix_tree_preload(GFP_NOFS);
        if (err)
                return err;

        sbi = EROFS_SB(sb);
        erofs_workstn_lock(sbi);

        grp = xa_tag_pointer(grp, tag);

        err = radix_tree_insert(&sbi->workstn_tree,
                                grp->index, grp);

        if (!err)
                __erofs_workgroup_get(grp);

        erofs_workstn_unlock(sbi);
        radix_tree_preload_end();
        return err;
}

extern void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);

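/*
 * Drop one reference. A remaining count of 1 means only the workstation
 * tree still refers to the workgroup, so it becomes reclaimable and the
 * global shrink count is raised; once the count reaches zero, the shrink
 * count is lowered again and the workgroup is freed after an RCU grace
 * period.
 */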
int erofs_workgroup_put(struct erofs_workgroup *grp)
{
        int count = atomic_dec_return(&grp->refcount);

        if (count == 1)
                atomic_long_inc(&erofs_global_shrink_cnt);
        else if (!count) {
                atomic_long_dec(&erofs_global_shrink_cnt);
                erofs_workgroup_free_rcu(grp);
        }
        return count;
}

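/*
 * Scan the workstation tree in batches of up to PAGEVEC_SIZE entries and
 * reclaim at most nr_shrink workgroups. Only workgroups referenced by
 * nothing but the tree itself are eligible; with a managed cache, the
 * workgroup is frozen first so that its cached pages can be dropped
 * safely. With cleanup set (i.e. at umount time), every remaining
 * workgroup is expected to be idle already.
 */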
unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
                                       unsigned long nr_shrink,
                                       bool cleanup)
{
        pgoff_t first_index = 0;
        void *batch[PAGEVEC_SIZE];
        unsigned int freed = 0;

        int i, found;
repeat:
        erofs_workstn_lock(sbi);

        found = radix_tree_gang_lookup(&sbi->workstn_tree,
                                       batch, first_index, PAGEVEC_SIZE);

        for (i = 0; i < found; ++i) {
                int cnt;
                struct erofs_workgroup *grp = xa_untag_pointer(batch[i]);

                first_index = grp->index + 1;

                cnt = atomic_read(&grp->refcount);
                BUG_ON(cnt <= 0);

                if (cleanup)
                        BUG_ON(cnt != 1);

#ifndef EROFS_FS_HAS_MANAGED_CACHE
                else if (cnt > 1)
#else
                if (!erofs_workgroup_try_to_freeze(grp, 1))
#endif
                        continue;

                if (xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
                                                       grp->index)) != grp) {
#ifdef EROFS_FS_HAS_MANAGED_CACHE
skip:
                        erofs_workgroup_unfreeze(grp, 1);
#endif
                        continue;
                }

#ifdef EROFS_FS_HAS_MANAGED_CACHE
                if (erofs_try_to_free_all_cached_pages(sbi, grp))
                        goto skip;

                erofs_workgroup_unfreeze(grp, 1);
#endif
                /* (rarely) grabbed again when freeing */
                erofs_workgroup_put(grp);

                ++freed;
                if (unlikely(!--nr_shrink))
                        break;
        }
        erofs_workstn_unlock(sbi);

        if (i && nr_shrink)
                goto repeat;
        return freed;
}

#endif

/* protected by 'erofs_sb_list_lock' */
static unsigned int shrinker_run_no;

/* protects the mounted 'erofs_sb_list' */
static DEFINE_SPINLOCK(erofs_sb_list_lock);
static LIST_HEAD(erofs_sb_list);

void erofs_register_super(struct super_block *sb)
{
        struct erofs_sb_info *sbi = EROFS_SB(sb);

        mutex_init(&sbi->umount_mutex);

        spin_lock(&erofs_sb_list_lock);
        list_add(&sbi->list, &erofs_sb_list);
        spin_unlock(&erofs_sb_list_lock);
}

void erofs_unregister_super(struct super_block *sb)
{
        spin_lock(&erofs_sb_list_lock);
        list_del(&EROFS_SB(sb)->list);
        spin_unlock(&erofs_sb_list_lock);
}

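/* ->count_objects callback: report how many workgroups are reclaimable */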
unsigned long erofs_shrink_count(struct shrinker *shrink,
                                 struct shrink_control *sc)
{
        return atomic_long_read(&erofs_global_shrink_cnt);
}

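/*
 * ->scan_objects callback: walk all mounted EROFS instances round-robin.
 * Each invocation gets a non-zero run number; a superblock stamped with
 * the current run number has already been visited this run, which is how
 * the loop below terminates even though processed entries are moved to
 * the list tail.
 */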
unsigned long erofs_shrink_scan(struct shrinker *shrink,
                                struct shrink_control *sc)
{
        struct erofs_sb_info *sbi;
        struct list_head *p;

        unsigned long nr = sc->nr_to_scan;
        unsigned int run_no;
        unsigned long freed = 0;

        spin_lock(&erofs_sb_list_lock);
        do
                run_no = ++shrinker_run_no;
        while (run_no == 0);

        /* Iterate over all mounted superblocks and try to shrink them */
        p = erofs_sb_list.next;
        while (p != &erofs_sb_list) {
                sbi = list_entry(p, struct erofs_sb_info, list);

                /*
                 * We move each superblock we have processed to the end of
                 * the list, so we stop when we see one we have already
                 * done this run.
                 */
                if (sbi->shrinker_run_no == run_no)
                        break;

                if (!mutex_trylock(&sbi->umount_mutex)) {
                        p = p->next;
                        continue;
                }

                spin_unlock(&erofs_sb_list_lock);
                sbi->shrinker_run_no = run_no;

#ifdef CONFIG_EROFS_FS_ZIP
                freed += erofs_shrink_workstation(sbi, nr, false);
#endif

                spin_lock(&erofs_sb_list_lock);
                /* Get the next list element before we move this one */
                p = p->next;

                /*
                 * Move this one to the end of the list to provide some
                 * fairness.
                 */
                list_move_tail(&sbi->list, &erofs_sb_list);
                mutex_unlock(&sbi->umount_mutex);

                if (freed >= nr)
                        break;
        }
        spin_unlock(&erofs_sb_list_lock);
        return freed;
}
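
/*
 * The two shrinker callbacks above are wired up outside this file; a
 * minimal sketch of the registration, assuming a module-init path (the
 * structure name below is illustrative):
 *
 *      static struct shrinker erofs_shrinker_info = {
 *              .scan_objects = erofs_shrink_scan,
 *              .count_objects = erofs_shrink_count,
 *              .seeks = DEFAULT_SEEKS,
 *      };
 *
 *      register_shrinker(&erofs_shrinker_info);     <- at module init
 *      unregister_shrinker(&erofs_shrinker_info);   <- at module exit
 */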