// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/blkdev.h>

#include "../dm-core.h"
#include "pcache_internal.h"
#include "cache_dev.h"
#include "backing_dev.h"
#include "cache.h"
#include "dm_pcache.h"

static struct kmem_cache *backing_req_cache;
static struct kmem_cache *backing_bvec_cache;

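/* Tear down the request and bvec mempools set up by backing_dev_init(). */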
static void backing_dev_exit(struct pcache_backing_dev *backing_dev)
{
	mempool_exit(&backing_dev->req_pool);
	mempool_exit(&backing_dev->bvec_pool);
}

static void req_submit_fn(struct work_struct *work);
static void req_complete_fn(struct work_struct *work);
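/*
 * Set up the per-backing-device resources: the request and bvec mempools,
 * the submit/complete lists with their locks and work items, and the
 * inflight-request counter and waitqueue used for shutdown.
 */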
static int backing_dev_init(struct dm_pcache *pcache)
{
	struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
	int ret;

	ret = mempool_init_slab_pool(&backing_dev->req_pool, 128, backing_req_cache);
	if (ret)
		goto err;

	ret = mempool_init_slab_pool(&backing_dev->bvec_pool, 128, backing_bvec_cache);
	if (ret)
		goto req_pool_exit;

	INIT_LIST_HEAD(&backing_dev->submit_list);
	INIT_LIST_HEAD(&backing_dev->complete_list);
	spin_lock_init(&backing_dev->submit_lock);
	spin_lock_init(&backing_dev->complete_lock);
	INIT_WORK(&backing_dev->req_submit_work, req_submit_fn);
	INIT_WORK(&backing_dev->req_complete_work, req_complete_fn);
	atomic_set(&backing_dev->inflight_reqs, 0);
	init_waitqueue_head(&backing_dev->inflight_wq);

	return 0;

req_pool_exit:
	mempool_exit(&backing_dev->req_pool);
err:
	return ret;
}

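/* Initialize the backing device state and record its size in sectors. */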
int backing_dev_start(struct dm_pcache *pcache)
{
	struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
	int ret;

	ret = backing_dev_init(pcache);
	if (ret)
		return ret;

	backing_dev->dev_size = bdev_nr_sectors(backing_dev->dm_dev->bdev);

	return 0;
}

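/*
 * Quiesce the backing device: wait until all inflight requests have
 * completed, flush the submit/complete workers, then release resources.
 */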
void backing_dev_stop(struct dm_pcache *pcache)
{
	struct pcache_backing_dev *backing_dev = &pcache->backing_dev;

	/*
	 * There should not be any new requests coming in; just wait
	 * for the inflight requests to complete.
	 */
	wait_event(backing_dev->inflight_wq,
		   atomic_read(&backing_dev->inflight_reqs) == 0);

	flush_work(&backing_dev->req_submit_work);
	flush_work(&backing_dev->req_complete_work);

	backing_dev_exit(backing_dev);
}

/* pcache_backing_dev_req functions */
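/*
 * Finish a backing request: run the caller's completion callback, drop the
 * reference on the upper request (REQ type) or free a mempool bvec array
 * (KMEM type), free the request itself, and wake up anyone waiting for the
 * inflight count to reach zero.
 */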
void backing_dev_req_end(struct pcache_backing_dev_req *backing_req)
{
	struct pcache_backing_dev *backing_dev = backing_req->backing_dev;

	if (backing_req->end_req)
		backing_req->end_req(backing_req, backing_req->ret);

	switch (backing_req->type) {
	case BACKING_DEV_REQ_TYPE_REQ:
		if (backing_req->req.upper_req)
			pcache_req_put(backing_req->req.upper_req, backing_req->ret);
		break;
	case BACKING_DEV_REQ_TYPE_KMEM:
		if (backing_req->kmem.bvecs != backing_req->kmem.inline_bvecs)
			mempool_free(backing_req->kmem.bvecs, &backing_dev->bvec_pool);
		break;
	default:
		BUG();
	}

	mempool_free(backing_req, &backing_dev->req_pool);

	if (atomic_dec_and_test(&backing_dev->inflight_reqs))
		wake_up(&backing_dev->inflight_wq);
}

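/* Completion work: drain complete_list and finish each request in process context. */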
static void req_complete_fn(struct work_struct *work)
{
	struct pcache_backing_dev *backing_dev = container_of(work, struct pcache_backing_dev, req_complete_work);
	struct pcache_backing_dev_req *backing_req;
	LIST_HEAD(tmp_list);

	spin_lock_irq(&backing_dev->complete_lock);
	list_splice_init(&backing_dev->complete_list, &tmp_list);
	spin_unlock_irq(&backing_dev->complete_lock);

	while (!list_empty(&tmp_list)) {
		backing_req = list_first_entry(&tmp_list,
					       struct pcache_backing_dev_req, node);
		list_del_init(&backing_req->node);
		backing_dev_req_end(backing_req);
	}
}

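/*
 * bio end_io handler: record the completion status and defer the rest of the
 * completion to req_complete_work, since this may be called in irq context.
 */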
static void backing_dev_bio_end(struct bio *bio)
{
	struct pcache_backing_dev_req *backing_req = bio->bi_private;
	struct pcache_backing_dev *backing_dev = backing_req->backing_dev;
	unsigned long flags;

	backing_req->ret = blk_status_to_errno(bio->bi_status);

	spin_lock_irqsave(&backing_dev->complete_lock, flags);
	list_move_tail(&backing_req->node, &backing_dev->complete_list);
	queue_work(BACKING_DEV_TO_PCACHE(backing_dev)->task_wq, &backing_dev->req_complete_work);
	spin_unlock_irqrestore(&backing_dev->complete_lock, flags);
}

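/* Submission work: drain submit_list and submit each queued bio. */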
static void req_submit_fn(struct work_struct *work)
{
	struct pcache_backing_dev *backing_dev = container_of(work, struct pcache_backing_dev, req_submit_work);
	struct pcache_backing_dev_req *backing_req;
	LIST_HEAD(tmp_list);

	spin_lock(&backing_dev->submit_lock);
	list_splice_init(&backing_dev->submit_list, &tmp_list);
	spin_unlock(&backing_dev->submit_lock);

	while (!list_empty(&tmp_list)) {
		backing_req = list_first_entry(&tmp_list,
					       struct pcache_backing_dev_req, node);
		list_del_init(&backing_req->node);
		submit_bio_noacct(&backing_req->bio);
	}
}

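/*
 * Submit a backing request, either directly in the caller's context or
 * deferred to req_submit_work on the pcache task workqueue.
 */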
void backing_dev_req_submit(struct pcache_backing_dev_req *backing_req, bool direct)
{
	struct pcache_backing_dev *backing_dev = backing_req->backing_dev;

	if (direct) {
		submit_bio_noacct(&backing_req->bio);
		return;
	}

	spin_lock(&backing_dev->submit_lock);
	list_add_tail(&backing_req->node, &backing_dev->submit_list);
	queue_work(BACKING_DEV_TO_PCACHE(backing_dev)->task_wq, &backing_dev->req_submit_work);
	spin_unlock(&backing_dev->submit_lock);
}

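/*
 * Add the pages backing a kernel buffer to @bio, handling both directly
 * mapped and vmalloc'ed addresses.
 */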
static void bio_map(struct bio *bio, void *base, size_t size)
{
	struct page *page;
	unsigned int offset;
	unsigned int len;

	if (!is_vmalloc_addr(base)) {
		page = virt_to_page(base);
		offset = offset_in_page(base);

		BUG_ON(!bio_add_page(bio, page, size, offset));
		return;
	}

	flush_kernel_vmap_range(base, size);
	while (size) {
		page = vmalloc_to_page(base);
		offset = offset_in_page(base);
		len = min_t(size_t, PAGE_SIZE - offset, size);

		BUG_ON(!bio_add_page(bio, page, len, offset));
		size -= len;
		base += len;
	}
}

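/* Allocate a REQ-type backing request and clone the upper request's bio onto the backing bdev. */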
static struct pcache_backing_dev_req *req_type_req_alloc(struct pcache_backing_dev *backing_dev,
							  struct pcache_backing_dev_req_opts *opts)
{
	struct pcache_request *pcache_req = opts->req.upper_req;
	struct pcache_backing_dev_req *backing_req;
	struct bio *orig = pcache_req->bio;

	backing_req = mempool_alloc(&backing_dev->req_pool, opts->gfp_mask);
	if (!backing_req)
		return NULL;

	memset(backing_req, 0, sizeof(struct pcache_backing_dev_req));

	bio_init_clone(backing_dev->dm_dev->bdev, &backing_req->bio, orig, opts->gfp_mask);

	backing_req->type = BACKING_DEV_REQ_TYPE_REQ;
	backing_req->backing_dev = backing_dev;
	atomic_inc(&backing_dev->inflight_reqs);

	return backing_req;
}

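/*
 * Allocate a KMEM-type backing request; use the inline bvecs when they are
 * enough, otherwise take a bvec array from the bvec mempool.
 */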
static struct pcache_backing_dev_req *kmem_type_req_alloc(struct pcache_backing_dev *backing_dev,
							   struct pcache_backing_dev_req_opts *opts)
{
	struct pcache_backing_dev_req *backing_req;
	u32 n_vecs = bio_add_max_vecs(opts->kmem.data, opts->kmem.len);

	backing_req = mempool_alloc(&backing_dev->req_pool, opts->gfp_mask);
	if (!backing_req)
		return NULL;

	memset(backing_req, 0, sizeof(struct pcache_backing_dev_req));

	if (n_vecs > BACKING_DEV_REQ_INLINE_BVECS) {
		backing_req->kmem.bvecs = mempool_alloc(&backing_dev->bvec_pool, opts->gfp_mask);
		if (!backing_req->kmem.bvecs)
			goto free_backing_req;
	} else {
		backing_req->kmem.bvecs = backing_req->kmem.inline_bvecs;
	}

	backing_req->kmem.n_vecs = n_vecs;
	backing_req->type = BACKING_DEV_REQ_TYPE_KMEM;
	backing_req->backing_dev = backing_dev;
	atomic_inc(&backing_dev->inflight_reqs);

	return backing_req;

free_backing_req:
	mempool_free(backing_req, &backing_dev->req_pool);
	return NULL;
}

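/* Allocate a backing request of the type selected in @opts. */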
struct pcache_backing_dev_req *backing_dev_req_alloc(struct pcache_backing_dev *backing_dev,
						      struct pcache_backing_dev_req_opts *opts)
{
	if (opts->type == BACKING_DEV_REQ_TYPE_REQ)
		return req_type_req_alloc(backing_dev, opts);

	if (opts->type == BACKING_DEV_REQ_TYPE_KMEM)
		return kmem_type_req_alloc(backing_dev, opts);

	BUG();
}

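/*
 * Initialize a REQ-type request: trim the cloned bio to the requested range,
 * point it at the corresponding offset of the upper request, and hold a
 * reference on the upper request until completion.
 */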
static void req_type_req_init(struct pcache_backing_dev_req *backing_req,
			      struct pcache_backing_dev_req_opts *opts)
{
	struct pcache_request *pcache_req = opts->req.upper_req;
	struct bio *clone;
	u32 off = opts->req.req_off;
	u32 len = opts->req.len;

	clone = &backing_req->bio;
	BUG_ON(off & SECTOR_MASK);
	BUG_ON(len & SECTOR_MASK);
	bio_trim(clone, off >> SECTOR_SHIFT, len >> SECTOR_SHIFT);

	clone->bi_iter.bi_sector = (pcache_req->off + off) >> SECTOR_SHIFT;
	clone->bi_private = backing_req;
	clone->bi_end_io = backing_dev_bio_end;

	INIT_LIST_HEAD(&backing_req->node);
	backing_req->end_req = opts->end_fn;

	pcache_req_get(pcache_req);
	backing_req->req.upper_req = pcache_req;
	backing_req->req.bio_off = off;
}

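/*
 * Initialize a KMEM-type request: build a bio over the kernel buffer and
 * direct it at the requested offset on the backing device.
 */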
static void kmem_type_req_init(struct pcache_backing_dev_req *backing_req,
			       struct pcache_backing_dev_req_opts *opts)
{
	struct pcache_backing_dev *backing_dev = backing_req->backing_dev;
	struct bio *backing_bio;

	bio_init(&backing_req->bio, backing_dev->dm_dev->bdev, backing_req->kmem.bvecs,
		 backing_req->kmem.n_vecs, opts->kmem.opf);

	backing_bio = &backing_req->bio;
	bio_map(backing_bio, opts->kmem.data, opts->kmem.len);

	backing_bio->bi_iter.bi_sector = (opts->kmem.backing_off) >> SECTOR_SHIFT;
	backing_bio->bi_private = backing_req;
	backing_bio->bi_end_io = backing_dev_bio_end;

	INIT_LIST_HEAD(&backing_req->node);
	backing_req->end_req = opts->end_fn;
	backing_req->priv_data = opts->priv_data;
}

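/* Initialize a backing request according to the type selected in @opts. */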
void backing_dev_req_init(struct pcache_backing_dev_req *backing_req,
			  struct pcache_backing_dev_req_opts *opts)
{
	if (opts->type == BACKING_DEV_REQ_TYPE_REQ)
		return req_type_req_init(backing_req, opts);

	if (opts->type == BACKING_DEV_REQ_TYPE_KMEM)
		return kmem_type_req_init(backing_req, opts);

	BUG();
}

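/* Allocate and initialize a backing request in one call. */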
struct pcache_backing_dev_req *backing_dev_req_create(struct pcache_backing_dev *backing_dev,
						      struct pcache_backing_dev_req_opts *opts)
{
	struct pcache_backing_dev_req *backing_req;

	backing_req = backing_dev_req_alloc(backing_dev, opts);
	if (!backing_req)
		return NULL;

	backing_dev_req_init(backing_req, opts);

	return backing_req;
}

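/* Issue a flush to the backing block device. */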
void backing_dev_flush(struct pcache_backing_dev *backing_dev)
{
	blkdev_issue_flush(backing_dev->dm_dev->bdev);
}

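/*
 * Create the global slab caches for backing requests and for the bvec
 * arrays used by KMEM-type requests.
 */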
int pcache_backing_init(void)
{
	u32 max_bvecs = (PCACHE_CACHE_SUBTREE_SIZE >> PAGE_SHIFT) + 1;
	int ret;

	backing_req_cache = KMEM_CACHE(pcache_backing_dev_req, 0);
	if (!backing_req_cache) {
		ret = -ENOMEM;
		goto err;
	}

	backing_bvec_cache = kmem_cache_create("pcache-bvec-slab",
					       max_bvecs * sizeof(struct bio_vec),
					       0, 0, NULL);
	if (!backing_bvec_cache) {
		ret = -ENOMEM;
		goto destroy_req_cache;
	}

	return 0;
destroy_req_cache:
	kmem_cache_destroy(backing_req_cache);
err:
	return ret;
}

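/* Destroy the slab caches created by pcache_backing_init(). */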
void pcache_backing_exit(void)
{
	kmem_cache_destroy(backing_bvec_cache);
	kmem_cache_destroy(backing_req_cache);
}