// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/bio.h>

#include "../dm-core.h"
#include "cache_dev.h"
#include "backing_dev.h"
#include "cache.h"
#include "dm_pcache.h"

/*
 * Kick the deferred-request worker.  Deferred requests are only worth
 * retrying once the cache is no longer full, so the work item is not
 * queued while cache_full is still set.
 */
void pcache_defer_reqs_kick(struct dm_pcache *pcache)
{
        struct pcache_cache *cache = &pcache->cache;

        spin_lock(&cache->seg_map_lock);
        if (!cache->cache_full)
                queue_work(pcache->task_wq, &pcache->defered_req_work);
        spin_unlock(&cache->seg_map_lock);
}

/* Park a request on the deferred list; defered_req_fn() retries it later. */
static void defer_req(struct pcache_request *pcache_req)
{
        struct dm_pcache *pcache = pcache_req->pcache;

        BUG_ON(!list_empty(&pcache_req->list_node));

        spin_lock(&pcache->defered_req_list_lock);
        list_add(&pcache_req->list_node, &pcache->defered_req_list);
        pcache_defer_reqs_kick(pcache);
        spin_unlock(&pcache->defered_req_list_lock);
}

static void defered_req_fn(struct work_struct *work)
{
        struct dm_pcache *pcache = container_of(work, struct dm_pcache, defered_req_work);
        struct pcache_request *pcache_req;
        LIST_HEAD(tmp_list);
        int ret;

        if (pcache_is_stopping(pcache))
                return;

        /* Drain the deferred list under the lock, then retry without it. */
        spin_lock(&pcache->defered_req_list_lock);
        list_splice_init(&pcache->defered_req_list, &tmp_list);
        spin_unlock(&pcache->defered_req_list_lock);

        while (!list_empty(&tmp_list)) {
                pcache_req = list_first_entry(&tmp_list,
                                              struct pcache_request, list_node);
                list_del_init(&pcache_req->list_node);
                pcache_req->ret = 0;
                ret = pcache_cache_handle_req(&pcache->cache, pcache_req);
                if (ret == -EBUSY)
                        defer_req(pcache_req);
                else
                        pcache_req_put(pcache_req, ret);
        }
}

void pcache_req_get(struct pcache_request *pcache_req)
{
        kref_get(&pcache_req->ref);
}

static void end_req(struct kref *ref)
{
        struct pcache_request *pcache_req = container_of(ref, struct pcache_request, ref);
        struct dm_pcache *pcache = pcache_req->pcache;
        struct bio *bio = pcache_req->bio;
        int ret = pcache_req->ret;

        if (ret == -EBUSY) {
                /* Take a fresh reference and retry once space frees up. */
                pcache_req_get(pcache_req);
                defer_req(pcache_req);
        } else {
                bio->bi_status = errno_to_blk_status(ret);
                bio_endio(bio);

                if (atomic_dec_and_test(&pcache->inflight_reqs))
                        wake_up(&pcache->inflight_wq);
        }
}

void pcache_req_put(struct pcache_request *pcache_req, int ret)
{
        /* Set the return status if it is not already set */
        if (ret && !pcache_req->ret)
                pcache_req->ret = ret;

        kref_put(&pcache_req->ref, end_req);
}

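/*
 * Request lifecycle (summary added for clarity; the cache-side details
 * live in cache.c and are not shown here):
 *
 *   dm_pcache_map_bio()   kref_init()  -> initial reference
 *   pcache_req_put()      drops a reference; at zero, end_req() runs
 *   end_req()             ends the bio, or on -EBUSY takes a new
 *                         reference and re-defers the request
 *
 * A request can therefore cycle through defer/retry any number of
 * times before bio_endio() finally runs and inflight_reqs is dropped.
 */
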
static bool at_least_one_arg(struct dm_arg_set *as, char **error)
{
        if (!as->argc) {
                *error = "Insufficient args";
                return false;
        }

        return true;
}

static int parse_cache_dev(struct dm_pcache *pcache, struct dm_arg_set *as,
                           char **error)
{
        int ret;

        if (!at_least_one_arg(as, error))
                return -EINVAL;
        ret = dm_get_device(pcache->ti, dm_shift_arg(as),
                            BLK_OPEN_READ | BLK_OPEN_WRITE,
                            &pcache->cache_dev.dm_dev);
        if (ret) {
                *error = "Error opening cache device";
                return ret;
        }

        return 0;
}

static int parse_backing_dev(struct dm_pcache *pcache, struct dm_arg_set *as,
                             char **error)
{
        int ret;

        if (!at_least_one_arg(as, error))
                return -EINVAL;

        ret = dm_get_device(pcache->ti, dm_shift_arg(as),
                            BLK_OPEN_READ | BLK_OPEN_WRITE,
                            &pcache->backing_dev.dm_dev);
        if (ret) {
                *error = "Error opening backing device";
                return ret;
        }

        return 0;
}

static void pcache_init_opts(struct pcache_cache_options *opts)
{
        opts->cache_mode = PCACHE_CACHE_MODE_WRITEBACK;
        opts->data_crc = false;
}

static int parse_cache_opts(struct dm_pcache *pcache, struct dm_arg_set *as,
                            char **error)
{
        struct pcache_cache_options *opts = &pcache->opts;
        static const struct dm_arg _args[] = {
                {0, 4, "Invalid number of cache option arguments"},
        };
        unsigned int argc;
        const char *arg;
        int ret;

        pcache_init_opts(opts);
        if (!as->argc)
                return 0;

        ret = dm_read_arg_group(_args, as, &argc, error);
        if (ret)
                return -EINVAL;

        /* Options arrive as <key> <value> pairs; consume both per loop. */
        while (argc) {
                arg = dm_shift_arg(as);
                argc--;

                if (!strcmp(arg, "cache_mode")) {
                        arg = dm_shift_arg(as);
                        if (!strcmp(arg, "writeback")) {
                                opts->cache_mode = PCACHE_CACHE_MODE_WRITEBACK;
                        } else {
                                *error = "Invalid cache mode parameter";
                                return -EINVAL;
                        }
                        argc--;
                } else if (!strcmp(arg, "data_crc")) {
                        arg = dm_shift_arg(as);
                        if (!strcmp(arg, "true")) {
                                opts->data_crc = true;
                        } else if (!strcmp(arg, "false")) {
                                opts->data_crc = false;
                        } else {
                                *error = "Invalid data crc parameter";
                                return -EINVAL;
                        }
                        argc--;
                } else {
                        *error = "Unrecognised cache option requested";
                        return -EINVAL;
                }
        }

        return 0;
}

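/*
 * Illustrative table-line suffix (not from the original source): both
 * options together form a 4-argument option group, e.g.
 *
 *     <cache_dev> <backing_dev> 4 cache_mode writeback data_crc true
 *
 * With no option group at all, the defaults from pcache_init_opts()
 * (writeback mode, data_crc false) apply.
 */
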
static int pcache_start(struct dm_pcache *pcache, char **error)
{
        int ret;

        ret = cache_dev_start(pcache);
        if (ret) {
                *error = "Failed to start cache dev";
                return ret;
        }

        ret = backing_dev_start(pcache);
        if (ret) {
                *error = "Failed to start backing dev";
                goto stop_cache;
        }

        ret = pcache_cache_start(pcache);
        if (ret) {
                *error = "Failed to start pcache";
                goto stop_backing;
        }

        return 0;
stop_backing:
        backing_dev_stop(pcache);
stop_cache:
        cache_dev_stop(pcache);

        return ret;
}

static void pcache_destroy_args(struct dm_pcache *pcache)
{
        if (pcache->cache_dev.dm_dev)
                dm_put_device(pcache->ti, pcache->cache_dev.dm_dev);
        if (pcache->backing_dev.dm_dev)
                dm_put_device(pcache->ti, pcache->backing_dev.dm_dev);
}

static int pcache_parse_args(struct dm_pcache *pcache, unsigned int argc, char **argv,
                             char **error)
{
        struct dm_arg_set as;
        int ret;

        as.argc = argc;
        as.argv = argv;

        /* Parse cache device */
        ret = parse_cache_dev(pcache, &as, error);
        if (ret)
                return ret;

        /* Parse backing device */
        ret = parse_backing_dev(pcache, &as, error);
        if (ret)
                goto out;

        /* Parse optional arguments */
        ret = parse_cache_opts(pcache, &as, error);
        if (ret)
                goto out;

        return 0;
out:
        pcache_destroy_args(pcache);
        return ret;
}

static int dm_pcache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct mapped_device *md = ti->table->md;
        struct dm_pcache *pcache;
        int ret;

        /* pcache does not support replacing the table of a live device */
        if (md->map) {
                ti->error = "Don't support table loading for live md";
                return -EOPNOTSUPP;
        }

        /* Allocate memory for the cache structure */
        pcache = kzalloc(sizeof(struct dm_pcache), GFP_KERNEL);
        if (!pcache)
                return -ENOMEM;

        pcache->task_wq = alloc_workqueue("pcache-%s-wq", WQ_UNBOUND | WQ_MEM_RECLAIM,
                                          0, md->name);
        if (!pcache->task_wq) {
                ret = -ENOMEM;
                goto free_pcache;
        }

        spin_lock_init(&pcache->defered_req_list_lock);
        INIT_LIST_HEAD(&pcache->defered_req_list);
        INIT_WORK(&pcache->defered_req_work, defered_req_fn);
        pcache->ti = ti;

        ret = pcache_parse_args(pcache, argc, argv, &ti->error);
        if (ret)
                goto destroy_wq;

        ret = pcache_start(pcache, &ti->error);
        if (ret)
                goto destroy_args;

        ti->num_flush_bios = 1;
        ti->flush_supported = true;
        ti->per_io_data_size = sizeof(struct pcache_request);
        ti->private = pcache;
        atomic_set(&pcache->inflight_reqs, 0);
        atomic_set(&pcache->state, PCACHE_STATE_RUNNING);
        init_waitqueue_head(&pcache->inflight_wq);

        return 0;
destroy_args:
        pcache_destroy_args(pcache);
destroy_wq:
        destroy_workqueue(pcache->task_wq);
free_pcache:
        kfree(pcache);

        return ret;
}

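/*
 * Example construction (illustrative sketch; the device paths and the
 * sizing via "blockdev --getsz" are assumptions, not taken from this
 * source):
 *
 *     dmsetup create pcache0 --table \
 *       "0 $(blockdev --getsz /dev/sdb) pcache /dev/pmem0 /dev/sdb \
 *        4 cache_mode writeback data_crc true"
 *
 * Reloading the table on a live device is rejected by the md->map
 * check in dm_pcache_ctr() above.
 */
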
/* Flush the worker, then fail any still-deferred requests with -EIO. */
static void defer_req_stop(struct dm_pcache *pcache)
{
        struct pcache_request *pcache_req;
        LIST_HEAD(tmp_list);

        flush_work(&pcache->defered_req_work);

        spin_lock(&pcache->defered_req_list_lock);
        list_splice_init(&pcache->defered_req_list, &tmp_list);
        spin_unlock(&pcache->defered_req_list_lock);

        while (!list_empty(&tmp_list)) {
                pcache_req = list_first_entry(&tmp_list,
                                              struct pcache_request, list_node);
                list_del_init(&pcache_req->list_node);
                pcache_req_put(pcache_req, -EIO);
        }
}

static void dm_pcache_dtr(struct dm_target *ti)
{
        struct dm_pcache *pcache;

        pcache = ti->private;
        atomic_set(&pcache->state, PCACHE_STATE_STOPPING);
        defer_req_stop(pcache);

        /* Wait for every in-flight request to complete before teardown. */
        wait_event(pcache->inflight_wq,
                   atomic_read(&pcache->inflight_reqs) == 0);

        pcache_cache_stop(pcache);
        backing_dev_stop(pcache);
        cache_dev_stop(pcache);

        pcache_destroy_args(pcache);
        drain_workqueue(pcache->task_wq);
        destroy_workqueue(pcache->task_wq);

        kfree(pcache);
}

static int dm_pcache_map_bio(struct dm_target *ti, struct bio *bio)
{
        struct pcache_request *pcache_req = dm_per_bio_data(bio, sizeof(struct pcache_request));
        struct dm_pcache *pcache = ti->private;
        int ret;

        pcache_req->pcache = pcache;
        kref_init(&pcache_req->ref);
        pcache_req->ret = 0;
        pcache_req->bio = bio;
        pcache_req->off = (u64)bio->bi_iter.bi_sector << SECTOR_SHIFT;
        pcache_req->data_len = bio->bi_iter.bi_size;
        INIT_LIST_HEAD(&pcache_req->list_node);
        atomic_inc(&pcache->inflight_reqs);

        ret = pcache_cache_handle_req(&pcache->cache, pcache_req);
        if (ret == -EBUSY)
                defer_req(pcache_req);
        else
                pcache_req_put(pcache_req, ret);

        return DM_MAPIO_SUBMITTED;
}

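/*
 * Note: dm_pcache_map_bio() always returns DM_MAPIO_SUBMITTED; the bio
 * completes asynchronously via bio_endio() in end_req() once the last
 * reference from kref_init() above has been dropped.
 */
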
static void dm_pcache_status(struct dm_target *ti, status_type_t type,
                             unsigned int status_flags, char *result,
                             unsigned int maxlen)
{
        struct dm_pcache *pcache = ti->private;
        struct pcache_cache_dev *cache_dev = &pcache->cache_dev;
        struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
        struct pcache_cache *cache = &pcache->cache;
        unsigned int sz = 0;

        switch (type) {
        case STATUSTYPE_INFO:
                DMEMIT("%x %u %u %u %u %x %u:%u %u:%u %u:%u",
                       cache_dev->sb_flags,
                       cache_dev->seg_num,
                       cache->n_segs,
                       bitmap_weight(cache->seg_map, cache->n_segs),
                       pcache_cache_get_gc_percent(cache),
                       cache->cache_info.flags,
                       cache->key_head.cache_seg->cache_seg_id,
                       cache->key_head.seg_off,
                       cache->dirty_tail.cache_seg->cache_seg_id,
                       cache->dirty_tail.seg_off,
                       cache->key_tail.cache_seg->cache_seg_id,
                       cache->key_tail.seg_off);
                break;
        case STATUSTYPE_TABLE:
                /*
                 * Must round-trip through the constructor: emit the option
                 * name exactly as parse_cache_opts() expects ("data_crc").
                 */
                DMEMIT("%s %s 4 cache_mode writeback data_crc %s",
                       cache_dev->dm_dev->name,
                       backing_dev->dm_dev->name,
                       cache_data_crc_on(cache) ? "true" : "false");
                break;
        case STATUSTYPE_IMA:
                *result = '\0';
                break;
        }
}

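/*
 * STATUSTYPE_INFO fields, in DMEMIT order above: superblock flags
 * (hex), cache-dev segment count, cache segment count, segments in
 * use, gc percent, cache_info flags (hex), then three
 * <segment>:<offset> positions for key_head, dirty_tail and key_tail.
 */
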
static int dm_pcache_message(struct dm_target *ti, unsigned int argc,
                             char **argv, char *result, unsigned int maxlen)
{
        struct dm_pcache *pcache = ti->private;
        unsigned long val;

        if (argc != 2)
                goto err;

        if (!strcasecmp(argv[0], "gc_percent")) {
                if (kstrtoul(argv[1], 10, &val))
                        goto err;

                return pcache_cache_set_gc_percent(&pcache->cache, val);
        }
err:
        return -EINVAL;
}

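/*
 * Example message (device name hypothetical):
 *
 *     dmsetup message pcache0 0 gc_percent 70
 *
 * "gc_percent" is the only message currently handled; anything else
 * fails with -EINVAL.
 */
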
static struct target_type dm_pcache_target = {
        .name = "pcache",
        .version = {0, 1, 0},
        .module = THIS_MODULE,
        .features = DM_TARGET_SINGLETON,        /* only one target per table */
        .ctr = dm_pcache_ctr,
        .dtr = dm_pcache_dtr,
        .map = dm_pcache_map_bio,
        .status = dm_pcache_status,
        .message = dm_pcache_message,
};

static int __init dm_pcache_init(void)
{
        int ret;

        ret = pcache_backing_init();
        if (ret)
                goto err;

        ret = pcache_cache_init();
        if (ret)
                goto backing_exit;

        ret = dm_register_target(&dm_pcache_target);
        if (ret)
                goto cache_exit;
        return 0;

cache_exit:
        pcache_cache_exit();
backing_exit:
        pcache_backing_exit();
err:
        return ret;
}
module_init(dm_pcache_init);

static void __exit dm_pcache_exit(void)
{
        dm_unregister_target(&dm_pcache_target);
        pcache_cache_exit();
        pcache_backing_exit();
}
module_exit(dm_pcache_exit);

MODULE_DESCRIPTION("dm-pcache Persistent Cache for block device");
MODULE_AUTHOR("Dongsheng Yang <dongsheng.yang@linux.dev>");
MODULE_LICENSE("GPL");