// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/dax.h>
#include <linux/pfn_t.h>
#include <linux/libnvdimm.h>

#define DM_MSG_PREFIX "writecache"

#define HIGH_WATERMARK			50
#define LOW_WATERMARK			45
#define MAX_WRITEBACK_JOBS		0
#define ENDIO_LATENCY			16
#define WRITEBACK_LATENCY		64
#define AUTOCOMMIT_BLOCKS_SSD		65536
#define AUTOCOMMIT_BLOCKS_PMEM		64
#define AUTOCOMMIT_MSEC			1000

#define BITMAP_GRANULARITY	65536
#if BITMAP_GRANULARITY < PAGE_SIZE
#undef BITMAP_GRANULARITY
#define BITMAP_GRANULARITY	PAGE_SIZE
#endif

#if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_DAX_DRIVER)
#define DM_WRITECACHE_HAS_PMEM
#endif

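/*
 * pmem_assign() stores a value so that it is durable on persistent memory:
 * with pmem support it writes through memcpy_flushcache(), which flushes
 * the CPU cache so the store reaches the persistence domain. Without pmem
 * support it degrades to a plain assignment, because the backing store is
 * then ordinary RAM that is written back to the SSD explicitly.
 */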
#ifdef DM_WRITECACHE_HAS_PMEM
#define pmem_assign(dest, src)					\
do {								\
	typeof(dest) uniq = (src);				\
	memcpy_flushcache(&(dest), &uniq, sizeof(dest));	\
} while (0)
#else
#define pmem_assign(dest, src)	((dest) = (src))
#endif

#if defined(__HAVE_ARCH_MEMCPY_MCSAFE) && defined(DM_WRITECACHE_HAS_PMEM)
#define DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
#endif

#define MEMORY_SUPERBLOCK_MAGIC		0x23489321
#define MEMORY_SUPERBLOCK_VERSION	1

struct wc_memory_entry {
	__le64 original_sector;
	__le64 seq_count;
};

struct wc_memory_superblock {
	union {
		struct {
			__le32 magic;
			__le32 version;
			__le32 block_size;
			__le32 pad;
			__le64 n_blocks;
			__le64 seq_count;
		};
		__le64 padding[8];
	};
	struct wc_memory_entry entries[0];
};

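/*
 * In-core descriptor for one cache block. On 64-bit architectures the
 * write_in_progress flag and the block index are packed into bit fields
 * (1 + 47 bits) to keep the structure small. With
 * DM_WRITECACHE_HANDLE_HARDWARE_ERRORS the on-pmem metadata is also
 * shadowed here in ordinary RAM, so a hardware memory error on the
 * persistent copy does not make the entry unreadable.
 */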
struct wc_entry {
	struct rb_node rb_node;
	struct list_head lru;
	unsigned short wc_list_contiguous;
	bool write_in_progress
#if BITS_PER_LONG == 64
		:1
#endif
	;
	unsigned long index
#if BITS_PER_LONG == 64
		:47
#endif
	;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	uint64_t original_sector;
	uint64_t seq_count;
#endif
};

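/*
 * Without pmem support, WC_MODE_PMEM() and WC_MODE_FUA() are compile-time
 * false, so the compiler can discard the pmem-only code paths entirely.
 * The freelist is kept sorted only in SSD mode, where handing out entries
 * in cache-sector order lets adjacent writes stay sequential on the SSD.
 */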
#ifdef DM_WRITECACHE_HAS_PMEM
#define WC_MODE_PMEM(wc)		((wc)->pmem_mode)
#define WC_MODE_FUA(wc)			((wc)->writeback_fua)
#else
#define WC_MODE_PMEM(wc)		false
#define WC_MODE_FUA(wc)			false
#endif
#define WC_MODE_SORT_FREELIST(wc)	(!WC_MODE_PMEM(wc))

struct dm_writecache {
	struct mutex lock;
	struct list_head lru;
	union {
		struct list_head freelist;
		struct {
			struct rb_root freetree;
			struct wc_entry *current_free;
		};
	};
	struct rb_root tree;

	size_t freelist_size;
	size_t writeback_size;
	size_t freelist_high_watermark;
	size_t freelist_low_watermark;

	unsigned uncommitted_blocks;
	unsigned autocommit_blocks;
	unsigned max_writeback_jobs;

	int error;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	struct wait_queue_head freelist_wait;

	atomic_t bio_in_progress[2];
	struct wait_queue_head bio_in_progress_wait[2];

	struct dm_target *ti;
	struct dm_dev *dev;
	struct dm_dev *ssd_dev;
	sector_t start_sector;
	void *memory_map;
	uint64_t memory_map_size;
	size_t metadata_sectors;
	size_t n_blocks;
	uint64_t seq_count;
	void *block_start;
	struct wc_entry *entries;
	unsigned block_size;
	unsigned char block_size_bits;

	bool pmem_mode:1;
	bool writeback_fua:1;

	bool overwrote_committed:1;
	bool memory_vmapped:1;

	bool high_wm_percent_set:1;
	bool low_wm_percent_set:1;
	bool max_writeback_jobs_set:1;
	bool autocommit_blocks_set:1;
	bool autocommit_time_set:1;
	bool writeback_fua_set:1;
	bool flush_on_suspend:1;

	unsigned writeback_all;
	struct workqueue_struct *writeback_wq;
	struct work_struct writeback_work;
	struct work_struct flush_work;

	struct dm_io_client *dm_io;

	raw_spinlock_t endio_list_lock;
	struct list_head endio_list;
	struct task_struct *endio_thread;

	struct task_struct *flush_thread;
	struct bio_list flush_list;

	struct dm_kcopyd_client *dm_kcopyd;
	unsigned long *dirty_bitmap;
	unsigned dirty_bitmap_size;

	struct bio_set bio_set;
	mempool_t copy_pool;
};

#define WB_LIST_INLINE		16

struct writeback_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry **wc_list;
	unsigned wc_list_n;
	struct wc_entry *wc_list_inline[WB_LIST_INLINE];
	struct bio bio;
};

struct copy_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry *e;
	unsigned n_entries;
	int error;
};

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(dm_writecache_throttle,
					    "A percentage of time allocated for data copying");

static void wc_lock(struct dm_writecache *wc)
{
	mutex_lock(&wc->lock);
}

static void wc_unlock(struct dm_writecache *wc)
{
	mutex_unlock(&wc->lock);
}

#ifdef DM_WRITECACHE_HAS_PMEM
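/*
 * Map the whole cache device through DAX. If dax_direct_access() can map
 * the full range in one call, its return pointer is used directly;
 * otherwise the device is mapped page by page and stitched together with
 * vmap(). The mapping is then advanced past the start_sector offset.
 */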
static int persistent_memory_claim(struct dm_writecache *wc)
{
	int r;
	loff_t s;
	long p, da;
	pfn_t pfn;
	int id;
	struct page **pages;

	wc->memory_vmapped = false;

	if (!wc->ssd_dev->dax_dev) {
		r = -EOPNOTSUPP;
		goto err1;
	}
	s = wc->memory_map_size;
	p = s >> PAGE_SHIFT;
	if (!p) {
		r = -EINVAL;
		goto err1;
	}
	if (p != s >> PAGE_SHIFT) {
		r = -EOVERFLOW;
		goto err1;
	}

	id = dax_read_lock();

	da = dax_direct_access(wc->ssd_dev->dax_dev, 0, p, &wc->memory_map, &pfn);
	if (da < 0) {
		wc->memory_map = NULL;
		r = da;
		goto err2;
	}
	if (!pfn_t_has_page(pfn)) {
		wc->memory_map = NULL;
		r = -EOPNOTSUPP;
		goto err2;
	}
	if (da != p) {
		long i;
		wc->memory_map = NULL;
		pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
		if (!pages) {
			r = -ENOMEM;
			goto err2;
		}
		i = 0;
		do {
			long daa;
			daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i,
						NULL, &pfn);
			if (daa <= 0) {
				r = daa ? daa : -EINVAL;
				goto err3;
			}
			if (!pfn_t_has_page(pfn)) {
				r = -EOPNOTSUPP;
				goto err3;
			}
			while (daa-- && i < p) {
				pages[i++] = pfn_t_to_page(pfn);
				pfn.val++;
			}
		} while (i < p);
		wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
		if (!wc->memory_map) {
			r = -ENOMEM;
			goto err3;
		}
		kvfree(pages);
		wc->memory_vmapped = true;
	}

	dax_read_unlock(id);

	wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT;
	wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT;

	return 0;
err3:
	kvfree(pages);
err2:
	dax_read_unlock(id);
err1:
	return r;
}
#else
static int persistent_memory_claim(struct dm_writecache *wc)
{
	BUG();
}
#endif

static void persistent_memory_release(struct dm_writecache *wc)
{
	if (wc->memory_vmapped)
		vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT));
}

static struct page *persistent_memory_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static unsigned persistent_memory_page_offset(void *addr)
{
	return (unsigned long)addr & (PAGE_SIZE - 1);
}

static void persistent_memory_flush_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		flush_kernel_vmap_range(ptr, size);
}

static void persistent_memory_invalidate_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		invalidate_kernel_vmap_range(ptr, size);
}

static struct wc_memory_superblock *sb(struct dm_writecache *wc)
{
	return wc->memory_map;
}

static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	return &sb(wc)->entries[e->index];
}

static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
{
	return (char *)wc->block_start + (e->index << wc->block_size_bits);
}

static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
{
	return wc->start_sector + wc->metadata_sectors +
	       ((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
}

static uint64_t read_original_sector(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->original_sector;
#else
	return le64_to_cpu(memory_entry(wc, e)->original_sector);
#endif
}

static uint64_t read_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->seq_count;
#else
	return le64_to_cpu(memory_entry(wc, e)->seq_count);
#endif
}

static void clear_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->seq_count = -1;
#endif
	pmem_assign(memory_entry(wc, e)->seq_count, cpu_to_le64(-1));
}

static void write_original_sector_seq_count(struct dm_writecache *wc, struct wc_entry *e,
					    uint64_t original_sector, uint64_t seq_count)
{
	struct wc_memory_entry me;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->original_sector = original_sector;
	e->seq_count = seq_count;
#endif
	me.original_sector = cpu_to_le64(original_sector);
	me.seq_count = cpu_to_le64(seq_count);
	pmem_assign(*memory_entry(wc, e), me);
}

#define writecache_error(wc, err, msg, arg...)			\
do {								\
	if (!cmpxchg(&(wc)->error, 0, err))			\
		DMERR(msg, ##arg);				\
	wake_up(&(wc)->freelist_wait);				\
} while (0)

#define writecache_has_error(wc)	(unlikely(READ_ONCE((wc)->error)))

static void writecache_flush_all_metadata(struct dm_writecache *wc)
{
	if (!WC_MODE_PMEM(wc))
		memset(wc->dirty_bitmap, -1, wc->dirty_bitmap_size);
}

static void writecache_flush_region(struct dm_writecache *wc, void *ptr, size_t size)
{
	if (!WC_MODE_PMEM(wc))
		__set_bit(((char *)ptr - (char *)wc->memory_map) / BITMAP_GRANULARITY,
			  wc->dirty_bitmap);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev);

struct io_notify {
	struct dm_writecache *wc;
	struct completion c;
	atomic_t count;
};

static void writecache_notify_io(unsigned long error, void *context)
{
	struct io_notify *endio = context;

	if (unlikely(error != 0))
		writecache_error(endio->wc, -EIO, "error writing metadata");
	BUG_ON(atomic_read(&endio->count) <= 0);
	if (atomic_dec_and_test(&endio->count))
		complete(&endio->c);
}

static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
{
	wait_event(wc->bio_in_progress_wait[direction],
		   !atomic_read(&wc->bio_in_progress[direction]));
}

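/*
 * SSD mode: walk the dirty bitmap and write every dirty metadata region
 * to the cache device with asynchronous dm-io requests, wait for them all
 * to complete, then issue a flush so the metadata is durable before the
 * bitmap is cleared.
 */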
static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
	struct dm_io_region region;
	struct dm_io_request req;
	struct io_notify endio = {
		wc,
		COMPLETION_INITIALIZER_ONSTACK(endio.c),
		ATOMIC_INIT(1),
	};
	unsigned bitmap_bits = wc->dirty_bitmap_size * 8;
	unsigned i = 0;

	while (1) {
		unsigned j;
		i = find_next_bit(wc->dirty_bitmap, bitmap_bits, i);
		if (unlikely(i == bitmap_bits))
			break;
		j = find_next_zero_bit(wc->dirty_bitmap, bitmap_bits, i);

		region.bdev = wc->ssd_dev->bdev;
		region.sector = (sector_t)i * (BITMAP_GRANULARITY >> SECTOR_SHIFT);
		region.count = (sector_t)(j - i) * (BITMAP_GRANULARITY >> SECTOR_SHIFT);

		if (unlikely(region.sector >= wc->metadata_sectors))
			break;
		if (unlikely(region.sector + region.count > wc->metadata_sectors))
			region.count = wc->metadata_sectors - region.sector;

		region.sector += wc->start_sector;
		atomic_inc(&endio.count);
		req.bi_op = REQ_OP_WRITE;
		req.bi_op_flags = REQ_SYNC;
		req.mem.type = DM_IO_VMA;
		req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
		req.client = wc->dm_io;
		req.notify.fn = writecache_notify_io;
		req.notify.context = &endio;

		/* writing via async dm-io (implied by notify.fn above) won't return an error */
		(void) dm_io(&req, 1, &region, NULL);
		i = j;
	}

	writecache_notify_io(0, &endio);
	wait_for_completion_io(&endio.c);

	if (wait_for_ios)
		writecache_wait_for_ios(wc, WRITE);

	writecache_disk_flush(wc, wc->ssd_dev);

	memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size);
}

static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
	if (WC_MODE_PMEM(wc))
		wmb();
	else
		ssd_commit_flushed(wc, wait_for_ios);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
{
	int r;
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = dev->bdev;
	region.sector = 0;
	region.count = 0;
	req.bi_op = REQ_OP_WRITE;
	req.bi_op_flags = REQ_PREFLUSH;
	req.mem.type = DM_IO_KMEM;
	req.mem.ptr.addr = NULL;
	req.client = wc->dm_io;
	req.notify.fn = NULL;

	r = dm_io(&req, 1, &region, NULL);
	if (unlikely(r))
		writecache_error(wc, r, "error flushing metadata: %d", r);
}

#define WFE_RETURN_FOLLOWING	1
#define WFE_LOWEST_SEQ		2

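/*
 * Look up the cache entry for a given original-device block. Without
 * flags this is an exact-match rb-tree search; with WFE_RETURN_FOLLOWING
 * the first entry at or after the block is returned instead of NULL.
 * When several entries exist for the same block, WFE_LOWEST_SEQ selects
 * the oldest (lowest sequence count), otherwise the newest is returned.
 */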
static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
					      uint64_t block, int flags)
{
	struct wc_entry *e;
	struct rb_node *node = wc->tree.rb_node;

	if (unlikely(!node))
		return NULL;

	while (1) {
		e = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e) == block)
			break;

		node = (read_original_sector(wc, e) >= block ?
			e->rb_node.rb_left : e->rb_node.rb_right);
		if (unlikely(!node)) {
			if (!(flags & WFE_RETURN_FOLLOWING))
				return NULL;
			if (read_original_sector(wc, e) >= block) {
				return e;
			} else {
				node = rb_next(&e->rb_node);
				if (unlikely(!node))
					return NULL;
				e = container_of(node, struct wc_entry, rb_node);
				return e;
			}
		}
	}

	while (1) {
		struct wc_entry *e2;
		if (flags & WFE_LOWEST_SEQ)
			node = rb_prev(&e->rb_node);
		else
			node = rb_next(&e->rb_node);
		if (unlikely(!node))
			return e;
		e2 = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e2) != block)
			return e;
		e = e2;
	}
}

static void writecache_insert_entry(struct dm_writecache *wc, struct wc_entry *ins)
{
	struct wc_entry *e;
	struct rb_node **node = &wc->tree.rb_node, *parent = NULL;

	while (*node) {
		e = container_of(*node, struct wc_entry, rb_node);
		parent = &e->rb_node;
		if (read_original_sector(wc, e) > read_original_sector(wc, ins))
			node = &parent->rb_left;
		else
			node = &parent->rb_right;
	}
	rb_link_node(&ins->rb_node, parent, node);
	rb_insert_color(&ins->rb_node, &wc->tree);
	list_add(&ins->lru, &wc->lru);
}

static void writecache_unlink(struct dm_writecache *wc, struct wc_entry *e)
{
	list_del(&e->lru);
	rb_erase(&e->rb_node, &wc->tree);
}

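/*
 * In SSD mode free entries are kept in an rb-tree sorted by entry address,
 * which equals cache-sector order since the entries array is allocated in
 * index order; writecache_pop_from_freelist() can then hand out
 * consecutive cache sectors so large writes stay sequential on the SSD.
 * In pmem mode a simple FIFO list is sufficient.
 */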
static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry *e)
{
	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node **node = &wc->freetree.rb_node, *parent = NULL;
		if (unlikely(!*node))
			wc->current_free = e;
		while (*node) {
			parent = *node;
			if (&e->rb_node < *node)
				node = &parent->rb_left;
			else
				node = &parent->rb_right;
		}
		rb_link_node(&e->rb_node, parent, node);
		rb_insert_color(&e->rb_node, &wc->freetree);
	} else {
		list_add_tail(&e->lru, &wc->freelist);
	}
	wc->freelist_size++;
}

static inline void writecache_verify_watermark(struct dm_writecache *wc)
{
	if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
		queue_work(wc->writeback_wq, &wc->writeback_work);
}

static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, sector_t expected_sector)
{
	struct wc_entry *e;

	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node *next;
		if (unlikely(!wc->current_free))
			return NULL;
		e = wc->current_free;
		if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
			return NULL;
		next = rb_next(&e->rb_node);
		rb_erase(&e->rb_node, &wc->freetree);
		if (unlikely(!next))
			next = rb_first(&wc->freetree);
		wc->current_free = next ? container_of(next, struct wc_entry, rb_node) : NULL;
	} else {
		if (unlikely(list_empty(&wc->freelist)))
			return NULL;
		e = container_of(wc->freelist.next, struct wc_entry, lru);
		if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
			return NULL;
		list_del(&e->lru);
	}
	wc->freelist_size--;

	writecache_verify_watermark(wc);

	return e;
}

static void writecache_free_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_unlink(wc, e);
	writecache_add_to_freelist(wc, e);
	clear_seq_count(wc, e);
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (unlikely(waitqueue_active(&wc->freelist_wait)))
		wake_up(&wc->freelist_wait);
}

static void writecache_wait_on_freelist(struct dm_writecache *wc)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&wc->freelist_wait, &wait, TASK_UNINTERRUPTIBLE);
	wc_unlock(wc);
	io_schedule();
	finish_wait(&wc->freelist_wait, &wait);
	wc_lock(wc);
}

static void writecache_poison_lists(struct dm_writecache *wc)
{
	/*
	 * Catch incorrect access to these values while the device is suspended.
	 */
	memset(&wc->tree, -1, sizeof wc->tree);
	wc->lru.next = LIST_POISON1;
	wc->lru.prev = LIST_POISON2;
	wc->freelist.next = LIST_POISON1;
	wc->freelist.prev = LIST_POISON2;
}

static void writecache_flush_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (WC_MODE_PMEM(wc))
		writecache_flush_region(wc, memory_data(wc, e), wc->block_size);
}

static bool writecache_entry_is_committed(struct dm_writecache *wc, struct wc_entry *e)
{
	return read_seq_count(wc, e) < wc->seq_count;
}

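/*
 * Commit all uncommitted entries: flag the affected metadata regions and
 * commit them (bitmap-driven dm-io writes in SSD mode, a write barrier on
 * pmem), then advance and persist the superblock sequence counter. An
 * entry counts as committed once its seq_count is below the superblock's,
 * so bumping the counter commits everything flushed above. Finally, free
 * older duplicate entries superseded by the ones just committed.
 */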
static void writecache_flush(struct dm_writecache *wc)
{
	struct wc_entry *e, *e2;
	bool need_flush_after_free;

	wc->uncommitted_blocks = 0;
	del_timer(&wc->autocommit_timer);

	if (list_empty(&wc->lru))
		return;

	e = container_of(wc->lru.next, struct wc_entry, lru);
	if (writecache_entry_is_committed(wc, e)) {
		if (wc->overwrote_committed) {
			writecache_wait_for_ios(wc, WRITE);
			writecache_disk_flush(wc, wc->ssd_dev);
			wc->overwrote_committed = false;
		}
		return;
	}
	while (1) {
		writecache_flush_entry(wc, e);
		if (unlikely(e->lru.next == &wc->lru))
			break;
		e2 = container_of(e->lru.next, struct wc_entry, lru);
		if (writecache_entry_is_committed(wc, e2))
			break;
		e = e2;
		cond_resched();
	}
	writecache_commit_flushed(wc, true);

	wc->seq_count++;
	pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
	writecache_flush_region(wc, &sb(wc)->seq_count, sizeof sb(wc)->seq_count);
	writecache_commit_flushed(wc, false);

	wc->overwrote_committed = false;

	need_flush_after_free = false;
	while (1) {
		/* Free another committed entry with lower seq-count */
		struct rb_node *rb_node = rb_prev(&e->rb_node);

		if (rb_node) {
			e2 = container_of(rb_node, struct wc_entry, rb_node);
			if (read_original_sector(wc, e2) == read_original_sector(wc, e) &&
			    likely(!e2->write_in_progress)) {
				writecache_free_entry(wc, e2);
				need_flush_after_free = true;
			}
		}
		if (unlikely(e->lru.prev == &wc->lru))
			break;
		e = container_of(e->lru.prev, struct wc_entry, lru);
		cond_resched();
	}

	if (need_flush_after_free)
		writecache_commit_flushed(wc, false);
}

static void writecache_flush_work(struct work_struct *work)
{
	struct dm_writecache *wc = container_of(work, struct dm_writecache, flush_work);

	wc_lock(wc);
	writecache_flush(wc);
	wc_unlock(wc);
}

static void writecache_autocommit_timer(struct timer_list *t)
{
	struct dm_writecache *wc = from_timer(wc, t, autocommit_timer);
	if (!writecache_has_error(wc))
		queue_work(wc->writeback_wq, &wc->flush_work);
}

static void writecache_schedule_autocommit(struct dm_writecache *wc)
{
	if (!timer_pending(&wc->autocommit_timer))
		mod_timer(&wc->autocommit_timer, jiffies + wc->autocommit_jiffies);
}

static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_t end)
{
	struct wc_entry *e;
	bool discarded_something = false;

	e = writecache_find_entry(wc, start, WFE_RETURN_FOLLOWING | WFE_LOWEST_SEQ);
	if (unlikely(!e))
		return;

	while (read_original_sector(wc, e) < end) {
		struct rb_node *node = rb_next(&e->rb_node);

		if (likely(!e->write_in_progress)) {
			if (!discarded_something) {
				writecache_wait_for_ios(wc, READ);
				writecache_wait_for_ios(wc, WRITE);
				discarded_something = true;
			}
			writecache_free_entry(wc, e);
		}

		if (unlikely(!node))
			break;

		e = container_of(node, struct wc_entry, rb_node);
	}

	if (discarded_something)
		writecache_commit_flushed(wc, false);
}

static bool writecache_wait_for_writeback(struct dm_writecache *wc)
{
	if (wc->writeback_size) {
		writecache_wait_on_freelist(wc);
		return true;
	}
	return false;
}

static void writecache_suspend(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;
	bool flush_on_suspend;

	del_timer_sync(&wc->autocommit_timer);

	wc_lock(wc);
	writecache_flush(wc);
	flush_on_suspend = wc->flush_on_suspend;
	if (flush_on_suspend) {
		wc->flush_on_suspend = false;
		wc->writeback_all++;
		queue_work(wc->writeback_wq, &wc->writeback_work);
	}
	wc_unlock(wc);

	drain_workqueue(wc->writeback_wq);

	wc_lock(wc);
	if (flush_on_suspend)
		wc->writeback_all--;
	while (writecache_wait_for_writeback(wc));

	if (WC_MODE_PMEM(wc))
		persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);

	writecache_poison_lists(wc);

	wc_unlock(wc);
}

static int writecache_alloc_entries(struct dm_writecache *wc)
{
	size_t b;

	if (wc->entries)
		return 0;
	wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
	if (!wc->entries)
		return -ENOMEM;
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		e->index = b;
		e->write_in_progress = false;
	}

	return 0;
}

static void writecache_resume(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;
	size_t b;
	bool need_flush = false;
	__le64 sb_seq_count;
	int r;

	wc_lock(wc);

	if (WC_MODE_PMEM(wc))
		persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);

	wc->tree = RB_ROOT;
	INIT_LIST_HEAD(&wc->lru);
	if (WC_MODE_SORT_FREELIST(wc)) {
		wc->freetree = RB_ROOT;
		wc->current_free = NULL;
	} else {
		INIT_LIST_HEAD(&wc->freelist);
	}
	wc->freelist_size = 0;

	r = memcpy_mcsafe(&sb_seq_count, &sb(wc)->seq_count, sizeof(uint64_t));
	if (r) {
		writecache_error(wc, r, "hardware memory error when reading superblock: %d", r);
		sb_seq_count = cpu_to_le64(0);
	}
	wc->seq_count = le64_to_cpu(sb_seq_count);

#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		struct wc_memory_entry wme;
		if (writecache_has_error(wc)) {
			e->original_sector = -1;
			e->seq_count = -1;
			continue;
		}
		r = memcpy_mcsafe(&wme, memory_entry(wc, e), sizeof(struct wc_memory_entry));
		if (r) {
			writecache_error(wc, r, "hardware memory error when reading metadata entry %lu: %d",
					 (unsigned long)b, r);
			e->original_sector = -1;
			e->seq_count = -1;
		} else {
			e->original_sector = le64_to_cpu(wme.original_sector);
			e->seq_count = le64_to_cpu(wme.seq_count);
		}
	}
#endif
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		if (!writecache_entry_is_committed(wc, e)) {
			if (read_seq_count(wc, e) != -1) {
erase_this:
				clear_seq_count(wc, e);
				need_flush = true;
			}
			writecache_add_to_freelist(wc, e);
		} else {
			struct wc_entry *old;

			old = writecache_find_entry(wc, read_original_sector(wc, e), 0);
			if (!old) {
				writecache_insert_entry(wc, e);
			} else {
				if (read_seq_count(wc, old) == read_seq_count(wc, e)) {
					writecache_error(wc, -EINVAL,
						 "two identical entries, position %llu, sector %llu, sequence %llu",
						 (unsigned long long)b, (unsigned long long)read_original_sector(wc, e),
						 (unsigned long long)read_seq_count(wc, e));
				}
				if (read_seq_count(wc, old) > read_seq_count(wc, e)) {
					goto erase_this;
				} else {
					writecache_free_entry(wc, old);
					writecache_insert_entry(wc, e);
					need_flush = true;
				}
			}
		}
		cond_resched();
	}

	if (need_flush) {
		writecache_flush_all_metadata(wc);
		writecache_commit_flushed(wc, false);
	}

	writecache_verify_watermark(wc);

	wc_unlock(wc);
}

static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	if (dm_suspended(wc->ti)) {
		wc_unlock(wc);
		return -EBUSY;
	}
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return -EIO;
	}

	writecache_flush(wc);
	wc->writeback_all++;
	queue_work(wc->writeback_wq, &wc->writeback_work);
	wc_unlock(wc);

	flush_workqueue(wc->writeback_wq);

	wc_lock(wc);
	wc->writeback_all--;
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return -EIO;
	}
	wc_unlock(wc);

	return 0;
}

static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	wc->flush_on_suspend = true;
	wc_unlock(wc);

	return 0;
}

static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
			      char *result, unsigned maxlen)
{
	int r = -EINVAL;
	struct dm_writecache *wc = ti->private;

	if (!strcasecmp(argv[0], "flush"))
		r = process_flush_mesg(argc, argv, wc);
	else if (!strcasecmp(argv[0], "flush_on_suspend"))
		r = process_flush_on_suspend_mesg(argc, argv, wc);
	else
		DMERR("unrecognised message received: %s", argv[0]);

	return r;
}

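/*
 * Copy one cache block between a bio and persistent memory. Reads from
 * pmem go through memcpy_mcsafe() so that a hardware memory error is
 * reported as an I/O error on the bio instead of crashing the machine;
 * writes go through memcpy_flushcache() so the data is durable.
 */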
static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
{
	void *buf;
	unsigned long flags;
	unsigned size;
	int rw = bio_data_dir(bio);
	unsigned remaining_size = wc->block_size;

	do {
		struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
		buf = bvec_kmap_irq(&bv, &flags);
		size = bv.bv_len;
		if (unlikely(size > remaining_size))
			size = remaining_size;

		if (rw == READ) {
			int r;
			r = memcpy_mcsafe(buf, data, size);
			flush_dcache_page(bio_page(bio));
			if (unlikely(r)) {
				writecache_error(wc, r, "hardware memory error when reading data: %d", r);
				bio->bi_status = BLK_STS_IOERR;
			}
		} else {
			flush_dcache_page(bio_page(bio));
			memcpy_flushcache(data, buf, size);
		}

		bvec_kunmap_irq(buf, &flags);

		data = (char *)data + size;
		remaining_size -= size;
		bio_advance(bio, size);
	} while (unlikely(remaining_size));
}

static int writecache_flush_thread(void *data)
{
	struct dm_writecache *wc = data;

	while (1) {
		struct bio *bio;

		wc_lock(wc);
		bio = bio_list_pop(&wc->flush_list);
		if (!bio) {
			set_current_state(TASK_INTERRUPTIBLE);
			wc_unlock(wc);

			if (unlikely(kthread_should_stop())) {
				set_current_state(TASK_RUNNING);
				break;
			}

			schedule();
			continue;
		}

		if (bio_op(bio) == REQ_OP_DISCARD) {
			writecache_discard(wc, bio->bi_iter.bi_sector,
					   bio_end_sector(bio));
			wc_unlock(wc);
			bio_set_dev(bio, wc->dev->bdev);
			generic_make_request(bio);
		} else {
			writecache_flush(wc);
			wc_unlock(wc);
			if (writecache_has_error(wc))
				bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		}
	}

	return 0;
}

static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio)
{
	if (bio_list_empty(&wc->flush_list))
		wake_up_process(wc->flush_thread);
	bio_list_add(&wc->flush_list, bio);
}

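/*
 * The map function dispatches each bio:
 *  - flushes and discards are handled inline in pmem mode, or offloaded
 *    to the flush thread in SSD mode;
 *  - reads are served from the cache when the block is present, otherwise
 *    remapped to the origin device (split at the next cached boundary);
 *  - writes overwrite an existing uncommitted entry or allocate new
 *    entries from the freelist, copying the data into pmem directly or
 *    remapping the bio to the cache SSD.
 */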
static int writecache_map(struct dm_target *ti, struct bio *bio)
{
	struct wc_entry *e;
	struct dm_writecache *wc = ti->private;

	bio->bi_private = NULL;

	wc_lock(wc);

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		if (writecache_has_error(wc))
			goto unlock_error;
		if (WC_MODE_PMEM(wc)) {
			writecache_flush(wc);
			if (writecache_has_error(wc))
				goto unlock_error;
			goto unlock_submit;
		} else {
			writecache_offload_bio(wc, bio);
			goto unlock_return;
		}
	}

	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
				(wc->block_size / 512 - 1)) != 0)) {
		DMERR("I/O is not aligned, sector %llu, size %u, block size %u",
		      (unsigned long long)bio->bi_iter.bi_sector,
		      bio->bi_iter.bi_size, wc->block_size);
		goto unlock_error;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		if (writecache_has_error(wc))
			goto unlock_error;
		if (WC_MODE_PMEM(wc)) {
			writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio));
			goto unlock_remap_origin;
		} else {
			writecache_offload_bio(wc, bio);
			goto unlock_return;
		}
	}

	if (bio_data_dir(bio) == READ) {
read_next_block:
		e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
		if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
			if (WC_MODE_PMEM(wc)) {
				bio_copy_block(wc, bio, memory_data(wc, e));
				if (bio->bi_iter.bi_size)
					goto read_next_block;
				goto unlock_submit;
			} else {
				dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
				bio_set_dev(bio, wc->ssd_dev->bdev);
				bio->bi_iter.bi_sector = cache_sector(wc, e);
				if (!writecache_entry_is_committed(wc, e))
					writecache_wait_for_ios(wc, WRITE);
				goto unlock_remap;
			}
		} else {
			if (e) {
				sector_t next_boundary =
					read_original_sector(wc, e) - bio->bi_iter.bi_sector;
				if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
					dm_accept_partial_bio(bio, next_boundary);
				}
			}
			goto unlock_remap_origin;
		}
	} else {
		do {
			if (writecache_has_error(wc))
				goto unlock_error;
			e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
			if (e) {
				if (!writecache_entry_is_committed(wc, e))
					goto bio_copy;
				if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
					wc->overwrote_committed = true;
					goto bio_copy;
				}
			}
			e = writecache_pop_from_freelist(wc, (sector_t)-1);
			if (unlikely(!e)) {
				writecache_wait_on_freelist(wc);
				continue;
			}
			write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
			writecache_insert_entry(wc, e);
			wc->uncommitted_blocks++;
bio_copy:
			if (WC_MODE_PMEM(wc)) {
				bio_copy_block(wc, bio, memory_data(wc, e));
			} else {
				unsigned bio_size = wc->block_size;
				sector_t start_cache_sec = cache_sector(wc, e);
				sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);

				while (bio_size < bio->bi_iter.bi_size) {
					struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
					if (!f)
						break;
					write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
									(bio_size >> SECTOR_SHIFT), wc->seq_count);
					writecache_insert_entry(wc, f);
					wc->uncommitted_blocks++;
					bio_size += wc->block_size;
					current_cache_sec += wc->block_size >> SECTOR_SHIFT;
				}

				bio_set_dev(bio, wc->ssd_dev->bdev);
				bio->bi_iter.bi_sector = start_cache_sec;
				dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);

				if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
					wc->uncommitted_blocks = 0;
					queue_work(wc->writeback_wq, &wc->flush_work);
				} else {
					writecache_schedule_autocommit(wc);
				}
				goto unlock_remap;
			}
		} while (bio->bi_iter.bi_size);

		if (unlikely(bio->bi_opf & REQ_FUA ||
			     wc->uncommitted_blocks >= wc->autocommit_blocks))
			writecache_flush(wc);
		else
			writecache_schedule_autocommit(wc);
		goto unlock_submit;
	}

unlock_remap_origin:
	bio_set_dev(bio, wc->dev->bdev);
	wc_unlock(wc);
	return DM_MAPIO_REMAPPED;

unlock_remap:
	/* make sure that writecache_end_io decrements bio_in_progress: */
	bio->bi_private = (void *)1;
	atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);
	wc_unlock(wc);
	return DM_MAPIO_REMAPPED;

unlock_submit:
	wc_unlock(wc);
	bio_endio(bio);
	return DM_MAPIO_SUBMITTED;

unlock_return:
	wc_unlock(wc);
	return DM_MAPIO_SUBMITTED;

unlock_error:
	wc_unlock(wc);
	bio_io_error(bio);
	return DM_MAPIO_SUBMITTED;
}

static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
{
	struct dm_writecache *wc = ti->private;

	if (bio->bi_private != NULL) {
		int dir = bio_data_dir(bio);
		if (atomic_dec_and_test(&wc->bio_in_progress[dir]))
			if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir])))
				wake_up(&wc->bio_in_progress_wait[dir]);
	}
	return 0;
}

static int writecache_iterate_devices(struct dm_target *ti,
				      iterate_devices_callout_fn fn, void *data)
{
	struct dm_writecache *wc = ti->private;

	return fn(ti, wc->dev, 0, ti->len, data);
}

static void writecache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_writecache *wc = ti->private;

	if (limits->logical_block_size < wc->block_size)
		limits->logical_block_size = wc->block_size;

	if (limits->physical_block_size < wc->block_size)
		limits->physical_block_size = wc->block_size;

	if (limits->io_min < wc->block_size)
		limits->io_min = wc->block_size;
}


static void writecache_writeback_endio(struct bio *bio)
{
	struct writeback_struct *wb = container_of(bio, struct writeback_struct, bio);
	struct dm_writecache *wc = wb->wc;
	unsigned long flags;

	raw_spin_lock_irqsave(&wc->endio_list_lock, flags);
	if (unlikely(list_empty(&wc->endio_list)))
		wake_up_process(wc->endio_thread);
	list_add_tail(&wb->endio_entry, &wc->endio_list);
	raw_spin_unlock_irqrestore(&wc->endio_list_lock, flags);
}

static void writecache_copy_endio(int read_err, unsigned long write_err, void *ptr)
{
	struct copy_struct *c = ptr;
	struct dm_writecache *wc = c->wc;

	c->error = likely(!(read_err | write_err)) ? 0 : -EIO;

	raw_spin_lock_irq(&wc->endio_list_lock);
	if (unlikely(list_empty(&wc->endio_list)))
		wake_up_process(wc->endio_thread);
	list_add_tail(&c->endio_entry, &wc->endio_list);
	raw_spin_unlock_irq(&wc->endio_list_lock);
}

static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *list)
{
	unsigned i;
	struct writeback_struct *wb;
	struct wc_entry *e;
	unsigned long n_walked = 0;

	do {
		wb = list_entry(list->next, struct writeback_struct, endio_entry);
		list_del(&wb->endio_entry);

		if (unlikely(wb->bio.bi_status != BLK_STS_OK))
			writecache_error(wc, blk_status_to_errno(wb->bio.bi_status),
					"write error %d", wb->bio.bi_status);
		i = 0;
		do {
			e = wb->wc_list[i];
			BUG_ON(!e->write_in_progress);
			e->write_in_progress = false;
			INIT_LIST_HEAD(&e->lru);
			if (!writecache_has_error(wc))
				writecache_free_entry(wc, e);
			BUG_ON(!wc->writeback_size);
			wc->writeback_size--;
			n_walked++;
			if (unlikely(n_walked >= ENDIO_LATENCY)) {
				writecache_commit_flushed(wc, false);
				wc_unlock(wc);
				wc_lock(wc);
				n_walked = 0;
			}
		} while (++i < wb->wc_list_n);

		if (wb->wc_list != wb->wc_list_inline)
			kfree(wb->wc_list);
		bio_put(&wb->bio);
	} while (!list_empty(list));
}

static void __writecache_endio_ssd(struct dm_writecache *wc, struct list_head *list)
{
	struct copy_struct *c;
	struct wc_entry *e;

	do {
		c = list_entry(list->next, struct copy_struct, endio_entry);
		list_del(&c->endio_entry);

		if (unlikely(c->error))
			writecache_error(wc, c->error, "copy error");

		e = c->e;
		do {
			BUG_ON(!e->write_in_progress);
			e->write_in_progress = false;
			INIT_LIST_HEAD(&e->lru);
			if (!writecache_has_error(wc))
				writecache_free_entry(wc, e);

			BUG_ON(!wc->writeback_size);
			wc->writeback_size--;
			e++;
		} while (--c->n_entries);
		mempool_free(c, &wc->copy_pool);
	} while (!list_empty(list));
}

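/*
 * The endio thread drains the completion list in batches: it detaches the
 * whole list under the lock, issues a flush to the origin device (unless
 * FUA writes were used), and then frees the written-back entries.
 */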
static int writecache_endio_thread(void *data)
{
	struct dm_writecache *wc = data;

	while (1) {
		struct list_head list;

		raw_spin_lock_irq(&wc->endio_list_lock);
		if (!list_empty(&wc->endio_list))
			goto pop_from_list;
		set_current_state(TASK_INTERRUPTIBLE);
		raw_spin_unlock_irq(&wc->endio_list_lock);

		if (unlikely(kthread_should_stop())) {
			set_current_state(TASK_RUNNING);
			break;
		}

		schedule();

		continue;

pop_from_list:
		list = wc->endio_list;
		list.next->prev = list.prev->next = &list;
		INIT_LIST_HEAD(&wc->endio_list);
		raw_spin_unlock_irq(&wc->endio_list_lock);

		if (!WC_MODE_FUA(wc))
			writecache_disk_flush(wc, wc->dev);

		wc_lock(wc);

		if (WC_MODE_PMEM(wc)) {
			__writecache_endio_pmem(wc, &list);
		} else {
			__writecache_endio_ssd(wc, &list);
			writecache_wait_for_ios(wc, READ);
		}

		writecache_commit_flushed(wc, false);

		wc_unlock(wc);
	}

	return 0;
}

static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t gfp)
{
	struct dm_writecache *wc = wb->wc;
	unsigned block_size = wc->block_size;
	void *address = memory_data(wc, e);

	persistent_memory_flush_cache(address, block_size);
	return bio_add_page(&wb->bio, persistent_memory_page(address),
			    block_size, persistent_memory_page_offset(address)) != 0;
}

struct writeback_list {
	struct list_head list;
	size_t size;
};

static void __writeback_throttle(struct dm_writecache *wc, struct writeback_list *wbl)
{
	if (unlikely(wc->max_writeback_jobs)) {
		if (READ_ONCE(wc->writeback_size) - wbl->size >= wc->max_writeback_jobs) {
			wc_lock(wc);
			while (wc->writeback_size - wbl->size >= wc->max_writeback_jobs)
				writecache_wait_on_freelist(wc);
			wc_unlock(wc);
		}
	}
	cond_resched();
}

static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeback_list *wbl)
{
	struct wc_entry *e, *f;
	struct bio *bio;
	struct writeback_struct *wb;
	unsigned max_pages;

	while (wbl->size) {
		wbl->size--;
		e = container_of(wbl->list.prev, struct wc_entry, lru);
		list_del(&e->lru);

		max_pages = e->wc_list_contiguous;

		bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
		wb = container_of(bio, struct writeback_struct, bio);
		wb->wc = wc;
		bio->bi_end_io = writecache_writeback_endio;
		bio_set_dev(bio, wc->dev->bdev);
		bio->bi_iter.bi_sector = read_original_sector(wc, e);
		if (max_pages <= WB_LIST_INLINE ||
		    unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
							   GFP_NOIO | __GFP_NORETRY |
							   __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
			wb->wc_list = wb->wc_list_inline;
			max_pages = WB_LIST_INLINE;
		}

		BUG_ON(!wc_add_block(wb, e, GFP_NOIO));

		wb->wc_list[0] = e;
		wb->wc_list_n = 1;

		while (wbl->size && wb->wc_list_n < max_pages) {
			f = container_of(wbl->list.prev, struct wc_entry, lru);
			if (read_original_sector(wc, f) !=
			    read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
				break;
			if (!wc_add_block(wb, f, GFP_NOWAIT | __GFP_NOWARN))
				break;
			wbl->size--;
			list_del(&f->lru);
			wb->wc_list[wb->wc_list_n++] = f;
			e = f;
		}
		bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
		if (writecache_has_error(wc)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		} else {
			submit_bio(bio);
		}

		__writeback_throttle(wc, wbl);
	}
}

static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writeback_list *wbl)
{
	struct wc_entry *e, *f;
	struct dm_io_region from, to;
	struct copy_struct *c;

	while (wbl->size) {
		unsigned n_sectors;

		wbl->size--;
		e = container_of(wbl->list.prev, struct wc_entry, lru);
		list_del(&e->lru);

		n_sectors = e->wc_list_contiguous << (wc->block_size_bits - SECTOR_SHIFT);

		from.bdev = wc->ssd_dev->bdev;
		from.sector = cache_sector(wc, e);
		from.count = n_sectors;
		to.bdev = wc->dev->bdev;
		to.sector = read_original_sector(wc, e);
		to.count = n_sectors;

		c = mempool_alloc(&wc->copy_pool, GFP_NOIO);
		c->wc = wc;
		c->e = e;
		c->n_entries = e->wc_list_contiguous;

		while ((n_sectors -= wc->block_size >> SECTOR_SHIFT)) {
			wbl->size--;
			f = container_of(wbl->list.prev, struct wc_entry, lru);
			BUG_ON(f != e + 1);
			list_del(&f->lru);
			e = f;
		}

		dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);

		__writeback_throttle(wc, wbl);
	}
}

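/*
 * Background writeback: pick the least recently used committed entries
 * (or everything, when writeback_all is set), coalesce runs of entries
 * whose original sectors are contiguous, and push them to the origin
 * device, either as bios built on the pmem pages or as kcopyd copies from
 * the SSD. Entries whose block is still being written back under an older
 * sequence count are skipped and retried later.
 */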
static void writecache_writeback(struct work_struct *work)
{
	struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work);
	struct blk_plug plug;
	struct wc_entry *f, *uninitialized_var(g), *e = NULL;
	struct rb_node *node, *next_node;
	struct list_head skipped;
	struct writeback_list wbl;
	unsigned long n_walked;

	wc_lock(wc);
restart:
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return;
	}

	if (unlikely(wc->writeback_all)) {
		if (writecache_wait_for_writeback(wc))
			goto restart;
	}

	if (wc->overwrote_committed) {
		writecache_wait_for_ios(wc, WRITE);
	}

	n_walked = 0;
	INIT_LIST_HEAD(&skipped);
	INIT_LIST_HEAD(&wbl.list);
	wbl.size = 0;
	while (!list_empty(&wc->lru) &&
	       (wc->writeback_all ||
		wc->freelist_size + wc->writeback_size <= wc->freelist_low_watermark)) {

		n_walked++;
		if (unlikely(n_walked > WRITEBACK_LATENCY) &&
		    likely(!wc->writeback_all) && likely(!dm_suspended(wc->ti))) {
			queue_work(wc->writeback_wq, &wc->writeback_work);
			break;
		}

		if (unlikely(wc->writeback_all)) {
			if (unlikely(!e)) {
				writecache_flush(wc);
				e = container_of(rb_first(&wc->tree), struct wc_entry, rb_node);
			} else
				e = g;
		} else
			e = container_of(wc->lru.prev, struct wc_entry, lru);
		BUG_ON(e->write_in_progress);
		if (unlikely(!writecache_entry_is_committed(wc, e))) {
			writecache_flush(wc);
		}
		node = rb_prev(&e->rb_node);
		if (node) {
			f = container_of(node, struct wc_entry, rb_node);
			if (unlikely(read_original_sector(wc, f) ==
				     read_original_sector(wc, e))) {
				BUG_ON(!f->write_in_progress);
				list_del(&e->lru);
				list_add(&e->lru, &skipped);
				cond_resched();
				continue;
			}
		}
		wc->writeback_size++;
		list_del(&e->lru);
		list_add(&e->lru, &wbl.list);
		wbl.size++;
		e->write_in_progress = true;
		e->wc_list_contiguous = 1;

		f = e;

		while (1) {
			next_node = rb_next(&f->rb_node);
			if (unlikely(!next_node))
				break;
			g = container_of(next_node, struct wc_entry, rb_node);
			if (unlikely(read_original_sector(wc, g) ==
			    read_original_sector(wc, f))) {
				f = g;
				continue;
			}
			if (read_original_sector(wc, g) !=
			    read_original_sector(wc, f) + (wc->block_size >> SECTOR_SHIFT))
				break;
			if (unlikely(g->write_in_progress))
				break;
			if (unlikely(!writecache_entry_is_committed(wc, g)))
				break;

			if (!WC_MODE_PMEM(wc)) {
				if (g != f + 1)
					break;
			}

			n_walked++;
			//if (unlikely(n_walked > WRITEBACK_LATENCY) && likely(!wc->writeback_all))
			//	break;

			wc->writeback_size++;
			list_del(&g->lru);
			list_add(&g->lru, &wbl.list);
			wbl.size++;
			g->write_in_progress = true;
			g->wc_list_contiguous = BIO_MAX_PAGES;
			f = g;
			e->wc_list_contiguous++;
			if (unlikely(e->wc_list_contiguous == BIO_MAX_PAGES)) {
				if (unlikely(wc->writeback_all)) {
					next_node = rb_next(&f->rb_node);
					if (likely(next_node))
						g = container_of(next_node, struct wc_entry, rb_node);
				}
				break;
			}
		}
		cond_resched();
	}

	if (!list_empty(&skipped)) {
		list_splice_tail(&skipped, &wc->lru);
		/*
		 * If we didn't make any progress, we must wait until some
		 * writeback finishes to avoid burning CPU in a loop
		 */
		if (unlikely(!wbl.size))
			writecache_wait_for_writeback(wc);
	}

	wc_unlock(wc);

	blk_start_plug(&plug);

	if (WC_MODE_PMEM(wc))
		__writecache_writeback_pmem(wc, &wbl);
	else
		__writecache_writeback_ssd(wc, &wbl);

	blk_finish_plug(&plug);

	if (unlikely(wc->writeback_all)) {
		wc_lock(wc);
		while (writecache_wait_for_writeback(wc));
		wc_unlock(wc);
	}
}

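/*
 * Compute how many cache blocks fit on the device. Each cached block
 * costs block_size bytes of data plus sizeof(struct wc_memory_entry)
 * (16 bytes) of metadata, and the metadata area (superblock + entries)
 * is rounded up to a block boundary, so the initial estimate
 * device_size / (block_size + 16) is decremented until everything fits.
 * As an illustration, with 4096-byte blocks a 1 GiB cache device yields
 * roughly 1 GiB / 4112 = ~261,000 blocks.
 */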
static int calculate_memory_size(uint64_t device_size, unsigned block_size,
				 size_t *n_blocks_p, size_t *n_metadata_blocks_p)
{
	uint64_t n_blocks, offset;
	struct wc_entry e;

	n_blocks = device_size;
	do_div(n_blocks, block_size + sizeof(struct wc_memory_entry));

	while (1) {
		if (!n_blocks)
			return -ENOSPC;
		/* Verify the following entries[n_blocks] won't overflow */
		if (n_blocks >= ((size_t)-sizeof(struct wc_memory_superblock) /
				 sizeof(struct wc_memory_entry)))
			return -EFBIG;
		offset = offsetof(struct wc_memory_superblock, entries[n_blocks]);
		offset = (offset + block_size - 1) & ~(uint64_t)(block_size - 1);
		if (offset + n_blocks * block_size <= device_size)
			break;
		n_blocks--;
	}

	/* check if the bit field overflows */
	e.index = n_blocks;
	if (e.index != n_blocks)
		return -EFBIG;

	if (n_blocks_p)
		*n_blocks_p = n_blocks;
	if (n_metadata_blocks_p)
		*n_metadata_blocks_p = offset >> __ffs(block_size);
	return 0;
}

static int init_memory(struct dm_writecache *wc)
{
	size_t b;
	int r;

	r = calculate_memory_size(wc->memory_map_size, wc->block_size, &wc->n_blocks, NULL);
	if (r)
		return r;

	r = writecache_alloc_entries(wc);
	if (r)
		return r;

	for (b = 0; b < ARRAY_SIZE(sb(wc)->padding); b++)
		pmem_assign(sb(wc)->padding[b], cpu_to_le64(0));
	pmem_assign(sb(wc)->version, cpu_to_le32(MEMORY_SUPERBLOCK_VERSION));
	pmem_assign(sb(wc)->block_size, cpu_to_le32(wc->block_size));
	pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks));
	pmem_assign(sb(wc)->seq_count, cpu_to_le64(0));

	for (b = 0; b < wc->n_blocks; b++)
		write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);

	writecache_flush_all_metadata(wc);
	writecache_commit_flushed(wc, false);
	pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC));
	writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic);
	writecache_commit_flushed(wc, false);

	return 0;
}

static void writecache_dtr(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;

	if (!wc)
		return;

	if (wc->endio_thread)
		kthread_stop(wc->endio_thread);

	if (wc->flush_thread)
		kthread_stop(wc->flush_thread);

	bioset_exit(&wc->bio_set);

	mempool_exit(&wc->copy_pool);

	if (wc->writeback_wq)
		destroy_workqueue(wc->writeback_wq);

	if (wc->dev)
		dm_put_device(ti, wc->dev);

	if (wc->ssd_dev)
		dm_put_device(ti, wc->ssd_dev);

	if (wc->entries)
		vfree(wc->entries);

	if (wc->memory_map) {
		if (WC_MODE_PMEM(wc))
			persistent_memory_release(wc);
		else
			vfree(wc->memory_map);
	}

	if (wc->dm_kcopyd)
		dm_kcopyd_client_destroy(wc->dm_kcopyd);

	if (wc->dm_io)
		dm_io_client_destroy(wc->dm_io);

	if (wc->dirty_bitmap)
		vfree(wc->dirty_bitmap);

	kfree(wc);
}

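/*
 * Constructor. The table line is
 *   writecache <p|s> <origin dev> <cache dev> <block size> <#opt args> [<arg>...]
 * matching the parsing below; for example, a plausible SSD-mode table
 * line would be
 *   0 409600 writecache s /dev/sdb /dev/sdc 4096 2 high_watermark 60
 * (the device names here are illustrative, not taken from this file).
 */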
1854static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
1855{
1856 struct dm_writecache *wc;
1857 struct dm_arg_set as;
1858 const char *string;
1859 unsigned opt_params;
1860 size_t offset, data_size;
1861 int i, r;
1862 char dummy;
1863 int high_wm_percent = HIGH_WATERMARK;
1864 int low_wm_percent = LOW_WATERMARK;
1865 uint64_t x;
1866 struct wc_memory_superblock s;
1867
1868 static struct dm_arg _args[] = {
1869 {0, 10, "Invalid number of feature args"},
1870 };
1871
1872 as.argc = argc;
1873 as.argv = argv;
1874
1875 wc = kzalloc(sizeof(struct dm_writecache), GFP_KERNEL);
1876 if (!wc) {
1877 ti->error = "Cannot allocate writecache structure";
1878 r = -ENOMEM;
1879 goto bad;
1880 }
1881 ti->private = wc;
1882 wc->ti = ti;
1883
1884 mutex_init(&wc->lock);
1885 writecache_poison_lists(wc);
1886 init_waitqueue_head(&wc->freelist_wait);
1887 timer_setup(&wc->autocommit_timer, writecache_autocommit_timer, 0);
1888
1889 for (i = 0; i < 2; i++) {
1890 atomic_set(&wc->bio_in_progress[i], 0);
1891 init_waitqueue_head(&wc->bio_in_progress_wait[i]);
1892 }
1893
1894 wc->dm_io = dm_io_client_create();
1895 if (IS_ERR(wc->dm_io)) {
1896 r = PTR_ERR(wc->dm_io);
1897 ti->error = "Unable to allocate dm-io client";
1898 wc->dm_io = NULL;
1899 goto bad;
1900 }
1901
1902 wc->writeback_wq = alloc_workqueue("writecache-writeback", WQ_MEM_RECLAIM, 1);
1903 if (!wc->writeback_wq) {
1904 r = -ENOMEM;
1905 ti->error = "Could not allocate writeback workqueue";
1906 goto bad;
1907 }
1908 INIT_WORK(&wc->writeback_work, writecache_writeback);
1909 INIT_WORK(&wc->flush_work, writecache_flush_work);
1910
1911 raw_spin_lock_init(&wc->endio_list_lock);
1912 INIT_LIST_HEAD(&wc->endio_list);
1913 wc->endio_thread = kthread_create(writecache_endio_thread, wc, "writecache_endio");
1914 if (IS_ERR(wc->endio_thread)) {
1915 r = PTR_ERR(wc->endio_thread);
1916 wc->endio_thread = NULL;
1917 ti->error = "Couldn't spawn endio thread";
1918 goto bad;
1919 }
1920 wake_up_process(wc->endio_thread);
1921
1922 /*
1923 * Parse the mode (pmem or ssd)
1924 */
1925 string = dm_shift_arg(&as);
1926 if (!string)
1927 goto bad_arguments;
1928
1929 if (!strcasecmp(string, "s")) {
1930 wc->pmem_mode = false;
1931 } else if (!strcasecmp(string, "p")) {
1932#ifdef DM_WRITECACHE_HAS_PMEM
1933 wc->pmem_mode = true;
1934 wc->writeback_fua = true;
1935#else
1936 /*
1937 * If the architecture doesn't support persistent memory or
1938 * the kernel doesn't support any DAX drivers, this driver can
1939 * only be used in SSD-only mode.
1940 */
1941 r = -EOPNOTSUPP;
1942 ti->error = "Persistent memory or DAX not supported on this system";
1943 goto bad;
1944#endif
1945 } else {
1946 goto bad_arguments;
1947 }
1948
1949 if (WC_MODE_PMEM(wc)) {
1950 r = bioset_init(&wc->bio_set, BIO_POOL_SIZE,
1951 offsetof(struct writeback_struct, bio),
1952 BIOSET_NEED_BVECS);
1953 if (r) {
1954 ti->error = "Could not allocate bio set";
1955 goto bad;
1956 }
1957 } else {
1958 r = mempool_init_kmalloc_pool(&wc->copy_pool, 1, sizeof(struct copy_struct));
1959 if (r) {
1960 ti->error = "Could not allocate mempool";
1961 goto bad;
1962 }
1963 }
1964
1965 /*
1966 * Parse the origin data device
1967 */
1968 string = dm_shift_arg(&as);
1969 if (!string)
1970 goto bad_arguments;
1971 r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->dev);
1972 if (r) {
1973 ti->error = "Origin data device lookup failed";
1974 goto bad;
1975 }
1976
1977 /*
1978 * Parse cache data device (be it pmem or ssd)
1979 */
1980 string = dm_shift_arg(&as);
1981 if (!string)
1982 goto bad_arguments;
1983
1984 r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->ssd_dev);
1985 if (r) {
1986 ti->error = "Cache data device lookup failed";
1987 goto bad;
1988 }
1989 wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);
1990
1991 /*
1992 * Parse the cache block size
1993 */
1994 string = dm_shift_arg(&as);
1995 if (!string)
1996 goto bad_arguments;
1997 if (sscanf(string, "%u%c", &wc->block_size, &dummy) != 1 ||
1998 wc->block_size < 512 || wc->block_size > PAGE_SIZE ||
1999 (wc->block_size & (wc->block_size - 1))) {
2000 r = -EINVAL;
2001 ti->error = "Invalid block size";
2002 goto bad;
2003 }
2004 wc->block_size_bits = __ffs(wc->block_size);
2005
2006 wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
2007 wc->autocommit_blocks = !WC_MODE_PMEM(wc) ? AUTOCOMMIT_BLOCKS_SSD : AUTOCOMMIT_BLOCKS_PMEM;
2008 wc->autocommit_jiffies = msecs_to_jiffies(AUTOCOMMIT_MSEC);
2009
2010 /*
2011 * Parse optional arguments
2012 */
2013 r = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
2014 if (r)
2015 goto bad;
2016
2017 while (opt_params) {
2018 string = dm_shift_arg(&as), opt_params--;
2019 if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
2020 unsigned long long start_sector;
2021 string = dm_shift_arg(&as), opt_params--;
2022 if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
2023 goto invalid_optional;
			wc->start_sector = start_sector;
			if (wc->start_sector != start_sector ||
			    wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
				goto invalid_optional;
		} else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1)
				goto invalid_optional;
			if (high_wm_percent < 0 || high_wm_percent > 100)
				goto invalid_optional;
			wc->high_wm_percent_set = true;
		} else if (!strcasecmp(string, "low_watermark") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%d%c", &low_wm_percent, &dummy) != 1)
				goto invalid_optional;
			if (low_wm_percent < 0 || low_wm_percent > 100)
				goto invalid_optional;
			wc->low_wm_percent_set = true;
		} else if (!strcasecmp(string, "writeback_jobs") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &wc->max_writeback_jobs, &dummy) != 1)
				goto invalid_optional;
			wc->max_writeback_jobs_set = true;
		} else if (!strcasecmp(string, "autocommit_blocks") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &wc->autocommit_blocks, &dummy) != 1)
				goto invalid_optional;
			wc->autocommit_blocks_set = true;
		} else if (!strcasecmp(string, "autocommit_time") && opt_params >= 1) {
			unsigned autocommit_msecs;
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &autocommit_msecs, &dummy) != 1)
				goto invalid_optional;
			if (autocommit_msecs > 3600000)
				goto invalid_optional;
			wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs);
			wc->autocommit_time_set = true;
		} else if (!strcasecmp(string, "fua")) {
			if (WC_MODE_PMEM(wc)) {
				wc->writeback_fua = true;
				wc->writeback_fua_set = true;
			} else goto invalid_optional;
		} else if (!strcasecmp(string, "nofua")) {
			if (WC_MODE_PMEM(wc)) {
				wc->writeback_fua = false;
				wc->writeback_fua_set = true;
			} else goto invalid_optional;
		} else {
invalid_optional:
			r = -EINVAL;
			ti->error = "Invalid optional argument";
			goto bad;
		}
	}

	if (high_wm_percent < low_wm_percent) {
		r = -EINVAL;
		ti->error = "High watermark must be greater than or equal to low watermark";
		goto bad;
	}

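	/*
	 * Set up the cache backing store.  In pmem mode the metadata area
	 * is mapped directly; in SSD mode it is shadowed in vmalloc()ed
	 * memory and read in from the device below.
	 */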
	if (WC_MODE_PMEM(wc)) {
		r = persistent_memory_claim(wc);
		if (r) {
			ti->error = "Unable to map persistent memory for cache";
			goto bad;
		}
	} else {
		struct dm_io_region region;
		struct dm_io_request req;
		size_t n_blocks, n_metadata_blocks;
		uint64_t n_bitmap_bits;

		wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT;

		bio_list_init(&wc->flush_list);
		wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush");
		if (IS_ERR(wc->flush_thread)) {
			r = PTR_ERR(wc->flush_thread);
			wc->flush_thread = NULL;
			ti->error = "Couldn't spawn flush thread";
			goto bad;
		}
		wake_up_process(wc->flush_thread);

		r = calculate_memory_size(wc->memory_map_size, wc->block_size,
					  &n_blocks, &n_metadata_blocks);
		if (r) {
			ti->error = "Invalid device size";
			goto bad;
		}

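		/*
		 * One dirty bit covers BITMAP_GRANULARITY bytes of the
		 * metadata area; round up to a whole number of bits.
		 */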
		n_bitmap_bits = (((uint64_t)n_metadata_blocks << wc->block_size_bits) +
				 BITMAP_GRANULARITY - 1) / BITMAP_GRANULARITY;
		/* this is a limitation of the test_bit functions */
		if (n_bitmap_bits > 1U << 31) {
			r = -EFBIG;
			ti->error = "Invalid device size";
			goto bad;
		}

		wc->memory_map = vmalloc(n_metadata_blocks << wc->block_size_bits);
		if (!wc->memory_map) {
			r = -ENOMEM;
			ti->error = "Unable to allocate memory for metadata";
			goto bad;
		}

		wc->dm_kcopyd = dm_kcopyd_client_create(&dm_kcopyd_throttle);
		if (IS_ERR(wc->dm_kcopyd)) {
			r = PTR_ERR(wc->dm_kcopyd);
			ti->error = "Unable to allocate dm-kcopyd client";
			wc->dm_kcopyd = NULL;
			goto bad;
		}

		wc->metadata_sectors = n_metadata_blocks << (wc->block_size_bits - SECTOR_SHIFT);
		wc->dirty_bitmap_size = (n_bitmap_bits + BITS_PER_LONG - 1) /
			BITS_PER_LONG * sizeof(unsigned long);
		wc->dirty_bitmap = vzalloc(wc->dirty_bitmap_size);
		if (!wc->dirty_bitmap) {
			r = -ENOMEM;
			ti->error = "Unable to allocate dirty bitmap";
			goto bad;
		}

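		/*
		 * Read the on-disk metadata into the vmalloc()ed shadow
		 * copy with one synchronous dm-io request (a NULL
		 * notify.fn makes dm_io() wait for completion).
		 */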
		region.bdev = wc->ssd_dev->bdev;
		region.sector = wc->start_sector;
		region.count = wc->metadata_sectors;
		req.bi_op = REQ_OP_READ;
		req.bi_op_flags = REQ_SYNC;
		req.mem.type = DM_IO_VMA;
		req.mem.ptr.vma = (char *)wc->memory_map;
		req.client = wc->dm_io;
		req.notify.fn = NULL;

		r = dm_io(&req, 1, &region, NULL);
		if (r) {
			ti->error = "Unable to read metadata";
			goto bad;
		}
	}

	r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
	if (r) {
		ti->error = "Hardware memory error when reading superblock";
		goto bad;
	}
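	/*
	 * An all-zero magic and version means the device has never been
	 * initialized: build a fresh superblock and re-read it.
	 */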
	if (!le32_to_cpu(s.magic) && !le32_to_cpu(s.version)) {
		r = init_memory(wc);
		if (r) {
			ti->error = "Unable to initialize device";
			goto bad;
		}
		r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
		if (r) {
			ti->error = "Hardware memory error when reading superblock";
			goto bad;
		}
	}

	if (le32_to_cpu(s.magic) != MEMORY_SUPERBLOCK_MAGIC) {
		ti->error = "Invalid magic in the superblock";
		r = -EINVAL;
		goto bad;
	}

	if (le32_to_cpu(s.version) != MEMORY_SUPERBLOCK_VERSION) {
		ti->error = "Invalid version in the superblock";
		r = -EINVAL;
		goto bad;
	}

	if (le32_to_cpu(s.block_size) != wc->block_size) {
		ti->error = "Block size does not match superblock";
		r = -EINVAL;
		goto bad;
	}

	wc->n_blocks = le64_to_cpu(s.n_blocks);

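	/*
	 * Metadata layout: the superblock, one wc_memory_entry per cache
	 * block, padding up to block_size, then the cached data itself.
	 * Each multiplication and addition below is checked for overflow
	 * by undoing it and comparing against the inputs.
	 */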
	offset = wc->n_blocks * sizeof(struct wc_memory_entry);
	if (offset / sizeof(struct wc_memory_entry) != le64_to_cpu(sb(wc)->n_blocks)) {
overflow:
		ti->error = "Overflow in size calculation";
		r = -EINVAL;
		goto bad;
	}
	offset += sizeof(struct wc_memory_superblock);
	if (offset < sizeof(struct wc_memory_superblock))
		goto overflow;
	offset = (offset + wc->block_size - 1) & ~(size_t)(wc->block_size - 1);
	data_size = wc->n_blocks * (size_t)wc->block_size;
	if (!offset || (data_size / wc->block_size != wc->n_blocks) ||
	    (offset + data_size < offset))
		goto overflow;
	if (offset + data_size > wc->memory_map_size) {
		ti->error = "Memory area is too small";
		r = -EINVAL;
		goto bad;
	}

	wc->metadata_sectors = offset >> SECTOR_SHIFT;
	wc->block_start = (char *)sb(wc) + offset;

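	/*
	 * Convert the watermark percentages into absolute free-list
	 * sizes, rounding to the nearest block: e.g. with n_blocks = 1000
	 * and high_wm_percent = 50, writeback starts once the free list
	 * drops below (1000 * (100 - 50) + 50) / 100 = 500 entries.
	 */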
	x = (uint64_t)wc->n_blocks * (100 - high_wm_percent);
	x += 50;
	do_div(x, 100);
	wc->freelist_high_watermark = x;
	x = (uint64_t)wc->n_blocks * (100 - low_wm_percent);
	x += 50;
	do_div(x, 100);
	wc->freelist_low_watermark = x;

	r = writecache_alloc_entries(wc);
	if (r) {
		ti->error = "Cannot allocate memory";
		goto bad;
	}

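	/* Advertise flush and discard support to the device-mapper core. */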
	ti->num_flush_bios = 1;
	ti->flush_supported = true;
	ti->num_discard_bios = 1;

	if (WC_MODE_PMEM(wc))
		persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);

	return 0;

bad_arguments:
	r = -EINVAL;
	ti->error = "Bad arguments";
bad:
	writecache_dtr(ti);
	return r;
}

static void writecache_status(struct dm_target *ti, status_type_t type,
			      unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_writecache *wc = ti->private;
	unsigned extra_args;
	unsigned sz = 0;
	uint64_t x;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%ld %llu %llu %llu", writecache_has_error(wc),
		       (unsigned long long)wc->n_blocks, (unsigned long long)wc->freelist_size,
		       (unsigned long long)wc->writeback_size);
		break;
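	/*
	 * STATUSTYPE_TABLE must echo back a constructor-compatible
	 * argument list, so only options the user set explicitly are
	 * reported.
	 */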
	case STATUSTYPE_TABLE:
		DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 'p' : 's',
		       wc->dev->name, wc->ssd_dev->name, wc->block_size);
		extra_args = 0;
		if (wc->start_sector)
			extra_args += 2;
		if (wc->high_wm_percent_set)
			extra_args += 2;
		if (wc->low_wm_percent_set)
			extra_args += 2;
		if (wc->max_writeback_jobs_set)
			extra_args += 2;
		if (wc->autocommit_blocks_set)
			extra_args += 2;
		if (wc->autocommit_time_set)
			extra_args += 2;
		if (wc->writeback_fua_set)
			extra_args++;

		DMEMIT("%u", extra_args);
		if (wc->start_sector)
			DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector);
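		/*
		 * Invert the constructor's watermark calculation, rounding
		 * to the nearest percent, so the reported value matches
		 * what the user originally passed in.
		 */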
		if (wc->high_wm_percent_set) {
			x = (uint64_t)wc->freelist_high_watermark * 100;
			x += wc->n_blocks / 2;
			do_div(x, (size_t)wc->n_blocks);
			DMEMIT(" high_watermark %u", 100 - (unsigned)x);
		}
		if (wc->low_wm_percent_set) {
			x = (uint64_t)wc->freelist_low_watermark * 100;
			x += wc->n_blocks / 2;
			do_div(x, (size_t)wc->n_blocks);
			DMEMIT(" low_watermark %u", 100 - (unsigned)x);
		}
		if (wc->max_writeback_jobs_set)
			DMEMIT(" writeback_jobs %u", wc->max_writeback_jobs);
		if (wc->autocommit_blocks_set)
			DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks);
		if (wc->autocommit_time_set)
			DMEMIT(" autocommit_time %u", jiffies_to_msecs(wc->autocommit_jiffies));
		if (wc->writeback_fua_set)
			DMEMIT(" %sfua", wc->writeback_fua ? "" : "no");
		break;
	}
}

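/* Device-mapper entry points for the writecache target. */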
static struct target_type writecache_target = {
	.name			= "writecache",
	.version		= {1, 2, 0},
	.module			= THIS_MODULE,
	.ctr			= writecache_ctr,
	.dtr			= writecache_dtr,
	.status			= writecache_status,
	.postsuspend		= writecache_suspend,
	.resume			= writecache_resume,
	.message		= writecache_message,
	.map			= writecache_map,
	.end_io			= writecache_end_io,
	.iterate_devices	= writecache_iterate_devices,
	.io_hints		= writecache_io_hints,
};

static int __init dm_writecache_init(void)
{
	int r;

	r = dm_register_target(&writecache_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		return r;
	}

	return 0;
}

static void __exit dm_writecache_exit(void)
{
	dm_unregister_target(&writecache_target);
}

module_init(dm_writecache_init);
module_exit(dm_writecache_exit);

MODULE_DESCRIPTION(DM_NAME " writecache target");
MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");