// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/dax.h>
#include <linux/pfn_t.h>
#include <linux/libnvdimm.h>

#define DM_MSG_PREFIX "writecache"

#define HIGH_WATERMARK			50
#define LOW_WATERMARK			45
#define MAX_WRITEBACK_JOBS		0
#define ENDIO_LATENCY			16
#define WRITEBACK_LATENCY		64
#define AUTOCOMMIT_BLOCKS_SSD		65536
#define AUTOCOMMIT_BLOCKS_PMEM		64
#define AUTOCOMMIT_MSEC			1000
#define MAX_AGE_DIV			16
#define MAX_AGE_UNSPECIFIED		-1UL
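
/*
 * Default tuning values. HIGH_WATERMARK and LOW_WATERMARK are percentages
 * that control when background writeback starts and stops; they end up as
 * the freelist_high_watermark/freelist_low_watermark block counts tested in
 * writecache_verify_watermark() and writecache_writeback(). A
 * MAX_WRITEBACK_JOBS of 0 means "no limit" (see __writeback_throttle()).
 * ENDIO_LATENCY and WRITEBACK_LATENCY bound how many entries are processed
 * under wc->lock before it is dropped or the work is re-queued, and the
 * AUTOCOMMIT_* values are the default commit thresholds (blocks or
 * milliseconds, whichever is hit first).
 */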

#define BITMAP_GRANULARITY	65536
#if BITMAP_GRANULARITY < PAGE_SIZE
#undef BITMAP_GRANULARITY
#define BITMAP_GRANULARITY	PAGE_SIZE
#endif

#if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_DAX_DRIVER)
#define DM_WRITECACHE_HAS_PMEM
#endif

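/*
 * pmem_assign() stores a value into the metadata area: on
 * DM_WRITECACHE_HAS_PMEM builds it copies through memcpy_flushcache() so the
 * store is pushed out of the CPU cache to persistent memory; otherwise it
 * degenerates to a plain assignment, because the "memory map" is then just a
 * buffer that gets written back to the SSD via dm-io.
 */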
#ifdef DM_WRITECACHE_HAS_PMEM
#define pmem_assign(dest, src)					\
do {								\
	typeof(dest) uniq = (src);				\
	memcpy_flushcache(&(dest), &uniq, sizeof(dest));	\
} while (0)
#else
#define pmem_assign(dest, src)	((dest) = (src))
#endif

#if defined(__HAVE_ARCH_MEMCPY_MCSAFE) && defined(DM_WRITECACHE_HAS_PMEM)
#define DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
#endif

#define MEMORY_SUPERBLOCK_MAGIC		0x23489321
#define MEMORY_SUPERBLOCK_VERSION	1

struct wc_memory_entry {
	__le64 original_sector;
	__le64 seq_count;
};

struct wc_memory_superblock {
	union {
		struct {
			__le32 magic;
			__le32 version;
			__le32 block_size;
			__le32 pad;
			__le64 n_blocks;
			__le64 seq_count;
		};
		__le64 padding[8];
	};
	struct wc_memory_entry entries[0];
};

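/*
 * In-core entry, one per cache block. On 64-bit systems write_in_progress
 * and index are packed into a single word (1 + 47 bits) to keep the entry
 * array small; the 47-bit index also bounds the number of cache blocks
 * (calculate_memory_size() checks that the bit field does not overflow).
 */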
struct wc_entry {
	struct rb_node rb_node;
	struct list_head lru;
	unsigned short wc_list_contiguous;
	bool write_in_progress
#if BITS_PER_LONG == 64
		:1
#endif
	;
	unsigned long index
#if BITS_PER_LONG == 64
		:47
#endif
	;
	unsigned long age;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	uint64_t original_sector;
	uint64_t seq_count;
#endif
};

#ifdef DM_WRITECACHE_HAS_PMEM
#define WC_MODE_PMEM(wc)	((wc)->pmem_mode)
#define WC_MODE_FUA(wc)		((wc)->writeback_fua)
#else
#define WC_MODE_PMEM(wc)	false
#define WC_MODE_FUA(wc)		false
#endif
#define WC_MODE_SORT_FREELIST(wc)	(!WC_MODE_PMEM(wc))

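/*
 * Per-target state. "tree" indexes cached blocks by original sector and
 * "lru" orders them by last write. Free entries live either on a plain list
 * (pmem mode) or, in SSD mode, on an rb-tree sorted by entry address
 * (WC_MODE_SORT_FREELIST) so that writecache_pop_from_freelist() can hand
 * out consecutive cache sectors for contiguous writes.
 */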
struct dm_writecache {
	struct mutex lock;
	struct list_head lru;
	union {
		struct list_head freelist;
		struct {
			struct rb_root freetree;
			struct wc_entry *current_free;
		};
	};
	struct rb_root tree;

	size_t freelist_size;
	size_t writeback_size;
	size_t freelist_high_watermark;
	size_t freelist_low_watermark;
	unsigned long max_age;

	unsigned uncommitted_blocks;
	unsigned autocommit_blocks;
	unsigned max_writeback_jobs;

	int error;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	struct wait_queue_head freelist_wait;

	struct timer_list max_age_timer;

	atomic_t bio_in_progress[2];
	struct wait_queue_head bio_in_progress_wait[2];

	struct dm_target *ti;
	struct dm_dev *dev;
	struct dm_dev *ssd_dev;
	sector_t start_sector;
	void *memory_map;
	uint64_t memory_map_size;
	size_t metadata_sectors;
	size_t n_blocks;
	uint64_t seq_count;
	void *block_start;
	struct wc_entry *entries;
	unsigned block_size;
	unsigned char block_size_bits;

	bool pmem_mode:1;
	bool writeback_fua:1;

	bool overwrote_committed:1;
	bool memory_vmapped:1;

	bool high_wm_percent_set:1;
	bool low_wm_percent_set:1;
	bool max_writeback_jobs_set:1;
	bool autocommit_blocks_set:1;
	bool autocommit_time_set:1;
	bool writeback_fua_set:1;
	bool flush_on_suspend:1;
	bool cleaner:1;

	unsigned writeback_all;
	struct workqueue_struct *writeback_wq;
	struct work_struct writeback_work;
	struct work_struct flush_work;

	struct dm_io_client *dm_io;

	raw_spinlock_t endio_list_lock;
	struct list_head endio_list;
	struct task_struct *endio_thread;

	struct task_struct *flush_thread;
	struct bio_list flush_list;

	struct dm_kcopyd_client *dm_kcopyd;
	unsigned long *dirty_bitmap;
	unsigned dirty_bitmap_size;

	struct bio_set bio_set;
	mempool_t copy_pool;
};

#define WB_LIST_INLINE		16

struct writeback_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry **wc_list;
	unsigned wc_list_n;
	struct wc_entry *wc_list_inline[WB_LIST_INLINE];
	struct bio bio;
};

struct copy_struct {
	struct list_head endio_entry;
	struct dm_writecache *wc;
	struct wc_entry *e;
	unsigned n_entries;
	int error;
};

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(dm_writecache_throttle,
					"A percentage of time allocated for data copying");

static void wc_lock(struct dm_writecache *wc)
{
	mutex_lock(&wc->lock);
}

static void wc_unlock(struct dm_writecache *wc)
{
	mutex_unlock(&wc->lock);
}

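/*
 * Map the cache device's persistent memory into the kernel address space.
 * If dax_direct_access() can hand back the whole range contiguously we use
 * that mapping directly; otherwise the pages are collected and stitched
 * together with vmap() (memory_vmapped is set so the mapping is torn down
 * again in persistent_memory_release()).
 */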
#ifdef DM_WRITECACHE_HAS_PMEM
static int persistent_memory_claim(struct dm_writecache *wc)
{
	int r;
	loff_t s;
	long p, da;
	pfn_t pfn;
	int id;
	struct page **pages;

	wc->memory_vmapped = false;

	s = wc->memory_map_size;
	p = s >> PAGE_SHIFT;
	if (!p) {
		r = -EINVAL;
		goto err1;
	}
	if (p != s >> PAGE_SHIFT) {
		r = -EOVERFLOW;
		goto err1;
	}

	id = dax_read_lock();

	da = dax_direct_access(wc->ssd_dev->dax_dev, 0, p, &wc->memory_map, &pfn);
	if (da < 0) {
		wc->memory_map = NULL;
		r = da;
		goto err2;
	}
	if (!pfn_t_has_page(pfn)) {
		wc->memory_map = NULL;
		r = -EOPNOTSUPP;
		goto err2;
	}
	if (da != p) {
		long i;
		wc->memory_map = NULL;
		pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
		if (!pages) {
			r = -ENOMEM;
			goto err2;
		}
		i = 0;
		do {
			long daa;
			daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i,
						NULL, &pfn);
			if (daa <= 0) {
				r = daa ? daa : -EINVAL;
				goto err3;
			}
			if (!pfn_t_has_page(pfn)) {
				r = -EOPNOTSUPP;
				goto err3;
			}
			while (daa-- && i < p) {
				pages[i++] = pfn_t_to_page(pfn);
				pfn.val++;
				if (!(i & 15))
					cond_resched();
			}
		} while (i < p);
		wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
		if (!wc->memory_map) {
			r = -ENOMEM;
			goto err3;
		}
		kvfree(pages);
		wc->memory_vmapped = true;
	}

	dax_read_unlock(id);

	wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT;
	wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT;

	return 0;
err3:
	kvfree(pages);
err2:
	dax_read_unlock(id);
err1:
	return r;
}
#else
static int persistent_memory_claim(struct dm_writecache *wc)
{
	BUG();
}
#endif

static void persistent_memory_release(struct dm_writecache *wc)
{
	if (wc->memory_vmapped)
		vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT));
}

static struct page *persistent_memory_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static unsigned persistent_memory_page_offset(void *addr)
{
	return (unsigned long)addr & (PAGE_SIZE - 1);
}

static void persistent_memory_flush_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		flush_kernel_vmap_range(ptr, size);
}

static void persistent_memory_invalidate_cache(void *ptr, size_t size)
{
	if (is_vmalloc_addr(ptr))
		invalidate_kernel_vmap_range(ptr, size);
}

static struct wc_memory_superblock *sb(struct dm_writecache *wc)
{
	return wc->memory_map;
}

static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	return &sb(wc)->entries[e->index];
}

static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
{
	return (char *)wc->block_start + (e->index << wc->block_size_bits);
}

static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
{
	return wc->start_sector + wc->metadata_sectors +
		((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
}

static uint64_t read_original_sector(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->original_sector;
#else
	return le64_to_cpu(memory_entry(wc, e)->original_sector);
#endif
}

static uint64_t read_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	return e->seq_count;
#else
	return le64_to_cpu(memory_entry(wc, e)->seq_count);
#endif
}

static void clear_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->seq_count = -1;
#endif
	pmem_assign(memory_entry(wc, e)->seq_count, cpu_to_le64(-1));
}

static void write_original_sector_seq_count(struct dm_writecache *wc, struct wc_entry *e,
					    uint64_t original_sector, uint64_t seq_count)
{
	struct wc_memory_entry me;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	e->original_sector = original_sector;
	e->seq_count = seq_count;
#endif
	me.original_sector = cpu_to_le64(original_sector);
	me.seq_count = cpu_to_le64(seq_count);
	pmem_assign(*memory_entry(wc, e), me);
}

#define writecache_error(wc, err, msg, arg...)			\
do {								\
	if (!cmpxchg(&(wc)->error, 0, err))			\
		DMERR(msg, ##arg);				\
	wake_up(&(wc)->freelist_wait);				\
} while (0)

#define writecache_has_error(wc)	(unlikely(READ_ONCE((wc)->error)))

static void writecache_flush_all_metadata(struct dm_writecache *wc)
{
	if (!WC_MODE_PMEM(wc))
		memset(wc->dirty_bitmap, -1, wc->dirty_bitmap_size);
}

static void writecache_flush_region(struct dm_writecache *wc, void *ptr, size_t size)
{
	if (!WC_MODE_PMEM(wc))
		__set_bit(((char *)ptr - (char *)wc->memory_map) / BITMAP_GRANULARITY,
			  wc->dirty_bitmap);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev);

struct io_notify {
	struct dm_writecache *wc;
	struct completion c;
	atomic_t count;
};

static void writecache_notify_io(unsigned long error, void *context)
{
	struct io_notify *endio = context;

	if (unlikely(error != 0))
		writecache_error(endio->wc, -EIO, "error writing metadata");
	BUG_ON(atomic_read(&endio->count) <= 0);
	if (atomic_dec_and_test(&endio->count))
		complete(&endio->c);
}

static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
{
	wait_event(wc->bio_in_progress_wait[direction],
		   !atomic_read(&wc->bio_in_progress[direction]));
}

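/*
 * SSD-mode metadata commit: scan the dirty bitmap for runs of dirty regions,
 * write each run from the in-core metadata copy to the cache device with
 * async dm-io, wait for all of them, then issue a disk flush. The extra
 * io_notify count taken at initialization (ATOMIC_INIT(1)) is dropped by the
 * final writecache_notify_io(0, ...) call, so the completion fires only
 * after every submitted write has ended.
 */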
static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
	struct dm_io_region region;
	struct dm_io_request req;
	struct io_notify endio = {
		wc,
		COMPLETION_INITIALIZER_ONSTACK(endio.c),
		ATOMIC_INIT(1),
	};
	unsigned bitmap_bits = wc->dirty_bitmap_size * 8;
	unsigned i = 0;

	while (1) {
		unsigned j;
		i = find_next_bit(wc->dirty_bitmap, bitmap_bits, i);
		if (unlikely(i == bitmap_bits))
			break;
		j = find_next_zero_bit(wc->dirty_bitmap, bitmap_bits, i);

		region.bdev = wc->ssd_dev->bdev;
		region.sector = (sector_t)i * (BITMAP_GRANULARITY >> SECTOR_SHIFT);
		region.count = (sector_t)(j - i) * (BITMAP_GRANULARITY >> SECTOR_SHIFT);

		if (unlikely(region.sector >= wc->metadata_sectors))
			break;
		if (unlikely(region.sector + region.count > wc->metadata_sectors))
			region.count = wc->metadata_sectors - region.sector;

		region.sector += wc->start_sector;
		atomic_inc(&endio.count);
		req.bi_op = REQ_OP_WRITE;
		req.bi_op_flags = REQ_SYNC;
		req.mem.type = DM_IO_VMA;
		req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
		req.client = wc->dm_io;
		req.notify.fn = writecache_notify_io;
		req.notify.context = &endio;

		/* writing via async dm-io (implied by notify.fn above) won't return an error */
		(void) dm_io(&req, 1, &region, NULL);
		i = j;
	}

	writecache_notify_io(0, &endio);
	wait_for_completion_io(&endio.c);

	if (wait_for_ios)
		writecache_wait_for_ios(wc, WRITE);

	writecache_disk_flush(wc, wc->ssd_dev);

	memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size);
}

static void ssd_commit_superblock(struct dm_writecache *wc)
{
	int r;
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = wc->ssd_dev->bdev;
	region.sector = 0;
	region.count = PAGE_SIZE;

	if (unlikely(region.sector + region.count > wc->metadata_sectors))
		region.count = wc->metadata_sectors - region.sector;

	region.sector += wc->start_sector;

	req.bi_op = REQ_OP_WRITE;
	req.bi_op_flags = REQ_SYNC | REQ_FUA;
	req.mem.type = DM_IO_VMA;
	req.mem.ptr.vma = (char *)wc->memory_map;
	req.client = wc->dm_io;
	req.notify.fn = NULL;
	req.notify.context = NULL;

	r = dm_io(&req, 1, &region, NULL);
	if (unlikely(r))
		writecache_error(wc, r, "error writing superblock");
}

static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
	if (WC_MODE_PMEM(wc))
		pmem_wmb();
	else
		ssd_commit_flushed(wc, wait_for_ios);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
{
	int r;
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = dev->bdev;
	region.sector = 0;
	region.count = 0;
	req.bi_op = REQ_OP_WRITE;
	req.bi_op_flags = REQ_PREFLUSH;
	req.mem.type = DM_IO_KMEM;
	req.mem.ptr.addr = NULL;
	req.client = wc->dm_io;
	req.notify.fn = NULL;

	r = dm_io(&req, 1, &region, NULL);
	if (unlikely(r))
		writecache_error(wc, r, "error flushing metadata: %d", r);
}

#define WFE_RETURN_FOLLOWING	1
#define WFE_LOWEST_SEQ		2

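/*
 * Look up the entry for "block" in the rb-tree. With WFE_RETURN_FOLLOWING,
 * a miss returns the entry with the next higher original sector instead of
 * NULL. Because the same original sector can briefly be cached by two
 * entries (an old committed copy plus a new one), the second loop picks the
 * one with the lowest or highest seq_count depending on WFE_LOWEST_SEQ.
 */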
static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
					      uint64_t block, int flags)
{
	struct wc_entry *e;
	struct rb_node *node = wc->tree.rb_node;

	if (unlikely(!node))
		return NULL;

	while (1) {
		e = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e) == block)
			break;

		node = (read_original_sector(wc, e) >= block ?
			e->rb_node.rb_left : e->rb_node.rb_right);
		if (unlikely(!node)) {
			if (!(flags & WFE_RETURN_FOLLOWING))
				return NULL;
			if (read_original_sector(wc, e) >= block) {
				return e;
			} else {
				node = rb_next(&e->rb_node);
				if (unlikely(!node))
					return NULL;
				e = container_of(node, struct wc_entry, rb_node);
				return e;
			}
		}
	}

	while (1) {
		struct wc_entry *e2;
		if (flags & WFE_LOWEST_SEQ)
			node = rb_prev(&e->rb_node);
		else
			node = rb_next(&e->rb_node);
		if (unlikely(!node))
			return e;
		e2 = container_of(node, struct wc_entry, rb_node);
		if (read_original_sector(wc, e2) != block)
			return e;
		e = e2;
	}
}

static void writecache_insert_entry(struct dm_writecache *wc, struct wc_entry *ins)
{
	struct wc_entry *e;
	struct rb_node **node = &wc->tree.rb_node, *parent = NULL;

	while (*node) {
		e = container_of(*node, struct wc_entry, rb_node);
		parent = &e->rb_node;
		if (read_original_sector(wc, e) > read_original_sector(wc, ins))
			node = &parent->rb_left;
		else
			node = &parent->rb_right;
	}
	rb_link_node(&ins->rb_node, parent, node);
	rb_insert_color(&ins->rb_node, &wc->tree);
	list_add(&ins->lru, &wc->lru);
	ins->age = jiffies;
}

static void writecache_unlink(struct dm_writecache *wc, struct wc_entry *e)
{
	list_del(&e->lru);
	rb_erase(&e->rb_node, &wc->tree);
}

static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry *e)
{
	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node **node = &wc->freetree.rb_node, *parent = NULL;
		if (unlikely(!*node))
			wc->current_free = e;
		while (*node) {
			parent = *node;
			if (&e->rb_node < *node)
				node = &parent->rb_left;
			else
				node = &parent->rb_right;
		}
		rb_link_node(&e->rb_node, parent, node);
		rb_insert_color(&e->rb_node, &wc->freetree);
	} else {
		list_add_tail(&e->lru, &wc->freelist);
	}
	wc->freelist_size++;
}

static inline void writecache_verify_watermark(struct dm_writecache *wc)
{
	if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
		queue_work(wc->writeback_wq, &wc->writeback_work);
}

static void writecache_max_age_timer(struct timer_list *t)
{
	struct dm_writecache *wc = from_timer(wc, t, max_age_timer);

	if (!dm_suspended(wc->ti) && !writecache_has_error(wc)) {
		queue_work(wc->writeback_wq, &wc->writeback_work);
		mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);
	}
}

static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, sector_t expected_sector)
{
	struct wc_entry *e;

	if (WC_MODE_SORT_FREELIST(wc)) {
		struct rb_node *next;
		if (unlikely(!wc->current_free))
			return NULL;
		e = wc->current_free;
		if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
			return NULL;
		next = rb_next(&e->rb_node);
		rb_erase(&e->rb_node, &wc->freetree);
		if (unlikely(!next))
			next = rb_first(&wc->freetree);
		wc->current_free = next ? container_of(next, struct wc_entry, rb_node) : NULL;
	} else {
		if (unlikely(list_empty(&wc->freelist)))
			return NULL;
		e = container_of(wc->freelist.next, struct wc_entry, lru);
		if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
			return NULL;
		list_del(&e->lru);
	}
	wc->freelist_size--;

	writecache_verify_watermark(wc);

	return e;
}

static void writecache_free_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_unlink(wc, e);
	writecache_add_to_freelist(wc, e);
	clear_seq_count(wc, e);
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (unlikely(waitqueue_active(&wc->freelist_wait)))
		wake_up(&wc->freelist_wait);
}

static void writecache_wait_on_freelist(struct dm_writecache *wc)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&wc->freelist_wait, &wait, TASK_UNINTERRUPTIBLE);
	wc_unlock(wc);
	io_schedule();
	finish_wait(&wc->freelist_wait, &wait);
	wc_lock(wc);
}

static void writecache_poison_lists(struct dm_writecache *wc)
{
	/*
	 * Catch incorrect access to these values while the device is suspended.
	 */
	memset(&wc->tree, -1, sizeof wc->tree);
	wc->lru.next = LIST_POISON1;
	wc->lru.prev = LIST_POISON2;
	wc->freelist.next = LIST_POISON1;
	wc->freelist.prev = LIST_POISON2;
}

static void writecache_flush_entry(struct dm_writecache *wc, struct wc_entry *e)
{
	writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
	if (WC_MODE_PMEM(wc))
		writecache_flush_region(wc, memory_data(wc, e), wc->block_size);
}

static bool writecache_entry_is_committed(struct dm_writecache *wc, struct wc_entry *e)
{
	return read_seq_count(wc, e) < wc->seq_count;
}

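/*
 * Commit all uncommitted entries: flush their metadata (and, on pmem, their
 * data), commit, then advance seq_count and persist it in the superblock;
 * only the seq_count bump makes the new entries durable. Afterwards, walk
 * back through the just-committed entries and free any older entry that
 * caches the same original sector, since it is now superseded.
 */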
static void writecache_flush(struct dm_writecache *wc)
{
	struct wc_entry *e, *e2;
	bool need_flush_after_free;

	wc->uncommitted_blocks = 0;
	del_timer(&wc->autocommit_timer);

	if (list_empty(&wc->lru))
		return;

	e = container_of(wc->lru.next, struct wc_entry, lru);
	if (writecache_entry_is_committed(wc, e)) {
		if (wc->overwrote_committed) {
			writecache_wait_for_ios(wc, WRITE);
			writecache_disk_flush(wc, wc->ssd_dev);
			wc->overwrote_committed = false;
		}
		return;
	}
	while (1) {
		writecache_flush_entry(wc, e);
		if (unlikely(e->lru.next == &wc->lru))
			break;
		e2 = container_of(e->lru.next, struct wc_entry, lru);
		if (writecache_entry_is_committed(wc, e2))
			break;
		e = e2;
		cond_resched();
	}
	writecache_commit_flushed(wc, true);

	wc->seq_count++;
	pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
	if (WC_MODE_PMEM(wc))
		writecache_commit_flushed(wc, false);
	else
		ssd_commit_superblock(wc);

	wc->overwrote_committed = false;

	need_flush_after_free = false;
	while (1) {
		/* Free another committed entry with lower seq-count */
		struct rb_node *rb_node = rb_prev(&e->rb_node);

		if (rb_node) {
			e2 = container_of(rb_node, struct wc_entry, rb_node);
			if (read_original_sector(wc, e2) == read_original_sector(wc, e) &&
			    likely(!e2->write_in_progress)) {
				writecache_free_entry(wc, e2);
				need_flush_after_free = true;
			}
		}
		if (unlikely(e->lru.prev == &wc->lru))
			break;
		e = container_of(e->lru.prev, struct wc_entry, lru);
		cond_resched();
	}

	if (need_flush_after_free)
		writecache_commit_flushed(wc, false);
}

static void writecache_flush_work(struct work_struct *work)
{
	struct dm_writecache *wc = container_of(work, struct dm_writecache, flush_work);

	wc_lock(wc);
	writecache_flush(wc);
	wc_unlock(wc);
}

static void writecache_autocommit_timer(struct timer_list *t)
{
	struct dm_writecache *wc = from_timer(wc, t, autocommit_timer);
	if (!writecache_has_error(wc))
		queue_work(wc->writeback_wq, &wc->flush_work);
}

static void writecache_schedule_autocommit(struct dm_writecache *wc)
{
	if (!timer_pending(&wc->autocommit_timer))
		mod_timer(&wc->autocommit_timer, jiffies + wc->autocommit_jiffies);
}

static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_t end)
{
	struct wc_entry *e;
	bool discarded_something = false;

	e = writecache_find_entry(wc, start, WFE_RETURN_FOLLOWING | WFE_LOWEST_SEQ);
	if (unlikely(!e))
		return;

	while (read_original_sector(wc, e) < end) {
		struct rb_node *node = rb_next(&e->rb_node);

		if (likely(!e->write_in_progress)) {
			if (!discarded_something) {
				if (!WC_MODE_PMEM(wc)) {
					writecache_wait_for_ios(wc, READ);
					writecache_wait_for_ios(wc, WRITE);
				}
				discarded_something = true;
			}
			if (!writecache_entry_is_committed(wc, e))
				wc->uncommitted_blocks--;
			writecache_free_entry(wc, e);
		}

		if (unlikely(!node))
			break;

		e = container_of(node, struct wc_entry, rb_node);
	}

	if (discarded_something)
		writecache_commit_flushed(wc, false);
}

static bool writecache_wait_for_writeback(struct dm_writecache *wc)
{
	if (wc->writeback_size) {
		writecache_wait_on_freelist(wc);
		return true;
	}
	return false;
}

static void writecache_suspend(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;
	bool flush_on_suspend;

	del_timer_sync(&wc->autocommit_timer);
	del_timer_sync(&wc->max_age_timer);

	wc_lock(wc);
	writecache_flush(wc);
	flush_on_suspend = wc->flush_on_suspend;
	if (flush_on_suspend) {
		wc->flush_on_suspend = false;
		wc->writeback_all++;
		queue_work(wc->writeback_wq, &wc->writeback_work);
	}
	wc_unlock(wc);

	drain_workqueue(wc->writeback_wq);

	wc_lock(wc);
	if (flush_on_suspend)
		wc->writeback_all--;
	while (writecache_wait_for_writeback(wc));

	if (WC_MODE_PMEM(wc))
		persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);

	writecache_poison_lists(wc);

	wc_unlock(wc);
}

static int writecache_alloc_entries(struct dm_writecache *wc)
{
	size_t b;

	if (wc->entries)
		return 0;
	wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
	if (!wc->entries)
		return -ENOMEM;
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		e->index = b;
		e->write_in_progress = false;
		cond_resched();
	}

	return 0;
}

static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors)
{
	struct dm_io_region region;
	struct dm_io_request req;

	region.bdev = wc->ssd_dev->bdev;
	region.sector = wc->start_sector;
	region.count = n_sectors;
	req.bi_op = REQ_OP_READ;
	req.bi_op_flags = REQ_SYNC;
	req.mem.type = DM_IO_VMA;
	req.mem.ptr.vma = (char *)wc->memory_map;
	req.client = wc->dm_io;
	req.notify.fn = NULL;

	return dm_io(&req, 1, &region, NULL);
}

static void writecache_resume(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;
	size_t b;
	bool need_flush = false;
	__le64 sb_seq_count;
	int r;

	wc_lock(wc);

	if (WC_MODE_PMEM(wc)) {
		persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
	} else {
		r = writecache_read_metadata(wc, wc->metadata_sectors);
		if (r) {
			size_t sb_entries_offset;
			writecache_error(wc, r, "unable to read metadata: %d", r);
			sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
			memset((char *)wc->memory_map + sb_entries_offset, -1,
			       (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset);
		}
	}

	wc->tree = RB_ROOT;
	INIT_LIST_HEAD(&wc->lru);
	if (WC_MODE_SORT_FREELIST(wc)) {
		wc->freetree = RB_ROOT;
		wc->current_free = NULL;
	} else {
		INIT_LIST_HEAD(&wc->freelist);
	}
	wc->freelist_size = 0;

	r = memcpy_mcsafe(&sb_seq_count, &sb(wc)->seq_count, sizeof(uint64_t));
	if (r) {
		writecache_error(wc, r, "hardware memory error when reading superblock: %d", r);
		sb_seq_count = cpu_to_le64(0);
	}
	wc->seq_count = le64_to_cpu(sb_seq_count);

#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		struct wc_memory_entry wme;
		if (writecache_has_error(wc)) {
			e->original_sector = -1;
			e->seq_count = -1;
			continue;
		}
		r = memcpy_mcsafe(&wme, memory_entry(wc, e), sizeof(struct wc_memory_entry));
		if (r) {
			writecache_error(wc, r, "hardware memory error when reading metadata entry %lu: %d",
					 (unsigned long)b, r);
			e->original_sector = -1;
			e->seq_count = -1;
		} else {
			e->original_sector = le64_to_cpu(wme.original_sector);
			e->seq_count = le64_to_cpu(wme.seq_count);
		}
		cond_resched();
	}
#endif
	for (b = 0; b < wc->n_blocks; b++) {
		struct wc_entry *e = &wc->entries[b];
		if (!writecache_entry_is_committed(wc, e)) {
			if (read_seq_count(wc, e) != -1) {
erase_this:
				clear_seq_count(wc, e);
				need_flush = true;
			}
			writecache_add_to_freelist(wc, e);
		} else {
			struct wc_entry *old;

			old = writecache_find_entry(wc, read_original_sector(wc, e), 0);
			if (!old) {
				writecache_insert_entry(wc, e);
			} else {
				if (read_seq_count(wc, old) == read_seq_count(wc, e)) {
					writecache_error(wc, -EINVAL,
						 "two identical entries, position %llu, sector %llu, sequence %llu",
						 (unsigned long long)b, (unsigned long long)read_original_sector(wc, e),
						 (unsigned long long)read_seq_count(wc, e));
				}
				if (read_seq_count(wc, old) > read_seq_count(wc, e)) {
					goto erase_this;
				} else {
					writecache_free_entry(wc, old);
					writecache_insert_entry(wc, e);
					need_flush = true;
				}
			}
		}
		cond_resched();
	}

	if (need_flush) {
		writecache_flush_all_metadata(wc);
		writecache_commit_flushed(wc, false);
	}

	writecache_verify_watermark(wc);

	if (wc->max_age != MAX_AGE_UNSPECIFIED)
		mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);

	wc_unlock(wc);
}

static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	if (dm_suspended(wc->ti)) {
		wc_unlock(wc);
		return -EBUSY;
	}
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return -EIO;
	}

	writecache_flush(wc);
	wc->writeback_all++;
	queue_work(wc->writeback_wq, &wc->writeback_work);
	wc_unlock(wc);

	flush_workqueue(wc->writeback_wq);

	wc_lock(wc);
	wc->writeback_all--;
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return -EIO;
	}
	wc_unlock(wc);

	return 0;
}

static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	wc->flush_on_suspend = true;
	wc_unlock(wc);

	return 0;
}

static void activate_cleaner(struct dm_writecache *wc)
{
	wc->flush_on_suspend = true;
	wc->cleaner = true;
	wc->freelist_high_watermark = wc->n_blocks;
	wc->freelist_low_watermark = wc->n_blocks;
}

static int process_cleaner_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
	if (argc != 1)
		return -EINVAL;

	wc_lock(wc);
	activate_cleaner(wc);
	if (!dm_suspended(wc->ti))
		writecache_verify_watermark(wc);
	wc_unlock(wc);

	return 0;
}

static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
			      char *result, unsigned maxlen)
{
	int r = -EINVAL;
	struct dm_writecache *wc = ti->private;

	if (!strcasecmp(argv[0], "flush"))
		r = process_flush_mesg(argc, argv, wc);
	else if (!strcasecmp(argv[0], "flush_on_suspend"))
		r = process_flush_on_suspend_mesg(argc, argv, wc);
	else if (!strcasecmp(argv[0], "cleaner"))
		r = process_cleaner_mesg(argc, argv, wc);
	else
		DMERR("unrecognised message received: %s", argv[0]);

	return r;
}

static void memcpy_flushcache_optimized(void *dest, void *source, size_t size)
{
	/*
	 * clflushopt performs better with block size 1024, 2048, 4096
	 * non-temporal stores perform better with block size 512
	 *
	 * block size	512		1024		2048		4096
	 * movnti	496 MB/s	642 MB/s	725 MB/s	744 MB/s
	 * clflushopt	373 MB/s	688 MB/s	1.1 GB/s	1.2 GB/s
	 *
	 * We see that movnti performs better for 512-byte blocks, and
	 * clflushopt performs better for 1024-byte and larger blocks. So, we
	 * prefer clflushopt for sizes >= 768.
	 *
	 * NOTE: this happens to be the case now (with dm-writecache's single
	 * threaded model) but re-evaluate this once memcpy_flushcache() is
	 * enabled to use movdir64b which might invalidate this performance
	 * advantage seen with cache-allocating-writes plus flushing.
	 */
#ifdef CONFIG_X86
	if (static_cpu_has(X86_FEATURE_CLFLUSHOPT) &&
	    likely(boot_cpu_data.x86_clflush_size == 64) &&
	    likely(size >= 768)) {
		do {
			memcpy((void *)dest, (void *)source, 64);
			clflushopt((void *)dest);
			dest += 64;
			source += 64;
			size -= 64;
		} while (size >= 64);
		return;
	}
#endif
	memcpy_flushcache(dest, source, size);
}

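/*
 * Copy one cache block between a bio and the persistent memory at "data".
 * The bio may be fragmented across bio_vecs, so iterate in bv-sized pieces
 * until wc->block_size bytes have been moved. Reads go through
 * memcpy_mcsafe() so a hardware memory error is reported as an I/O error on
 * the bio instead of crashing the machine.
 */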
static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
{
	void *buf;
	unsigned long flags;
	unsigned size;
	int rw = bio_data_dir(bio);
	unsigned remaining_size = wc->block_size;

	do {
		struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
		buf = bvec_kmap_irq(&bv, &flags);
		size = bv.bv_len;
		if (unlikely(size > remaining_size))
			size = remaining_size;

		if (rw == READ) {
			int r;
			r = memcpy_mcsafe(buf, data, size);
			flush_dcache_page(bio_page(bio));
			if (unlikely(r)) {
				writecache_error(wc, r, "hardware memory error when reading data: %d", r);
				bio->bi_status = BLK_STS_IOERR;
			}
		} else {
			flush_dcache_page(bio_page(bio));
			memcpy_flushcache_optimized(data, buf, size);
		}

		bvec_kunmap_irq(buf, &flags);

		data = (char *)data + size;
		remaining_size -= size;
		bio_advance(bio, size);
	} while (unlikely(remaining_size));
}

static int writecache_flush_thread(void *data)
{
	struct dm_writecache *wc = data;

	while (1) {
		struct bio *bio;

		wc_lock(wc);
		bio = bio_list_pop(&wc->flush_list);
		if (!bio) {
			set_current_state(TASK_INTERRUPTIBLE);
			wc_unlock(wc);

			if (unlikely(kthread_should_stop())) {
				set_current_state(TASK_RUNNING);
				break;
			}

			schedule();
			continue;
		}

		if (bio_op(bio) == REQ_OP_DISCARD) {
			writecache_discard(wc, bio->bi_iter.bi_sector,
					   bio_end_sector(bio));
			wc_unlock(wc);
			bio_set_dev(bio, wc->dev->bdev);
			submit_bio_noacct(bio);
		} else {
			writecache_flush(wc);
			wc_unlock(wc);
			if (writecache_has_error(wc))
				bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		}
	}

	return 0;
}

static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio)
{
	if (bio_list_empty(&wc->flush_list))
		wake_up_process(wc->flush_thread);
	bio_list_add(&wc->flush_list, bio);
}

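/*
 * The map function. Flushes and discards are either handled inline (pmem
 * mode) or offloaded to the flush thread (SSD mode, where committing can
 * block on dm-io). Reads are served from the cache on a hit and remapped to
 * the origin on a miss; writes allocate cache blocks from the freelist,
 * falling back to writing through to the origin when the cache is full and
 * the block is not already cached.
 */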
static int writecache_map(struct dm_target *ti, struct bio *bio)
{
	struct wc_entry *e;
	struct dm_writecache *wc = ti->private;

	bio->bi_private = NULL;

	wc_lock(wc);

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		if (writecache_has_error(wc))
			goto unlock_error;
		if (WC_MODE_PMEM(wc)) {
			writecache_flush(wc);
			if (writecache_has_error(wc))
				goto unlock_error;
			goto unlock_submit;
		} else {
			writecache_offload_bio(wc, bio);
			goto unlock_return;
		}
	}

	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
				(wc->block_size / 512 - 1)) != 0)) {
		DMERR("I/O is not aligned, sector %llu, size %u, block size %u",
		      (unsigned long long)bio->bi_iter.bi_sector,
		      bio->bi_iter.bi_size, wc->block_size);
		goto unlock_error;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		if (writecache_has_error(wc))
			goto unlock_error;
		if (WC_MODE_PMEM(wc)) {
			writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio));
			goto unlock_remap_origin;
		} else {
			writecache_offload_bio(wc, bio);
			goto unlock_return;
		}
	}

	if (bio_data_dir(bio) == READ) {
read_next_block:
		e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
		if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
			if (WC_MODE_PMEM(wc)) {
				bio_copy_block(wc, bio, memory_data(wc, e));
				if (bio->bi_iter.bi_size)
					goto read_next_block;
				goto unlock_submit;
			} else {
				dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
				bio_set_dev(bio, wc->ssd_dev->bdev);
				bio->bi_iter.bi_sector = cache_sector(wc, e);
				if (!writecache_entry_is_committed(wc, e))
					writecache_wait_for_ios(wc, WRITE);
				goto unlock_remap;
			}
		} else {
			if (e) {
				sector_t next_boundary =
					read_original_sector(wc, e) - bio->bi_iter.bi_sector;
				if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
					dm_accept_partial_bio(bio, next_boundary);
				}
			}
			goto unlock_remap_origin;
		}
	} else {
		do {
			bool found_entry = false;
			if (writecache_has_error(wc))
				goto unlock_error;
			e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
			if (e) {
				if (!writecache_entry_is_committed(wc, e))
					goto bio_copy;
				if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
					wc->overwrote_committed = true;
					goto bio_copy;
				}
				found_entry = true;
			} else {
				if (unlikely(wc->cleaner))
					goto direct_write;
			}
			e = writecache_pop_from_freelist(wc, (sector_t)-1);
			if (unlikely(!e)) {
				if (!found_entry) {
direct_write:
					e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
					if (e) {
						sector_t next_boundary = read_original_sector(wc, e) - bio->bi_iter.bi_sector;
						BUG_ON(!next_boundary);
						if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
							dm_accept_partial_bio(bio, next_boundary);
						}
					}
					goto unlock_remap_origin;
				}
				writecache_wait_on_freelist(wc);
				continue;
			}
			write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
			writecache_insert_entry(wc, e);
			wc->uncommitted_blocks++;
bio_copy:
			if (WC_MODE_PMEM(wc)) {
				bio_copy_block(wc, bio, memory_data(wc, e));
			} else {
				unsigned bio_size = wc->block_size;
				sector_t start_cache_sec = cache_sector(wc, e);
				sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);

				while (bio_size < bio->bi_iter.bi_size) {
					struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
					if (!f)
						break;
					write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
									(bio_size >> SECTOR_SHIFT), wc->seq_count);
					writecache_insert_entry(wc, f);
					wc->uncommitted_blocks++;
					bio_size += wc->block_size;
					current_cache_sec += wc->block_size >> SECTOR_SHIFT;
				}

				bio_set_dev(bio, wc->ssd_dev->bdev);
				bio->bi_iter.bi_sector = start_cache_sec;
				dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);

				if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
					wc->uncommitted_blocks = 0;
					queue_work(wc->writeback_wq, &wc->flush_work);
				} else {
					writecache_schedule_autocommit(wc);
				}
				goto unlock_remap;
			}
		} while (bio->bi_iter.bi_size);

		if (unlikely(bio->bi_opf & REQ_FUA ||
			     wc->uncommitted_blocks >= wc->autocommit_blocks))
			writecache_flush(wc);
		else
			writecache_schedule_autocommit(wc);
		goto unlock_submit;
	}

unlock_remap_origin:
	bio_set_dev(bio, wc->dev->bdev);
	wc_unlock(wc);
	return DM_MAPIO_REMAPPED;

unlock_remap:
	/* make sure that writecache_end_io decrements bio_in_progress: */
	bio->bi_private = (void *)1;
	atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);
	wc_unlock(wc);
	return DM_MAPIO_REMAPPED;

unlock_submit:
	wc_unlock(wc);
	bio_endio(bio);
	return DM_MAPIO_SUBMITTED;

unlock_return:
	wc_unlock(wc);
	return DM_MAPIO_SUBMITTED;

unlock_error:
	wc_unlock(wc);
	bio_io_error(bio);
	return DM_MAPIO_SUBMITTED;
}

static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
{
	struct dm_writecache *wc = ti->private;

	if (bio->bi_private != NULL) {
		int dir = bio_data_dir(bio);
		if (atomic_dec_and_test(&wc->bio_in_progress[dir]))
			if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir])))
				wake_up(&wc->bio_in_progress_wait[dir]);
	}
	return 0;
}

static int writecache_iterate_devices(struct dm_target *ti,
				      iterate_devices_callout_fn fn, void *data)
{
	struct dm_writecache *wc = ti->private;

	return fn(ti, wc->dev, 0, ti->len, data);
}

static void writecache_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_writecache *wc = ti->private;

	if (limits->logical_block_size < wc->block_size)
		limits->logical_block_size = wc->block_size;

	if (limits->physical_block_size < wc->block_size)
		limits->physical_block_size = wc->block_size;

	if (limits->io_min < wc->block_size)
		limits->io_min = wc->block_size;
}


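/*
 * Writeback completions run in interrupt context, so both endio paths below
 * only queue the finished request on wc->endio_list under a raw spinlock and
 * wake the endio thread, which does the real work (freeing entries,
 * committing metadata) under wc->lock.
 */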
static void writecache_writeback_endio(struct bio *bio)
{
	struct writeback_struct *wb = container_of(bio, struct writeback_struct, bio);
	struct dm_writecache *wc = wb->wc;
	unsigned long flags;

	raw_spin_lock_irqsave(&wc->endio_list_lock, flags);
	if (unlikely(list_empty(&wc->endio_list)))
		wake_up_process(wc->endio_thread);
	list_add_tail(&wb->endio_entry, &wc->endio_list);
	raw_spin_unlock_irqrestore(&wc->endio_list_lock, flags);
}

static void writecache_copy_endio(int read_err, unsigned long write_err, void *ptr)
{
	struct copy_struct *c = ptr;
	struct dm_writecache *wc = c->wc;

	c->error = likely(!(read_err | write_err)) ? 0 : -EIO;

	raw_spin_lock_irq(&wc->endio_list_lock);
	if (unlikely(list_empty(&wc->endio_list)))
		wake_up_process(wc->endio_thread);
	list_add_tail(&c->endio_entry, &wc->endio_list);
	raw_spin_unlock_irq(&wc->endio_list_lock);
}

static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *list)
{
	unsigned i;
	struct writeback_struct *wb;
	struct wc_entry *e;
	unsigned long n_walked = 0;

	do {
		wb = list_entry(list->next, struct writeback_struct, endio_entry);
		list_del(&wb->endio_entry);

		if (unlikely(wb->bio.bi_status != BLK_STS_OK))
			writecache_error(wc, blk_status_to_errno(wb->bio.bi_status),
					"write error %d", wb->bio.bi_status);
		i = 0;
		do {
			e = wb->wc_list[i];
			BUG_ON(!e->write_in_progress);
			e->write_in_progress = false;
			INIT_LIST_HEAD(&e->lru);
			if (!writecache_has_error(wc))
				writecache_free_entry(wc, e);
			BUG_ON(!wc->writeback_size);
			wc->writeback_size--;
			n_walked++;
			if (unlikely(n_walked >= ENDIO_LATENCY)) {
				writecache_commit_flushed(wc, false);
				wc_unlock(wc);
				wc_lock(wc);
				n_walked = 0;
			}
		} while (++i < wb->wc_list_n);

		if (wb->wc_list != wb->wc_list_inline)
			kfree(wb->wc_list);
		bio_put(&wb->bio);
	} while (!list_empty(list));
}

static void __writecache_endio_ssd(struct dm_writecache *wc, struct list_head *list)
{
	struct copy_struct *c;
	struct wc_entry *e;

	do {
		c = list_entry(list->next, struct copy_struct, endio_entry);
		list_del(&c->endio_entry);

		if (unlikely(c->error))
			writecache_error(wc, c->error, "copy error");

		e = c->e;
		do {
			BUG_ON(!e->write_in_progress);
			e->write_in_progress = false;
			INIT_LIST_HEAD(&e->lru);
			if (!writecache_has_error(wc))
				writecache_free_entry(wc, e);

			BUG_ON(!wc->writeback_size);
			wc->writeback_size--;
			e++;
		} while (--c->n_entries);
		mempool_free(c, &wc->copy_pool);
	} while (!list_empty(list));
}

static int writecache_endio_thread(void *data)
{
	struct dm_writecache *wc = data;

	while (1) {
		struct list_head list;

		raw_spin_lock_irq(&wc->endio_list_lock);
		if (!list_empty(&wc->endio_list))
			goto pop_from_list;
		set_current_state(TASK_INTERRUPTIBLE);
		raw_spin_unlock_irq(&wc->endio_list_lock);

		if (unlikely(kthread_should_stop())) {
			set_current_state(TASK_RUNNING);
			break;
		}

		schedule();

		continue;

pop_from_list:
		list = wc->endio_list;
		list.next->prev = list.prev->next = &list;
		INIT_LIST_HEAD(&wc->endio_list);
		raw_spin_unlock_irq(&wc->endio_list_lock);

		if (!WC_MODE_FUA(wc))
			writecache_disk_flush(wc, wc->dev);

		wc_lock(wc);

		if (WC_MODE_PMEM(wc)) {
			__writecache_endio_pmem(wc, &list);
		} else {
			__writecache_endio_ssd(wc, &list);
			writecache_wait_for_ios(wc, READ);
		}

		writecache_commit_flushed(wc, false);

		wc_unlock(wc);
	}

	return 0;
}

static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t gfp)
{
	struct dm_writecache *wc = wb->wc;
	unsigned block_size = wc->block_size;
	void *address = memory_data(wc, e);

	persistent_memory_flush_cache(address, block_size);
	return bio_add_page(&wb->bio, persistent_memory_page(address),
			    block_size, persistent_memory_page_offset(address)) != 0;
}

struct writeback_list {
	struct list_head list;
	size_t size;
};

static void __writeback_throttle(struct dm_writecache *wc, struct writeback_list *wbl)
{
	if (unlikely(wc->max_writeback_jobs)) {
		if (READ_ONCE(wc->writeback_size) - wbl->size >= wc->max_writeback_jobs) {
			wc_lock(wc);
			while (wc->writeback_size - wbl->size >= wc->max_writeback_jobs)
				writecache_wait_on_freelist(wc);
			wc_unlock(wc);
		}
	}
	cond_resched();
}

static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeback_list *wbl)
{
	struct wc_entry *e, *f;
	struct bio *bio;
	struct writeback_struct *wb;
	unsigned max_pages;

	while (wbl->size) {
		wbl->size--;
		e = container_of(wbl->list.prev, struct wc_entry, lru);
		list_del(&e->lru);

		max_pages = e->wc_list_contiguous;

		bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
		wb = container_of(bio, struct writeback_struct, bio);
		wb->wc = wc;
		bio->bi_end_io = writecache_writeback_endio;
		bio_set_dev(bio, wc->dev->bdev);
		bio->bi_iter.bi_sector = read_original_sector(wc, e);
		if (max_pages <= WB_LIST_INLINE ||
		    unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
							   GFP_NOIO | __GFP_NORETRY |
							   __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
			wb->wc_list = wb->wc_list_inline;
			max_pages = WB_LIST_INLINE;
		}

		BUG_ON(!wc_add_block(wb, e, GFP_NOIO));

		wb->wc_list[0] = e;
		wb->wc_list_n = 1;

		while (wbl->size && wb->wc_list_n < max_pages) {
			f = container_of(wbl->list.prev, struct wc_entry, lru);
			if (read_original_sector(wc, f) !=
			    read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
				break;
			if (!wc_add_block(wb, f, GFP_NOWAIT | __GFP_NOWARN))
				break;
			wbl->size--;
			list_del(&f->lru);
			wb->wc_list[wb->wc_list_n++] = f;
			e = f;
		}
		bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
		if (writecache_has_error(wc)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		} else {
			submit_bio(bio);
		}

		__writeback_throttle(wc, wbl);
	}
}

static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writeback_list *wbl)
{
	struct wc_entry *e, *f;
	struct dm_io_region from, to;
	struct copy_struct *c;

	while (wbl->size) {
		unsigned n_sectors;

		wbl->size--;
		e = container_of(wbl->list.prev, struct wc_entry, lru);
		list_del(&e->lru);

		n_sectors = e->wc_list_contiguous << (wc->block_size_bits - SECTOR_SHIFT);

		from.bdev = wc->ssd_dev->bdev;
		from.sector = cache_sector(wc, e);
		from.count = n_sectors;
		to.bdev = wc->dev->bdev;
		to.sector = read_original_sector(wc, e);
		to.count = n_sectors;

		c = mempool_alloc(&wc->copy_pool, GFP_NOIO);
		c->wc = wc;
		c->e = e;
		c->n_entries = e->wc_list_contiguous;

		while ((n_sectors -= wc->block_size >> SECTOR_SHIFT)) {
			wbl->size--;
			f = container_of(wbl->list.prev, struct wc_entry, lru);
			BUG_ON(f != e + 1);
			list_del(&f->lru);
			e = f;
		}

		dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);

		__writeback_throttle(wc, wbl);
	}
}

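/*
 * The writeback worker: pick entries off the cold end of the LRU (or walk
 * the tree in order when writeback_all is set), coalesce runs of entries
 * whose original sectors are consecutive, and hand the batches to the pmem
 * (bio) or SSD (kcopyd) writeback path. Entries whose older duplicate is
 * still being written back are skipped and spliced back onto the LRU.
 */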
static void writecache_writeback(struct work_struct *work)
{
	struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work);
	struct blk_plug plug;
	struct wc_entry *f, *g, *e = NULL;
	struct rb_node *node, *next_node;
	struct list_head skipped;
	struct writeback_list wbl;
	unsigned long n_walked;

	wc_lock(wc);
restart:
	if (writecache_has_error(wc)) {
		wc_unlock(wc);
		return;
	}

	if (unlikely(wc->writeback_all)) {
		if (writecache_wait_for_writeback(wc))
			goto restart;
	}

	if (wc->overwrote_committed) {
		writecache_wait_for_ios(wc, WRITE);
	}

	n_walked = 0;
	INIT_LIST_HEAD(&skipped);
	INIT_LIST_HEAD(&wbl.list);
	wbl.size = 0;
	while (!list_empty(&wc->lru) &&
	       (wc->writeback_all ||
		wc->freelist_size + wc->writeback_size <= wc->freelist_low_watermark ||
		(jiffies - container_of(wc->lru.prev, struct wc_entry, lru)->age >=
		 wc->max_age - wc->max_age / MAX_AGE_DIV))) {

		n_walked++;
		if (unlikely(n_walked > WRITEBACK_LATENCY) &&
		    likely(!wc->writeback_all) && likely(!dm_suspended(wc->ti))) {
			queue_work(wc->writeback_wq, &wc->writeback_work);
			break;
		}

		if (unlikely(wc->writeback_all)) {
			if (unlikely(!e)) {
				writecache_flush(wc);
				e = container_of(rb_first(&wc->tree), struct wc_entry, rb_node);
			} else
				e = g;
		} else
			e = container_of(wc->lru.prev, struct wc_entry, lru);
		BUG_ON(e->write_in_progress);
		if (unlikely(!writecache_entry_is_committed(wc, e))) {
			writecache_flush(wc);
		}
		node = rb_prev(&e->rb_node);
		if (node) {
			f = container_of(node, struct wc_entry, rb_node);
			if (unlikely(read_original_sector(wc, f) ==
				     read_original_sector(wc, e))) {
				BUG_ON(!f->write_in_progress);
				list_del(&e->lru);
				list_add(&e->lru, &skipped);
				cond_resched();
				continue;
			}
		}
		wc->writeback_size++;
		list_del(&e->lru);
		list_add(&e->lru, &wbl.list);
		wbl.size++;
		e->write_in_progress = true;
		e->wc_list_contiguous = 1;

		f = e;

		while (1) {
			next_node = rb_next(&f->rb_node);
			if (unlikely(!next_node))
				break;
			g = container_of(next_node, struct wc_entry, rb_node);
			if (unlikely(read_original_sector(wc, g) ==
			    read_original_sector(wc, f))) {
				f = g;
				continue;
			}
			if (read_original_sector(wc, g) !=
			    read_original_sector(wc, f) + (wc->block_size >> SECTOR_SHIFT))
				break;
			if (unlikely(g->write_in_progress))
				break;
			if (unlikely(!writecache_entry_is_committed(wc, g)))
				break;

			if (!WC_MODE_PMEM(wc)) {
				if (g != f + 1)
					break;
			}

			n_walked++;
			//if (unlikely(n_walked > WRITEBACK_LATENCY) && likely(!wc->writeback_all))
			//	break;

			wc->writeback_size++;
			list_del(&g->lru);
			list_add(&g->lru, &wbl.list);
			wbl.size++;
			g->write_in_progress = true;
			g->wc_list_contiguous = BIO_MAX_PAGES;
			f = g;
			e->wc_list_contiguous++;
			if (unlikely(e->wc_list_contiguous == BIO_MAX_PAGES)) {
				if (unlikely(wc->writeback_all)) {
					next_node = rb_next(&f->rb_node);
					if (likely(next_node))
						g = container_of(next_node, struct wc_entry, rb_node);
				}
				break;
			}
		}
		cond_resched();
	}

	if (!list_empty(&skipped)) {
		list_splice_tail(&skipped, &wc->lru);
		/*
		 * If we didn't make any progress, we must wait until some
		 * writeback finishes to avoid burning CPU in a loop
		 */
		if (unlikely(!wbl.size))
			writecache_wait_for_writeback(wc);
	}

	wc_unlock(wc);

	blk_start_plug(&plug);

	if (WC_MODE_PMEM(wc))
		__writecache_writeback_pmem(wc, &wbl);
	else
		__writecache_writeback_ssd(wc, &wbl);

	blk_finish_plug(&plug);

	if (unlikely(wc->writeback_all)) {
		wc_lock(wc);
		while (writecache_wait_for_writeback(wc));
		wc_unlock(wc);
	}
}

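/*
 * Split the cache device between metadata and data blocks. Each cache block
 * needs one 16-byte wc_memory_entry, so the first estimate is
 * n_blocks = device_size / (block_size + 16); the loop then shrinks n_blocks
 * until the metadata (superblock plus entries, rounded up to a block
 * boundary) and the data blocks both fit. Illustrative arithmetic, not from
 * the source: with a 1 GiB cache device and 4096-byte blocks the estimate is
 * 1073741824 / 4112 = 261124, and after one decrement 261123 data blocks
 * plus 1021 metadata blocks (about 4 MiB) fit exactly.
 */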
static int calculate_memory_size(uint64_t device_size, unsigned block_size,
				 size_t *n_blocks_p, size_t *n_metadata_blocks_p)
{
	uint64_t n_blocks, offset;
	struct wc_entry e;

	n_blocks = device_size;
	do_div(n_blocks, block_size + sizeof(struct wc_memory_entry));

	while (1) {
		if (!n_blocks)
			return -ENOSPC;
		/* Verify the following entries[n_blocks] won't overflow */
		if (n_blocks >= ((size_t)-sizeof(struct wc_memory_superblock) /
				 sizeof(struct wc_memory_entry)))
			return -EFBIG;
		offset = offsetof(struct wc_memory_superblock, entries[n_blocks]);
		offset = (offset + block_size - 1) & ~(uint64_t)(block_size - 1);
		if (offset + n_blocks * block_size <= device_size)
			break;
		n_blocks--;
	}

	/* check if the bit field overflows */
	e.index = n_blocks;
	if (e.index != n_blocks)
		return -EFBIG;

	if (n_blocks_p)
		*n_blocks_p = n_blocks;
	if (n_metadata_blocks_p)
		*n_metadata_blocks_p = offset >> __ffs(block_size);
	return 0;
}

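/*
 * First-time initialization of the cache metadata. Note the ordering: all
 * fields and entries are written and committed before the magic number, and
 * the magic itself is committed separately, so a crash in the middle of
 * initialization should leave a superblock that fails the magic check rather
 * than a half-initialized one.
 */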
static int init_memory(struct dm_writecache *wc)
{
	size_t b;
	int r;

	r = calculate_memory_size(wc->memory_map_size, wc->block_size, &wc->n_blocks, NULL);
	if (r)
		return r;

	r = writecache_alloc_entries(wc);
	if (r)
		return r;

	for (b = 0; b < ARRAY_SIZE(sb(wc)->padding); b++)
		pmem_assign(sb(wc)->padding[b], cpu_to_le64(0));
	pmem_assign(sb(wc)->version, cpu_to_le32(MEMORY_SUPERBLOCK_VERSION));
	pmem_assign(sb(wc)->block_size, cpu_to_le32(wc->block_size));
	pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks));
	pmem_assign(sb(wc)->seq_count, cpu_to_le64(0));

	for (b = 0; b < wc->n_blocks; b++) {
		write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);
		cond_resched();
	}

	writecache_flush_all_metadata(wc);
	writecache_commit_flushed(wc, false);
	pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC));
	writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic);
	writecache_commit_flushed(wc, false);

	return 0;
}

static void writecache_dtr(struct dm_target *ti)
{
	struct dm_writecache *wc = ti->private;

	if (!wc)
		return;

	if (wc->endio_thread)
		kthread_stop(wc->endio_thread);

	if (wc->flush_thread)
		kthread_stop(wc->flush_thread);

	bioset_exit(&wc->bio_set);

	mempool_exit(&wc->copy_pool);

	if (wc->writeback_wq)
		destroy_workqueue(wc->writeback_wq);

	if (wc->dev)
		dm_put_device(ti, wc->dev);

	if (wc->ssd_dev)
		dm_put_device(ti, wc->ssd_dev);

	if (wc->entries)
		vfree(wc->entries);

	if (wc->memory_map) {
		if (WC_MODE_PMEM(wc))
			persistent_memory_release(wc);
		else
			vfree(wc->memory_map);
	}

	if (wc->dm_kcopyd)
		dm_kcopyd_client_destroy(wc->dm_kcopyd);

	if (wc->dm_io)
		dm_io_client_destroy(wc->dm_io);

	if (wc->dirty_bitmap)
		vfree(wc->dirty_bitmap);

	kfree(wc);
}

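/*
 * Target constructor. As parsed below, the table line is:
 *   writecache <p|s> <origin dev> <cache dev> <block size> <#feature args> [args...]
 * where "p" selects persistent-memory mode and "s" SSD mode. An illustrative
 * invocation (device names are placeholders, not from the source):
 *   dmsetup create wc --table "0 <origin sectors> writecache s /dev/origin /dev/cache 4096 0"
 */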
2019static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
2020{
2021 struct dm_writecache *wc;
2022 struct dm_arg_set as;
2023 const char *string;
2024 unsigned opt_params;
2025 size_t offset, data_size;
2026 int i, r;
2027 char dummy;
2028 int high_wm_percent = HIGH_WATERMARK;
2029 int low_wm_percent = LOW_WATERMARK;
2030 uint64_t x;
2031 struct wc_memory_superblock s;
2032
	static struct dm_arg _args[] = {
		{0, 10, "Invalid number of feature args"},
	};

	as.argc = argc;
	as.argv = argv;

	wc = kzalloc(sizeof(struct dm_writecache), GFP_KERNEL);
	if (!wc) {
		ti->error = "Cannot allocate writecache structure";
		r = -ENOMEM;
		goto bad;
	}
	ti->private = wc;
	wc->ti = ti;

	mutex_init(&wc->lock);
	wc->max_age = MAX_AGE_UNSPECIFIED;
	writecache_poison_lists(wc);
	init_waitqueue_head(&wc->freelist_wait);
	timer_setup(&wc->autocommit_timer, writecache_autocommit_timer, 0);
	timer_setup(&wc->max_age_timer, writecache_max_age_timer, 0);

	for (i = 0; i < 2; i++) {
		atomic_set(&wc->bio_in_progress[i], 0);
		init_waitqueue_head(&wc->bio_in_progress_wait[i]);
	}

	wc->dm_io = dm_io_client_create();
	if (IS_ERR(wc->dm_io)) {
		r = PTR_ERR(wc->dm_io);
		ti->error = "Unable to allocate dm-io client";
		wc->dm_io = NULL;
		goto bad;
	}

	wc->writeback_wq = alloc_workqueue("writecache-writeback", WQ_MEM_RECLAIM, 1);
	if (!wc->writeback_wq) {
		r = -ENOMEM;
		ti->error = "Could not allocate writeback workqueue";
		goto bad;
	}
	INIT_WORK(&wc->writeback_work, writecache_writeback);
	INIT_WORK(&wc->flush_work, writecache_flush_work);

	raw_spin_lock_init(&wc->endio_list_lock);
	INIT_LIST_HEAD(&wc->endio_list);
	wc->endio_thread = kthread_create(writecache_endio_thread, wc, "writecache_endio");
	if (IS_ERR(wc->endio_thread)) {
		r = PTR_ERR(wc->endio_thread);
		wc->endio_thread = NULL;
		ti->error = "Couldn't spawn endio thread";
		goto bad;
	}
	wake_up_process(wc->endio_thread);

	/*
	 * Parse the mode (pmem or ssd)
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;

	if (!strcasecmp(string, "s")) {
		wc->pmem_mode = false;
	} else if (!strcasecmp(string, "p")) {
#ifdef DM_WRITECACHE_HAS_PMEM
		wc->pmem_mode = true;
		wc->writeback_fua = true;
#else
		/*
		 * If the architecture doesn't support persistent memory or
		 * the kernel doesn't support any DAX drivers, this driver can
		 * only be used in SSD-only mode.
		 */
		r = -EOPNOTSUPP;
		ti->error = "Persistent memory or DAX not supported on this system";
		goto bad;
#endif
	} else {
		goto bad_arguments;
	}
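	/*
	 * pmem mode defaults to FUA writeback; the "fua"/"nofua" optional
	 * arguments parsed below can override this, and are rejected in
	 * SSD mode.
	 */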

	if (WC_MODE_PMEM(wc)) {
		r = bioset_init(&wc->bio_set, BIO_POOL_SIZE,
				offsetof(struct writeback_struct, bio),
				BIOSET_NEED_BVECS);
		if (r) {
			ti->error = "Could not allocate bio set";
			goto bad;
		}
	} else {
		r = mempool_init_kmalloc_pool(&wc->copy_pool, 1, sizeof(struct copy_struct));
		if (r) {
			ti->error = "Could not allocate mempool";
			goto bad;
		}
	}

	/*
	 * Parse the origin data device
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;
	r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->dev);
	if (r) {
		ti->error = "Origin data device lookup failed";
		goto bad;
	}

	/*
	 * Parse cache data device (be it pmem or ssd)
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;

	r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->ssd_dev);
	if (r) {
		ti->error = "Cache data device lookup failed";
		goto bad;
	}
	wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);

	/*
	 * Parse the cache block size
	 */
	string = dm_shift_arg(&as);
	if (!string)
		goto bad_arguments;
	if (sscanf(string, "%u%c", &wc->block_size, &dummy) != 1 ||
	    wc->block_size < 512 || wc->block_size > PAGE_SIZE ||
	    (wc->block_size & (wc->block_size - 1))) {
		r = -EINVAL;
		ti->error = "Invalid block size";
		goto bad;
	}
	if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) ||
	    wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) {
		r = -EINVAL;
		ti->error = "Block size is smaller than device logical block size";
		goto bad;
	}
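	/* block_size was verified to be a power of two, so __ffs() yields log2(block_size). */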
	wc->block_size_bits = __ffs(wc->block_size);

	wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
	wc->autocommit_blocks = !WC_MODE_PMEM(wc) ? AUTOCOMMIT_BLOCKS_SSD : AUTOCOMMIT_BLOCKS_PMEM;
	wc->autocommit_jiffies = msecs_to_jiffies(AUTOCOMMIT_MSEC);

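	/*
	 * The complete table line is:
	 *   writecache <p|s> <origin dev> <cache dev> <block size> <#opt args> <args...>
	 * For example (devices and sizes are illustrative only):
	 *   dmsetup create wc --table \
	 *     "0 409600 writecache s /dev/sdb /dev/sdc 4096 4 high_watermark 60 writeback_jobs 1024"
	 */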
	/*
	 * Parse optional arguments
	 */
	r = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
	if (r)
		goto bad;

	while (opt_params) {
		string = dm_shift_arg(&as), opt_params--;
		if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
			unsigned long long start_sector;
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
				goto invalid_optional;
			wc->start_sector = start_sector;
			if (wc->start_sector != start_sector ||
			    wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
				goto invalid_optional;
		} else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1)
				goto invalid_optional;
			if (high_wm_percent < 0 || high_wm_percent > 100)
				goto invalid_optional;
			wc->high_wm_percent_set = true;
		} else if (!strcasecmp(string, "low_watermark") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%d%c", &low_wm_percent, &dummy) != 1)
				goto invalid_optional;
			if (low_wm_percent < 0 || low_wm_percent > 100)
				goto invalid_optional;
			wc->low_wm_percent_set = true;
		} else if (!strcasecmp(string, "writeback_jobs") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &wc->max_writeback_jobs, &dummy) != 1)
				goto invalid_optional;
			wc->max_writeback_jobs_set = true;
		} else if (!strcasecmp(string, "autocommit_blocks") && opt_params >= 1) {
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &wc->autocommit_blocks, &dummy) != 1)
				goto invalid_optional;
			wc->autocommit_blocks_set = true;
		} else if (!strcasecmp(string, "autocommit_time") && opt_params >= 1) {
			unsigned autocommit_msecs;
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &autocommit_msecs, &dummy) != 1)
				goto invalid_optional;
			if (autocommit_msecs > 3600000)
				goto invalid_optional;
			wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs);
			wc->autocommit_time_set = true;
		} else if (!strcasecmp(string, "max_age") && opt_params >= 1) {
			unsigned max_age_msecs;
			string = dm_shift_arg(&as), opt_params--;
			if (sscanf(string, "%u%c", &max_age_msecs, &dummy) != 1)
				goto invalid_optional;
			if (max_age_msecs > 86400000)
				goto invalid_optional;
			wc->max_age = msecs_to_jiffies(max_age_msecs);
		} else if (!strcasecmp(string, "cleaner")) {
			wc->cleaner = true;
		} else if (!strcasecmp(string, "fua")) {
			if (WC_MODE_PMEM(wc)) {
				wc->writeback_fua = true;
				wc->writeback_fua_set = true;
			} else goto invalid_optional;
		} else if (!strcasecmp(string, "nofua")) {
			if (WC_MODE_PMEM(wc)) {
				wc->writeback_fua = false;
				wc->writeback_fua_set = true;
			} else goto invalid_optional;
		} else {
invalid_optional:
			r = -EINVAL;
			ti->error = "Invalid optional argument";
			goto bad;
		}
	}

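	/*
	 * The watermarks are percentages of occupied cache: writeback
	 * starts when usage exceeds the high watermark and stops once it
	 * drops below the low watermark, so high must not be below low.
	 */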
	if (high_wm_percent < low_wm_percent) {
		r = -EINVAL;
		ti->error = "High watermark must be greater than or equal to low watermark";
		goto bad;
	}

	if (WC_MODE_PMEM(wc)) {
		if (!dax_synchronous(wc->ssd_dev->dax_dev)) {
			r = -EOPNOTSUPP;
			ti->error = "Asynchronous persistent memory not supported as pmem cache";
			goto bad;
		}

		r = persistent_memory_claim(wc);
		if (r) {
			ti->error = "Unable to map persistent memory for cache";
			goto bad;
		}
	} else {
		size_t n_blocks, n_metadata_blocks;
		uint64_t n_bitmap_bits;

		wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT;

		bio_list_init(&wc->flush_list);
		wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush");
		if (IS_ERR(wc->flush_thread)) {
			r = PTR_ERR(wc->flush_thread);
			wc->flush_thread = NULL;
			ti->error = "Couldn't spawn flush thread";
			goto bad;
		}
		wake_up_process(wc->flush_thread);

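		/*
		 * In SSD mode the metadata is shadowed in a vmalloc'ed
		 * buffer; the dirty bitmap (one bit per BITMAP_GRANULARITY
		 * bytes) records which regions of that shadow must be
		 * written back to the cache device when committing.
		 */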
		r = calculate_memory_size(wc->memory_map_size, wc->block_size,
					  &n_blocks, &n_metadata_blocks);
		if (r) {
			ti->error = "Invalid device size";
			goto bad;
		}

		n_bitmap_bits = (((uint64_t)n_metadata_blocks << wc->block_size_bits) +
				 BITMAP_GRANULARITY - 1) / BITMAP_GRANULARITY;
		/* this is a limitation of the test_bit functions */
		if (n_bitmap_bits > 1U << 31) {
			r = -EFBIG;
			ti->error = "Invalid device size";
			goto bad;
		}

		wc->memory_map = vmalloc(n_metadata_blocks << wc->block_size_bits);
		if (!wc->memory_map) {
			r = -ENOMEM;
			ti->error = "Unable to allocate memory for metadata";
			goto bad;
		}

		wc->dm_kcopyd = dm_kcopyd_client_create(&dm_kcopyd_throttle);
		if (IS_ERR(wc->dm_kcopyd)) {
			r = PTR_ERR(wc->dm_kcopyd);
			ti->error = "Unable to allocate dm-kcopyd client";
			wc->dm_kcopyd = NULL;
			goto bad;
		}

		wc->metadata_sectors = n_metadata_blocks << (wc->block_size_bits - SECTOR_SHIFT);
		wc->dirty_bitmap_size = (n_bitmap_bits + BITS_PER_LONG - 1) /
			BITS_PER_LONG * sizeof(unsigned long);
		wc->dirty_bitmap = vzalloc(wc->dirty_bitmap_size);
		if (!wc->dirty_bitmap) {
			r = -ENOMEM;
			ti->error = "Unable to allocate dirty bitmap";
			goto bad;
		}

		r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT);
		if (r) {
			ti->error = "Unable to read first block of metadata";
			goto bad;
		}
	}

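	/*
	 * Read the superblock with memcpy_mcsafe() so that a hardware
	 * memory error on persistent memory is reported as a failure
	 * instead of being consumed as data. All-zero magic and version
	 * fields mean the device has never been initialized.
	 */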
	r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
	if (r) {
		ti->error = "Hardware memory error when reading superblock";
		goto bad;
	}
	if (!le32_to_cpu(s.magic) && !le32_to_cpu(s.version)) {
		r = init_memory(wc);
		if (r) {
			ti->error = "Unable to initialize device";
			goto bad;
		}
		r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
		if (r) {
			ti->error = "Hardware memory error when reading superblock";
			goto bad;
		}
	}

	if (le32_to_cpu(s.magic) != MEMORY_SUPERBLOCK_MAGIC) {
		ti->error = "Invalid magic in the superblock";
		r = -EINVAL;
		goto bad;
	}

	if (le32_to_cpu(s.version) != MEMORY_SUPERBLOCK_VERSION) {
		ti->error = "Invalid version in the superblock";
		r = -EINVAL;
		goto bad;
	}

	if (le32_to_cpu(s.block_size) != wc->block_size) {
		ti->error = "Block size does not match superblock";
		r = -EINVAL;
		goto bad;
	}

	wc->n_blocks = le64_to_cpu(s.n_blocks);

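	/*
	 * On-media layout: the superblock, then one wc_memory_entry per
	 * cache block, rounded up to a block boundary, then the cached
	 * data blocks. Every step below re-checks for size_t overflow,
	 * which matters on 32-bit configurations.
	 */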
	offset = wc->n_blocks * sizeof(struct wc_memory_entry);
	if (offset / sizeof(struct wc_memory_entry) != le64_to_cpu(sb(wc)->n_blocks)) {
overflow:
		ti->error = "Overflow in size calculation";
		r = -EINVAL;
		goto bad;
	}
	offset += sizeof(struct wc_memory_superblock);
	if (offset < sizeof(struct wc_memory_superblock))
		goto overflow;
	offset = (offset + wc->block_size - 1) & ~(size_t)(wc->block_size - 1);
	data_size = wc->n_blocks * (size_t)wc->block_size;
	if (!offset || (data_size / wc->block_size != wc->n_blocks) ||
	    (offset + data_size < offset))
		goto overflow;
	if (offset + data_size > wc->memory_map_size) {
		ti->error = "Memory area is too small";
		r = -EINVAL;
		goto bad;
	}

	wc->metadata_sectors = offset >> SECTOR_SHIFT;
	wc->block_start = (char *)sb(wc) + offset;

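	/*
	 * The user-supplied watermarks are percentages of cache in use;
	 * the freelist watermarks count free blocks, hence the
	 * (100 - percent) inversion. Adding 50 before do_div() rounds to
	 * the nearest block instead of truncating.
	 */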
	x = (uint64_t)wc->n_blocks * (100 - high_wm_percent);
	x += 50;
	do_div(x, 100);
	wc->freelist_high_watermark = x;
	x = (uint64_t)wc->n_blocks * (100 - low_wm_percent);
	x += 50;
	do_div(x, 100);
	wc->freelist_low_watermark = x;

	if (wc->cleaner)
		activate_cleaner(wc);

	r = writecache_alloc_entries(wc);
	if (r) {
		ti->error = "Cannot allocate memory";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->flush_supported = true;
	ti->num_discard_bios = 1;

	if (WC_MODE_PMEM(wc))
		persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);

	return 0;

bad_arguments:
	r = -EINVAL;
	ti->error = "Bad arguments";
bad:
	writecache_dtr(ti);
	return r;
}

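/*
 * STATUSTYPE_INFO reports the error flag, total blocks, free blocks and
 * blocks under writeback; STATUSTYPE_TABLE re-emits a constructor line
 * equivalent to the one the table was loaded with.
 */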
static void writecache_status(struct dm_target *ti, status_type_t type,
			      unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_writecache *wc = ti->private;
	unsigned extra_args;
	unsigned sz = 0;
	uint64_t x;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%ld %llu %llu %llu", writecache_has_error(wc),
		       (unsigned long long)wc->n_blocks, (unsigned long long)wc->freelist_size,
		       (unsigned long long)wc->writeback_size);
		break;
	case STATUSTYPE_TABLE:
		DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 'p' : 's',
		       wc->dev->name, wc->ssd_dev->name, wc->block_size);
		extra_args = 0;
		if (wc->start_sector)
			extra_args += 2;
		if (wc->high_wm_percent_set && !wc->cleaner)
			extra_args += 2;
		if (wc->low_wm_percent_set && !wc->cleaner)
			extra_args += 2;
		if (wc->max_writeback_jobs_set)
			extra_args += 2;
		if (wc->autocommit_blocks_set)
			extra_args += 2;
		if (wc->autocommit_time_set)
			extra_args += 2;
		if (wc->max_age != MAX_AGE_UNSPECIFIED)
			extra_args += 2;
		if (wc->cleaner)
			extra_args++;
		if (wc->writeback_fua_set)
			extra_args++;

		DMEMIT("%u", extra_args);
		if (wc->start_sector)
			DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector);
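		/*
		 * Convert the freelist watermarks back to percentages;
		 * this inverts the rounded computation done in
		 * writecache_ctr(), so the reported table matches the one
		 * that was loaded.
		 */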
		if (wc->high_wm_percent_set && !wc->cleaner) {
			x = (uint64_t)wc->freelist_high_watermark * 100;
			x += wc->n_blocks / 2;
			do_div(x, (size_t)wc->n_blocks);
			DMEMIT(" high_watermark %u", 100 - (unsigned)x);
		}
		if (wc->low_wm_percent_set && !wc->cleaner) {
			x = (uint64_t)wc->freelist_low_watermark * 100;
			x += wc->n_blocks / 2;
			do_div(x, (size_t)wc->n_blocks);
			DMEMIT(" low_watermark %u", 100 - (unsigned)x);
		}
		if (wc->max_writeback_jobs_set)
			DMEMIT(" writeback_jobs %u", wc->max_writeback_jobs);
		if (wc->autocommit_blocks_set)
			DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks);
		if (wc->autocommit_time_set)
			DMEMIT(" autocommit_time %u", jiffies_to_msecs(wc->autocommit_jiffies));
		if (wc->max_age != MAX_AGE_UNSPECIFIED)
			DMEMIT(" max_age %u", jiffies_to_msecs(wc->max_age));
		if (wc->cleaner)
			DMEMIT(" cleaner");
		if (wc->writeback_fua_set)
			DMEMIT(" %sfua", wc->writeback_fua ? "" : "no");
		break;
	}
}

static struct target_type writecache_target = {
	.name			= "writecache",
	.version		= {1, 3, 0},
	.module			= THIS_MODULE,
	.ctr			= writecache_ctr,
	.dtr			= writecache_dtr,
	.status			= writecache_status,
	.postsuspend		= writecache_suspend,
	.resume			= writecache_resume,
	.message		= writecache_message,
	.map			= writecache_map,
	.end_io			= writecache_end_io,
	.iterate_devices	= writecache_iterate_devices,
	.io_hints		= writecache_io_hints,
};

static int __init dm_writecache_init(void)
{
	int r;

	r = dm_register_target(&writecache_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		return r;
	}

	return 0;
}

static void __exit dm_writecache_exit(void)
{
	dm_unregister_target(&writecache_target);
}

module_init(dm_writecache_init);
module_exit(dm_writecache_exit);

MODULE_DESCRIPTION(DM_NAME " writecache target");
MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");