// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/bio.h>

#include "cache.h"
#include "backing_dev.h"
#include "cache_dev.h"
#include "dm_pcache.h"

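/*
 * Called once per completed backing request. The first non-zero @ret is
 * recorded so that a single failed request fails the whole pass. Only when
 * the last pending request drops the counter to zero does the pass finish:
 * on success the backing device is flushed and dirty_tail is advanced past
 * the kset that was just written back, then the worker is re-queued.
 */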
static void writeback_ctx_end(struct pcache_cache *cache, int ret)
{
	if (ret && !cache->writeback_ctx.ret) {
		pcache_dev_err(CACHE_TO_PCACHE(cache), "writeback error: %d", ret);
		cache->writeback_ctx.ret = ret;
	}

	if (!atomic_dec_and_test(&cache->writeback_ctx.pending))
		return;

	if (!cache->writeback_ctx.ret) {
		backing_dev_flush(cache->backing_dev);

		mutex_lock(&cache->dirty_tail_lock);
		cache_pos_advance(&cache->dirty_tail, cache->writeback_ctx.advance);
		cache_encode_dirty_tail(cache);
		mutex_unlock(&cache->dirty_tail_lock);
	}
	queue_delayed_work(cache_get_wq(cache), &cache->writeback_work, 0);
}

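/*
 * End-of-request callback for writeback I/O; takes writeback_lock so that
 * request completions are serialized against the writeback worker.
 */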
static void writeback_end_req(struct pcache_backing_dev_req *backing_req, int ret)
{
	struct pcache_cache *cache = backing_req->priv_data;

	mutex_lock(&cache->writeback_lock);
	writeback_ctx_end(cache, ret);
	mutex_unlock(&cache->writeback_lock);
}

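/*
 * Read the kset at @dirty_tail into cache->wb_kset_onmedia_buf and decide
 * whether there is anything to write back. A media read failure, a magic
 * mismatch, or a CRC mismatch all mean no valid kset is present at the
 * position, so the cache is treated as clean.
 */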
static inline bool is_cache_clean(struct pcache_cache *cache, struct pcache_cache_pos *dirty_tail)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	struct pcache_cache_kset_onmedia *kset_onmedia;
	u32 to_copy;
	void *addr;
	int ret;

	addr = cache_pos_addr(dirty_tail);
	kset_onmedia = (struct pcache_cache_kset_onmedia *)cache->wb_kset_onmedia_buf;

	to_copy = min(PCACHE_KSET_ONMEDIA_SIZE_MAX, PCACHE_SEG_SIZE - dirty_tail->seg_off);
	ret = copy_mc_to_kernel(kset_onmedia, addr, to_copy);
	if (ret) {
		pcache_dev_err(pcache, "failed to read kset: %d", ret);
		return true;
	}

	/* Check if the magic number matches the expected value */
	if (kset_onmedia->magic != PCACHE_KSET_MAGIC) {
		pcache_dev_debug(pcache, "dirty_tail: %u:%u magic: %llx, expected: %llx\n",
				 dirty_tail->cache_seg->cache_seg_id, dirty_tail->seg_off,
				 kset_onmedia->magic, PCACHE_KSET_MAGIC);
		return true;
	}

	/* Verify the CRC checksum for data integrity */
	if (kset_onmedia->crc != cache_kset_crc(kset_onmedia)) {
		pcache_dev_debug(pcache, "dirty_tail: %u:%u crc: %x, expected: %x\n",
				 dirty_tail->cache_seg->cache_seg_id, dirty_tail->seg_off,
				 kset_onmedia->crc, cache_kset_crc(kset_onmedia));
		return true;
	}

	return false;
}

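/*
 * Tear down writeback: stop the worker, flush any data already written to
 * the backing device, and free the writeback key tree.
 */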
void cache_writeback_exit(struct pcache_cache *cache)
{
	cancel_delayed_work_sync(&cache->writeback_work);
	backing_dev_flush(cache->backing_dev);
	cache_tree_exit(&cache->writeback_key_tree);
}

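/*
 * Set up the single-subtree writeback key tree, reset the pending counter,
 * and kick off the writeback worker.
 */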
int cache_writeback_init(struct pcache_cache *cache)
{
	int ret;

	ret = cache_tree_init(cache, &cache->writeback_key_tree, 1);
	if (ret)
		goto err;

	atomic_set(&cache->writeback_ctx.pending, 0);

	/* Queue delayed work to start writeback handling */
	queue_delayed_work(cache_get_wq(cache), &cache->writeback_work, 0);

	return 0;
err:
	return ret;
}

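/*
 * Write one dirty cache key back to the backing device. The key's data is
 * contiguous within its cache segment (BUG_ON otherwise), but a single key
 * may still be submitted as several KMEM requests:
 * backing_dev_req_coalesced_max_len() returns how much of the remainder
 * one request can cover, so we loop until the whole key is submitted.
 * Every submitted request bumps writeback_ctx.pending and completes
 * through writeback_end_req().
 */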
static void cache_key_writeback(struct pcache_cache *cache, struct pcache_cache_key *key)
{
	struct pcache_backing_dev_req *writeback_req;
	struct pcache_backing_dev_req_opts writeback_req_opts = { 0 };
	struct pcache_cache_pos *pos;
	void *addr;
	u32 seg_remain, req_len, done = 0;

	if (cache_key_clean(key))
		return;

	pos = &key->cache_pos;

	seg_remain = cache_seg_remain(pos);
	BUG_ON(seg_remain < key->len);
next_req:
	addr = cache_pos_addr(pos) + done;
	req_len = backing_dev_req_coalesced_max_len(addr, key->len - done);

	writeback_req_opts.type = BACKING_DEV_REQ_TYPE_KMEM;
	writeback_req_opts.gfp_mask = GFP_NOIO;
	writeback_req_opts.end_fn = writeback_end_req;
	writeback_req_opts.priv_data = cache;

	writeback_req_opts.kmem.data = addr;
	writeback_req_opts.kmem.opf = REQ_OP_WRITE;
	writeback_req_opts.kmem.len = req_len;
	writeback_req_opts.kmem.backing_off = key->off + done;

	writeback_req = backing_dev_req_create(cache->backing_dev, &writeback_req_opts);

	atomic_inc(&cache->writeback_ctx.pending);
	backing_dev_req_submit(writeback_req, true);

	done += req_len;
	if (done < key->len)
		goto next_req;
}

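/*
 * Walk every subtree of the writeback key tree and submit writeback I/O
 * for each key, deleting keys as they are dispatched. pending starts at 1
 * so the pass cannot complete while keys are still being submitted; the
 * final writeback_ctx_end(cache, 0) drops that reference. @advance is the
 * on-media size of the kset, applied to dirty_tail once all requests
 * complete successfully.
 */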
static void cache_wb_tree_writeback(struct pcache_cache *cache, u32 advance)
{
	struct pcache_cache_tree *cache_tree = &cache->writeback_key_tree;
	struct pcache_cache_subtree *cache_subtree;
	struct rb_node *node;
	struct pcache_cache_key *key;
	u32 i;

	cache->writeback_ctx.ret = 0;
	cache->writeback_ctx.advance = advance;
	atomic_set(&cache->writeback_ctx.pending, 1);

	for (i = 0; i < cache_tree->n_subtrees; i++) {
		cache_subtree = &cache_tree->subtrees[i];

		node = rb_first(&cache_subtree->root);
		while (node) {
			key = CACHE_KEY(node);
			node = rb_next(node);

			cache_key_writeback(cache, key);
			cache_key_delete(key);
		}
	}
	writeback_ctx_end(cache, 0);
}

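/*
 * Decode every on-media key in @kset_onmedia and insert it into the
 * writeback key tree, so the keys can be walked and written back by
 * cache_wb_tree_writeback(). On a decode failure the tree is cleared and
 * the error is returned, leaving dirty_tail in place so the kset is
 * retried on a later pass.
 */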
static int cache_kset_insert_tree(struct pcache_cache *cache, struct pcache_cache_kset_onmedia *kset_onmedia)
{
	struct pcache_cache_key_onmedia *key_onmedia;
	struct pcache_cache_subtree *cache_subtree;
	struct pcache_cache_key *key;
	int ret;
	u32 i;

	/* Decode each key in the kset and insert it into the writeback tree */
	for (i = 0; i < kset_onmedia->key_num; i++) {
		key_onmedia = &kset_onmedia->data[i];

		key = cache_key_alloc(&cache->writeback_key_tree, GFP_NOIO);
		ret = cache_key_decode(cache, key_onmedia, key);
		if (ret) {
			cache_key_put(key);
			goto clear_tree;
		}

		cache_subtree = get_subtree(&cache->writeback_key_tree, key->off);
		spin_lock(&cache_subtree->tree_lock);
		cache_key_insert(&cache->writeback_key_tree, key, true);
		spin_unlock(&cache_subtree->tree_lock);
	}

	return 0;
clear_tree:
	cache_tree_clear(&cache->writeback_key_tree);
	return ret;
}

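/*
 * A kset flagged PCACHE_KSET_FLAGS_LAST marks the end of the current
 * segment: move dirty_tail to the start of the next cache segment named
 * by the kset and persist the new position.
 */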
static void last_kset_writeback(struct pcache_cache *cache,
				struct pcache_cache_kset_onmedia *last_kset_onmedia)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	struct pcache_cache_segment *next_seg;

	pcache_dev_debug(pcache, "last kset, next: %u\n", last_kset_onmedia->next_cache_seg_id);

	next_seg = &cache->segments[last_kset_onmedia->next_cache_seg_id];

	mutex_lock(&cache->dirty_tail_lock);
	cache->dirty_tail.cache_seg = next_seg;
	cache->dirty_tail.seg_off = 0;
	cache_encode_dirty_tail(cache);
	mutex_unlock(&cache->dirty_tail_lock);
}

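/*
 * Writeback worker. Each pass handles at most one kset at dirty_tail:
 * bail out if a previous pass is still pending or the device is stopping,
 * sleep for PCACHE_CACHE_WRITEBACK_INTERVAL when the cache is clean (or
 * the kset failed to parse or decode), hop to the next segment on a LAST
 * kset, and otherwise build the writeback tree, submit the I/O, and
 * re-queue immediately to pick up the next kset.
 */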
void cache_writeback_fn(struct work_struct *work)
{
	struct pcache_cache *cache = container_of(work, struct pcache_cache, writeback_work.work);
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	struct pcache_cache_pos dirty_tail;
	struct pcache_cache_kset_onmedia *kset_onmedia;
	u32 delay;
	int ret;

	mutex_lock(&cache->writeback_lock);
	if (atomic_read(&cache->writeback_ctx.pending))
		goto unlock;

	if (pcache_is_stopping(pcache))
		goto unlock;

	kset_onmedia = (struct pcache_cache_kset_onmedia *)cache->wb_kset_onmedia_buf;

	mutex_lock(&cache->dirty_tail_lock);
	cache_pos_copy(&dirty_tail, &cache->dirty_tail);
	mutex_unlock(&cache->dirty_tail_lock);

	if (is_cache_clean(cache, &dirty_tail)) {
		delay = PCACHE_CACHE_WRITEBACK_INTERVAL;
		goto queue_work;
	}

	if (kset_onmedia->flags & PCACHE_KSET_FLAGS_LAST) {
		last_kset_writeback(cache, kset_onmedia);
		delay = 0;
		goto queue_work;
	}

	ret = cache_kset_insert_tree(cache, kset_onmedia);
	if (ret) {
		delay = PCACHE_CACHE_WRITEBACK_INTERVAL;
		goto queue_work;
	}

	cache_wb_tree_writeback(cache, get_kset_onmedia_size(kset_onmedia));
	delay = 0;
queue_work:
	queue_delayed_work(cache_get_wq(cache), &cache->writeback_work, delay);
unlock:
	mutex_unlock(&cache->writeback_lock);
}