// SPDX-License-Identifier: GPL-2.0-or-later

#include "cache_dev.h"
#include "cache.h"
#include "backing_dev.h"
#include "dm_pcache.h"

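/*
 * Cache segment management for dm-pcache.
 *
 * On-media segment metadata (struct pcache_segment_info) and per-segment
 * control data (generation records) are replicated and CRC-protected,
 * and are persisted with memcpy_flushcache()/pmem_wmb() so that the
 * latest valid copy can be recovered via pcache_meta_find_latest()
 * after a crash.
 */
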
static inline struct pcache_segment_info *get_seg_info_addr(struct pcache_cache_segment *cache_seg)
{
	struct pcache_segment_info *seg_info_addr;
	u32 seg_id = cache_seg->segment.seg_id;
	void *seg_addr;

	seg_addr = CACHE_DEV_SEGMENT(cache_seg->cache->cache_dev, seg_id);
	seg_info_addr = seg_addr + PCACHE_SEG_INFO_SIZE * cache_seg->info_index;

	return seg_info_addr;
}

static void cache_seg_info_write(struct pcache_cache_segment *cache_seg)
{
	struct pcache_segment_info *seg_info_addr;
	struct pcache_segment_info *seg_info = &cache_seg->cache_seg_info;

	mutex_lock(&cache_seg->info_lock);
	seg_info->header.seq++;
	seg_info->header.crc = pcache_meta_crc(&seg_info->header,
					       sizeof(struct pcache_segment_info));

	seg_info_addr = get_seg_info_addr(cache_seg);
	memcpy_flushcache(seg_info_addr, seg_info, sizeof(struct pcache_segment_info));
	pmem_wmb();

	cache_seg->info_index = (cache_seg->info_index + 1) % PCACHE_META_INDEX_MAX;
	mutex_unlock(&cache_seg->info_lock);
}

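/*
 * Segment metadata is replicated across PCACHE_META_INDEX_MAX on-media
 * slots and written round-robin: each update bumps header.seq,
 * recomputes the CRC, persists the record with memcpy_flushcache() +
 * pmem_wmb(), and only then advances the slot index.  A crash can
 * therefore only tear the copy being written, which
 * pcache_meta_find_latest() rejects by CRC, falling back to the
 * previous valid copy.
 */
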
static int cache_seg_info_load(struct pcache_cache_segment *cache_seg)
{
	struct pcache_segment_info *cache_seg_info_addr_base, *cache_seg_info_addr;
	struct pcache_cache_dev *cache_dev = cache_seg->cache->cache_dev;
	struct dm_pcache *pcache = CACHE_DEV_TO_PCACHE(cache_dev);
	u32 seg_id = cache_seg->segment.seg_id;
	int ret = 0;

	cache_seg_info_addr_base = CACHE_DEV_SEGMENT(cache_dev, seg_id);

	mutex_lock(&cache_seg->info_lock);
	cache_seg_info_addr = pcache_meta_find_latest(&cache_seg_info_addr_base->header,
						      sizeof(struct pcache_segment_info),
						      PCACHE_SEG_INFO_SIZE,
						      &cache_seg->cache_seg_info);
	if (IS_ERR(cache_seg_info_addr)) {
		ret = PTR_ERR(cache_seg_info_addr);
		goto out;
	} else if (!cache_seg_info_addr) {
		ret = -EIO;
		goto out;
	}
	cache_seg->info_index = cache_seg_info_addr - cache_seg_info_addr_base;
out:
	mutex_unlock(&cache_seg->info_lock);

	if (ret)
		pcache_dev_err(pcache, "can't read segment info of segment: %u, ret: %d\n",
			       cache_seg->segment.seg_id, ret);
	return ret;
}

static int cache_seg_ctrl_load(struct pcache_cache_segment *cache_seg)
{
	struct pcache_cache_seg_ctrl *cache_seg_ctrl = cache_seg->cache_seg_ctrl;
	struct pcache_cache_seg_gen cache_seg_gen, *cache_seg_gen_addr;
	int ret = 0;

	cache_seg_gen_addr = pcache_meta_find_latest(&cache_seg_ctrl->gen->header,
						     sizeof(struct pcache_cache_seg_gen),
						     sizeof(struct pcache_cache_seg_gen),
						     &cache_seg_gen);
	if (IS_ERR(cache_seg_gen_addr)) {
		ret = PTR_ERR(cache_seg_gen_addr);
		goto out;
	}

	/* No valid gen record on media: treat as a freshly initialized segment */
	if (!cache_seg_gen_addr) {
		cache_seg->gen = 0;
		cache_seg->gen_seq = 0;
		cache_seg->gen_index = 0;
		goto out;
	}

	cache_seg->gen = cache_seg_gen.gen;
	cache_seg->gen_seq = cache_seg_gen.header.seq;
	cache_seg->gen_index = (cache_seg_gen_addr - cache_seg_ctrl->gen);
out:
	return ret;
}

static inline struct pcache_cache_seg_gen *get_cache_seg_gen_addr(struct pcache_cache_segment *cache_seg)
{
	struct pcache_cache_seg_ctrl *cache_seg_ctrl = cache_seg->cache_seg_ctrl;

	return (cache_seg_ctrl->gen + cache_seg->gen_index);
}

/*
 * cache_seg_ctrl_write - write cache segment control information
 * @cache_seg: the cache segment to update
 *
 * This function writes the control information of a cache segment to media.
 *
 * Although this updates shared control data, we intentionally do not use
 * any locking here. All accesses to control information are single-threaded:
 *
 * - All reads occur during the init phase, where no concurrent writes
 *   can happen.
 * - Writes happen once during init and once when the last reference
 *   to the segment is dropped in cache_seg_put().
 *
 * Both cases are guaranteed to be single-threaded, so there is no risk
 * of concurrent read/write races.
 */
static void cache_seg_ctrl_write(struct pcache_cache_segment *cache_seg)
{
	struct pcache_cache_seg_gen cache_seg_gen;

	cache_seg_gen.gen = cache_seg->gen;
	cache_seg_gen.header.seq = ++cache_seg->gen_seq;
	cache_seg_gen.header.crc = pcache_meta_crc(&cache_seg_gen.header,
						   sizeof(struct pcache_cache_seg_gen));

	memcpy_flushcache(get_cache_seg_gen_addr(cache_seg), &cache_seg_gen,
			  sizeof(struct pcache_cache_seg_gen));
	pmem_wmb();

	cache_seg->gen_index = (cache_seg->gen_index + 1) % PCACHE_META_INDEX_MAX;
}

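/*
 * For reference, a simplified sketch of what a pcache_meta_find_latest()
 * style lookup over such replicated records might look like; the real
 * helper lives elsewhere, so names and details below are illustrative
 * assumptions (seq wraparound handling omitted):
 *
 *	struct pcache_meta_header *latest = NULL;
 *	u32 i;
 *
 *	for (i = 0; i < PCACHE_META_INDEX_MAX; i++) {
 *		struct pcache_meta_header *hdr = (void *)base + i * stride;
 *
 *		if (pcache_meta_crc(hdr, size) != hdr->crc)
 *			continue;	(skip torn or corrupt copies)
 *		if (!latest || hdr->seq > latest->seq)
 *			latest = hdr;	(highest valid seq wins)
 *	}
 */
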
static void cache_seg_ctrl_init(struct pcache_cache_segment *cache_seg)
{
	cache_seg->gen = 0;
	cache_seg->gen_seq = 0;
	cache_seg->gen_index = 0;
	cache_seg_ctrl_write(cache_seg);
}

static int cache_seg_meta_load(struct pcache_cache_segment *cache_seg)
{
	int ret;

	ret = cache_seg_info_load(cache_seg);
	if (ret)
		return ret;

	return cache_seg_ctrl_load(cache_seg);
}

/**
 * cache_seg_set_next_seg - Sets the ID of the next segment
 * @cache_seg: Pointer to the cache segment structure.
 * @seg_id: The segment ID to set as the next segment.
 *
 * A pcache_cache allocates multiple cache segments, which are linked together
 * through next_seg. When loading a pcache_cache, the first cache segment can
 * be found using cache->seg_id, which allows access to all the cache segments.
 */
void cache_seg_set_next_seg(struct pcache_cache_segment *cache_seg, u32 seg_id)
{
	cache_seg->cache_seg_info.flags |= PCACHE_SEG_INFO_FLAGS_HAS_NEXT;
	cache_seg->cache_seg_info.next_seg = seg_id;
	cache_seg_info_write(cache_seg);
}

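/*
 * A minimal sketch of how a loader could walk the chain that
 * cache_seg_set_next_seg() builds (hypothetical caller, error handling
 * omitted; assumes cache->seg_id names the first on-media segment):
 *
 *	u32 seg_id = cache->seg_id;
 *	u32 i = 0;
 *
 *	do {
 *		struct pcache_cache_segment *seg = &cache->segments[i];
 *
 *		cache_seg_init(cache, seg_id, i++, false);
 *		if (!(seg->cache_seg_info.flags & PCACHE_SEG_INFO_FLAGS_HAS_NEXT))
 *			break;
 *		seg_id = seg->cache_seg_info.next_seg;
 *	} while (i < cache->n_segs);
 */
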
int cache_seg_init(struct pcache_cache *cache, u32 seg_id, u32 cache_seg_id,
		   bool new_cache)
{
	struct pcache_cache_dev *cache_dev = cache->cache_dev;
	struct pcache_cache_segment *cache_seg = &cache->segments[cache_seg_id];
	struct pcache_segment_init_options seg_options = { 0 };
	struct pcache_segment *segment = &cache_seg->segment;
	int ret;

	cache_seg->cache = cache;
	cache_seg->cache_seg_id = cache_seg_id;
	spin_lock_init(&cache_seg->gen_lock);
	atomic_set(&cache_seg->refs, 0);
	mutex_init(&cache_seg->info_lock);

	/* init pcache_segment */
	seg_options.type = PCACHE_SEGMENT_TYPE_CACHE_DATA;
	seg_options.data_off = PCACHE_CACHE_SEG_CTRL_OFF + PCACHE_CACHE_SEG_CTRL_SIZE;
	seg_options.seg_id = seg_id;
	seg_options.seg_info = &cache_seg->cache_seg_info;
	pcache_segment_init(cache_dev, segment, &seg_options);

	cache_seg->cache_seg_ctrl = CACHE_DEV_SEGMENT(cache_dev, seg_id) + PCACHE_CACHE_SEG_CTRL_OFF;

	if (new_cache) {
		cache_dev_zero_range(cache_dev, CACHE_DEV_SEGMENT(cache_dev, seg_id),
				     PCACHE_SEG_INFO_SIZE * PCACHE_META_INDEX_MAX +
				     PCACHE_CACHE_SEG_CTRL_SIZE);

		cache_seg_ctrl_init(cache_seg);

		cache_seg->info_index = 0;
		cache_seg_info_write(cache_seg);

		/* clear outdated kset in segment */
		memcpy_flushcache(segment->data, &pcache_empty_kset,
				  sizeof(struct pcache_cache_kset_onmedia));
		pmem_wmb();
	} else {
		ret = cache_seg_meta_load(cache_seg);
		if (ret)
			return ret;
	}

	return 0;
}

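/*
 * Implied on-media layout of a cache-data segment, assuming
 * PCACHE_CACHE_SEG_CTRL_OFF sits directly after the replicated seg_info
 * slots (inferred from the zeroed range in cache_seg_init(), not
 * spelled out in this file):
 *
 *	+----------------------------------+----------------+----------
 *	| seg_info x PCACHE_META_INDEX_MAX | seg ctrl (gen) | data ...
 *	+----------------------------------+----------------+----------
 *	0                                  CTRL_OFF         data_off
 */
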
/**
 * get_cache_segment - Retrieves a free cache segment from the cache.
 * @cache: Pointer to the cache structure.
 *
 * This function attempts to find a free cache segment that can be used.
 * It locks the segment map and checks for the next available segment ID.
 * If a free segment is found, it initializes it and returns a pointer to the
 * cache segment structure. Returns NULL if no segments are available.
 */
struct pcache_cache_segment *get_cache_segment(struct pcache_cache *cache)
{
	struct pcache_cache_segment *cache_seg;
	u32 seg_id;

	spin_lock(&cache->seg_map_lock);
again:
	seg_id = find_next_zero_bit(cache->seg_map, cache->n_segs, cache->last_cache_seg);
	if (seg_id == cache->n_segs) {
		/* reset the hint of ->last_cache_seg and retry */
		if (cache->last_cache_seg) {
			cache->last_cache_seg = 0;
			goto again;
		}
		cache->cache_full = true;
		spin_unlock(&cache->seg_map_lock);
		return NULL;
	}

	/*
	 * found an available cache_seg, mark it used in seg_map
	 * and update the search hint ->last_cache_seg
	 */
	__set_bit(seg_id, cache->seg_map);
	cache->last_cache_seg = seg_id;
	spin_unlock(&cache->seg_map_lock);

	cache_seg = &cache->segments[seg_id];
	cache_seg->cache_seg_id = seg_id;

	return cache_seg;
}

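/*
 * Typical use (hypothetical caller, simplified): allocate a segment,
 * hold a reference while cached keys point into it, and let the final
 * put invalidate it back into the free map:
 *
 *	struct pcache_cache_segment *seg;
 *
 *	seg = get_cache_segment(cache);
 *	if (!seg)
 *		return -EBUSY;	(cache full; wait for invalidation)
 *	cache_seg_get(seg);
 *	... write cached data into seg->segment ...
 *	cache_seg_put(seg);	(last put calls cache_seg_invalidate())
 */
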
static void cache_seg_gen_increase(struct pcache_cache_segment *cache_seg)
{
	spin_lock(&cache_seg->gen_lock);
	cache_seg->gen++;
	spin_unlock(&cache_seg->gen_lock);

	cache_seg_ctrl_write(cache_seg);
}

void cache_seg_get(struct pcache_cache_segment *cache_seg)
{
	atomic_inc(&cache_seg->refs);
}

static void cache_seg_invalidate(struct pcache_cache_segment *cache_seg)
{
	struct pcache_cache *cache;

	cache = cache_seg->cache;
	cache_seg_gen_increase(cache_seg);

	spin_lock(&cache->seg_map_lock);
	if (cache->cache_full)
		cache->cache_full = false;
	__clear_bit(cache_seg->cache_seg_id, cache->seg_map);
	spin_unlock(&cache->seg_map_lock);

	pcache_defer_reqs_kick(CACHE_TO_PCACHE(cache));
	/* clean_work will clean the bad keys in the key_tree */
	queue_work(cache_get_wq(cache), &cache->clean_work);
}

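/*
 * Drop a reference to @cache_seg; the final put invalidates the
 * segment, bumping its generation and returning it to the free map.
 */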
void cache_seg_put(struct pcache_cache_segment *cache_seg)
{
	if (atomic_dec_and_test(&cache_seg->refs))
		cache_seg_invalidate(cache_seg);
}