Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2#ifndef _PCACHE_CACHE_H
3#define _PCACHE_CACHE_H
4
5#include "segment.h"
6
/* Garbage collection thresholds, expressed as a percentage of cache segments in use */
#define PCACHE_CACHE_GC_PERCENT_MIN 0 /* Minimum GC percentage */
#define PCACHE_CACHE_GC_PERCENT_MAX 90 /* Maximum GC percentage */
#define PCACHE_CACHE_GC_PERCENT_DEFAULT 70 /* Default GC percentage */

/* Each cache subtree covers a fixed 4MB span of the logical device */
#define PCACHE_CACHE_SUBTREE_SIZE (4 * PCACHE_MB) /* 4MB total tree size */
#define PCACHE_CACHE_SUBTREE_SIZE_MASK 0x3FFFFF /* Mask for offset within a subtree (4MB - 1) */
#define PCACHE_CACHE_SUBTREE_SIZE_SHIFT 22 /* log2(4MB): shift from offset to subtree index */

/* Maximum number of keys per key set */
#define PCACHE_KSET_KEYS_MAX 128
#define PCACHE_CACHE_SEGS_MAX (1024 * 1024) /* maximum cache size for each device is 16T */
/* Largest on-media size of a kset: header plus a full array of on-media keys */
#define PCACHE_KSET_ONMEDIA_SIZE_MAX struct_size_t(struct pcache_cache_kset_onmedia, data, PCACHE_KSET_KEYS_MAX)
/* In-memory size of one kset, including the flexible on-media key array */
#define PCACHE_KSET_SIZE (sizeof(struct pcache_cache_kset) + sizeof(struct pcache_cache_key_onmedia) * PCACHE_KSET_KEYS_MAX)

/* Maximum number of keys to clean in one round of clean_work */
#define PCACHE_CLEAN_KEYS_MAX 10

/* Writeback and garbage collection intervals in jiffies */
#define PCACHE_CACHE_WRITEBACK_INTERVAL (5 * HZ)
#define PCACHE_CACHE_GC_INTERVAL (5 * HZ)

/* Macro to get the cache key structure from an rb_node pointer */
#define CACHE_KEY(node) (container_of(node, struct pcache_cache_key, rb_node))
31
/*
 * On-media encoding of a cache position: a cache segment id plus a byte
 * offset within that segment. Guarded by a pcache_meta_header so stale or
 * torn updates can be detected (multiple indexed copies are kept, see
 * PCACHE_META_INDEX_MAX usage below).
 */
struct pcache_cache_pos_onmedia {
	struct pcache_meta_header header;
	__u32 cache_seg_id;	/* index into cache->segments */
	__u32 seg_off;		/* byte offset within the segment data area */
};

/* Offset and size definitions for cache segment control */
#define PCACHE_CACHE_SEG_CTRL_OFF (PCACHE_SEG_INFO_SIZE * PCACHE_META_INDEX_MAX)
#define PCACHE_CACHE_SEG_CTRL_SIZE (4 * PCACHE_KB)
41
/* On-media record of a cache segment's generation number */
struct pcache_cache_seg_gen {
	struct pcache_meta_header header;
	__u64 gen;
};

/*
 * Control structure for cache segments. Lives on media; the generation is
 * stored in PCACHE_META_INDEX_MAX copies for crash-safe updates, followed
 * by reserved space (also extended for the cache-wide ctrl on segment 0,
 * see cache_seg_is_ctrl_seg()).
 */
struct pcache_cache_seg_ctrl {
	struct pcache_cache_seg_gen gen[PCACHE_META_INDEX_MAX];
	__u64 res[64];
};
52
/* cache_info.flags layout: bit 0 data CRC, bit 1 init-done, bits 2-5 cache mode, bits 6-12 GC percent */
#define PCACHE_CACHE_FLAGS_DATA_CRC BIT(0)
#define PCACHE_CACHE_FLAGS_INIT_DONE BIT(1)

#define PCACHE_CACHE_FLAGS_CACHE_MODE_MASK GENMASK(5, 2)
#define PCACHE_CACHE_MODE_WRITEBACK 0
#define PCACHE_CACHE_MODE_WRITETHROUGH 1
#define PCACHE_CACHE_MODE_WRITEAROUND 2
#define PCACHE_CACHE_MODE_WRITEONLY 3

/* 7-bit field, wide enough for the 0-90 GC percent range */
#define PCACHE_CACHE_FLAGS_GC_PERCENT_MASK GENMASK(12, 6)

/* On-media description of the whole cache: first segment, segment count, flags */
struct pcache_cache_info {
	struct pcache_meta_header header;
	__u32 seg_id;	/* id of the first cache segment */
	__u32 n_segs;	/* number of cache segments */
	__u32 flags;	/* PCACHE_CACHE_FLAGS_* bitfield above */
	__u32 reserved;
};
71
/* In-memory cache position: a segment pointer plus byte offset within it */
struct pcache_cache_pos {
	struct pcache_cache_segment *cache_seg;
	u32 seg_off;
};

/* In-memory state for one cache segment */
struct pcache_cache_segment {
	struct pcache_cache *cache;	/* owning cache */
	u32 cache_seg_id;		/* Index in cache->segments */
	struct pcache_segment segment;
	atomic_t refs;			/* references held by live cache keys */

	struct pcache_segment_info cache_seg_info;
	struct mutex info_lock;		/* protects cache_seg_info updates */
	u32 info_index;			/* which on-media info copy is current */

	spinlock_t gen_lock;		/* protects gen/gen_seq/gen_index */
	u64 gen;			/* bumped by GC; invalidates older keys */
	u64 gen_seq;
	u32 gen_index;			/* which on-media gen copy is current */

	struct pcache_cache_seg_ctrl *cache_seg_ctrl;
};
94
/* rbtree for cache entries; one subtree covers a 4MB span of logical offsets */
struct pcache_cache_subtree {
	struct rb_root root;
	spinlock_t tree_lock;	/* protects root */
};

/* Forest of subtrees indexed by logical offset (see get_subtree()) */
struct pcache_cache_tree {
	struct pcache_cache *cache;
	u32 n_subtrees;
	mempool_t key_pool;	/* backs cache_key_alloc() */
	struct pcache_cache_subtree *subtrees;
};
107
/* kmem cache backing the key mempools; defined in the cache implementation */
extern struct kmem_cache *key_cache;

/*
 * In-memory cache key: maps a logical range [off, off+len) of the backing
 * device to a position in a cache segment. Lives in one subtree's rbtree.
 */
struct pcache_cache_key {
	struct pcache_cache_tree *cache_tree;
	struct pcache_cache_subtree *cache_subtree;
	struct kref ref;			/* released via cache_key_put() */
	struct rb_node rb_node;			/* linkage in cache_subtree->root */
	struct list_head list_node;
	u64 off;				/* logical offset on backing device */
	u32 len;				/* length of the mapped range */
	u32 flags;				/* PCACHE_CACHE_KEY_FLAGS_* */
	struct pcache_cache_pos cache_pos;	/* where the data sits in cache */
	u64 seg_gen;				/* segment gen at insert; see cache_key_invalid() */
};

#define PCACHE_CACHE_KEY_FLAGS_EMPTY BIT(0)	/* no cached data behind this key */
#define PCACHE_CACHE_KEY_FLAGS_CLEAN BIT(1)	/* already written back */

/* On-media encoding of a cache key, appended into ksets */
struct pcache_cache_key_onmedia {
	__u64 off;
	__u32 len;
	__u32 flags;
	__u32 cache_seg_id;
	__u32 cache_seg_off;
	__u64 seg_gen;
	__u32 data_crc;		/* crc of cached data, when data CRC is enabled */
	__u32 reserved;
};
136
/*
 * On-media key set: a crc-protected batch of keys appended to the key log.
 * The crc covers everything after the crc field itself (see cache_kset_crc()).
 * The union: a normal kset carries key_num; a kset flagged
 * PCACHE_KSET_FLAGS_LAST instead carries the id of the next cache segment
 * the key log continues in.
 */
struct pcache_cache_kset_onmedia {
	__u32 crc;
	union {
		__u32 key_num;
		__u32 next_cache_seg_id;
	};
	__u64 magic;	/* PCACHE_KSET_MAGIC */
	__u64 flags;	/* PCACHE_KSET_FLAGS_* */
	struct pcache_cache_key_onmedia data[];
};
147
/* Top-level in-memory state for one pcache cache instance */
struct pcache_cache {
	struct pcache_backing_dev *backing_dev;
	struct pcache_cache_dev *cache_dev;
	struct pcache_cache_ctrl *cache_ctrl;	/* on-media tails, on ctrl segment */
	u64 dev_size;				/* size of the backing device */

	/* per-CPU data-append heads to avoid contention on writes */
	struct pcache_cache_data_head __percpu *data_heads;

	spinlock_t key_head_lock;	/* protects key_head */
	struct pcache_cache_pos key_head;	/* where the next kset is appended */
	u32 n_ksets;
	struct pcache_cache_kset *ksets;	/* array, indexed via get_kset() */

	struct mutex key_tail_lock;	/* protects key_tail (advanced by GC) */
	struct pcache_cache_pos key_tail;
	u64 key_tail_seq;
	u32 key_tail_index;

	struct mutex dirty_tail_lock;	/* protects dirty_tail (advanced by writeback) */
	struct pcache_cache_pos dirty_tail;
	u64 dirty_tail_seq;
	u32 dirty_tail_index;

	struct pcache_cache_tree req_key_tree;	/* keys serving request lookup */
	struct work_struct clean_work;		/* see clean_fn() */

	struct mutex writeback_lock;
	char wb_kset_onmedia_buf[PCACHE_KSET_ONMEDIA_SIZE_MAX];
	struct pcache_cache_tree writeback_key_tree;
	struct delayed_work writeback_work;	/* see cache_writeback_fn() */
	struct {
		atomic_t pending;	/* in-flight writeback requests */
		u32 advance;		/* bytes to advance dirty_tail when done */
		int ret;		/* first error seen, if any */
	} writeback_ctx;

	char gc_kset_onmedia_buf[PCACHE_KSET_ONMEDIA_SIZE_MAX];
	struct delayed_work gc_work;	/* see pcache_cache_gc_fn() */
	atomic_t gc_errors;

	struct mutex cache_info_lock;	/* protects cache_info/info_index */
	struct pcache_cache_info cache_info;		/* in-memory copy */
	struct pcache_cache_info *cache_info_addr;	/* on-media location */
	u32 info_index;

	u32 n_segs;			/* number of cache segments */
	unsigned long *seg_map;		/* bitmap of allocated segments */
	u32 last_cache_seg;		/* allocation cursor into seg_map */
	bool cache_full;
	spinlock_t seg_map_lock;	/* protects seg_map/last_cache_seg/cache_full */
	struct pcache_cache_segment *segments;	/* array of n_segs entries */
};
200
/* Returns the workqueue used for this cache's background work */
struct workqueue_struct *cache_get_wq(struct pcache_cache *cache);

struct dm_pcache;
/* User-requested options applied when starting a cache */
struct pcache_cache_options {
	u32 cache_mode:4;	/* one of PCACHE_CACHE_MODE_* */
	u32 data_crc:1;		/* enable per-key data CRC */
};
int pcache_cache_start(struct dm_pcache *pcache);
void pcache_cache_stop(struct dm_pcache *pcache);

/*
 * Cache-wide control area stored on the ctrl segment (segment 0).
 * Both tails are kept in PCACHE_META_INDEX_MAX copies for crash safety.
 */
struct pcache_cache_ctrl {
	/* Updated by gc_thread */
	struct pcache_cache_pos_onmedia key_tail_pos[PCACHE_META_INDEX_MAX];

	/* Updated by writeback_thread */
	struct pcache_cache_pos_onmedia dirty_tail_pos[PCACHE_META_INDEX_MAX];
};

/* Per-CPU append position for cached data writes */
struct pcache_cache_data_head {
	struct pcache_cache_pos head_pos;
};
222
223static inline u16 pcache_cache_get_gc_percent(struct pcache_cache *cache)
224{
225 return FIELD_GET(PCACHE_CACHE_FLAGS_GC_PERCENT_MASK, cache->cache_info.flags);
226}
227
/* Validates and stores a new GC percent into cache_info flags */
int pcache_cache_set_gc_percent(struct pcache_cache *cache, u8 percent);

/* cache key lifecycle and manipulation */
struct pcache_cache_key *cache_key_alloc(struct pcache_cache_tree *cache_tree, gfp_t gfp_mask);
void cache_key_init(struct pcache_cache_tree *cache_tree, struct pcache_cache_key *key);
void cache_key_get(struct pcache_cache_key *key);
void cache_key_put(struct pcache_cache_key *key);
/* Appends the key to its kset; force_close flushes the kset immediately */
int cache_key_append(struct pcache_cache *cache, struct pcache_cache_key *key, bool force_close);
/* Inserts into the subtree; fixup resolves overlaps with existing keys */
void cache_key_insert(struct pcache_cache_tree *cache_tree, struct pcache_cache_key *key, bool fixup);
/* Decodes an on-media key into an in-memory key (may verify CRC) */
int cache_key_decode(struct pcache_cache *cache,
		     struct pcache_cache_key_onmedia *key_onmedia,
		     struct pcache_cache_key *key);
void cache_pos_advance(struct pcache_cache_pos *pos, u32 len);

#define PCACHE_KSET_FLAGS_LAST BIT(0)	/* kset chains to next_cache_seg_id */
#define PCACHE_KSET_MAGIC 0x676894a64e164f1aULL
244
/* In-memory kset: accumulates keys before they are flushed to media */
struct pcache_cache_kset {
	struct pcache_cache *cache;
	spinlock_t kset_lock;		/* protects kset_onmedia */
	struct delayed_work flush_work;	/* see kset_flush_fn() */
	struct pcache_cache_kset_onmedia kset_onmedia;
};

extern struct pcache_cache_kset_onmedia pcache_empty_kset;

/* Return codes from cache_subtree_walk() callbacks */
#define SUBTREE_WALK_RET_OK 0
#define SUBTREE_WALK_RET_ERR 1
#define SUBTREE_WALK_RET_NEED_KEY 2	/* caller must pre-allocate a key and retry */
#define SUBTREE_WALK_RET_NEED_REQ 3	/* caller must pre-allocate a backing request and retry */
#define SUBTREE_WALK_RET_RESEARCH 4	/* tree changed; restart the search */
259
/*
 * Context for cache_subtree_walk(): walks keys in a subtree starting at
 * start_node and dispatches to one callback per overlap relation between
 * the walked key (key_tmp) and the reference key (key). Callbacks return
 * SUBTREE_WALK_RET_* codes.
 */
struct pcache_cache_subtree_walk_ctx {
	struct pcache_cache_tree *cache_tree;
	struct rb_node *start_node;
	struct pcache_request *pcache_req;
	struct pcache_cache_key *key;
	u32 req_done;	/* bytes of the request already handled */
	int ret;

	/* pre-allocated key and backing_dev_req */
	struct pcache_cache_key *pre_alloc_key;
	struct pcache_backing_dev_req *pre_alloc_req;

	struct list_head *delete_key_list;
	struct list_head *submit_req_list;

	/*
	 * key_tmp lies entirely after key:
	 *       |--------|          key_tmp
	 * |====|                    key
	 */
	int (*before)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
		      struct pcache_cache_subtree_walk_ctx *ctx);

	/*
	 * key_tmp lies entirely before key:
	 * |----------|             key_tmp
	 *                |=====|   key
	 */
	int (*after)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
		     struct pcache_cache_subtree_walk_ctx *ctx);

	/*
	 * key overlaps the tail of key_tmp:
	 *     |----------------|   key_tmp
	 *            |===========| key
	 */
	int (*overlap_tail)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
			    struct pcache_cache_subtree_walk_ctx *ctx);

	/*
	 * key overlaps the head of key_tmp:
	 *          |--------|      key_tmp
	 * |==========|             key
	 */
	int (*overlap_head)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
			    struct pcache_cache_subtree_walk_ctx *ctx);

	/*
	 * key fully contains key_tmp:
	 *    |----|                key_tmp
	 * |==========|             key
	 */
	int (*overlap_contain)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
			       struct pcache_cache_subtree_walk_ctx *ctx);

	/*
	 * key is fully contained by key_tmp:
	 * |-----------|            key_tmp
	 *    |====|                key
	 */
	int (*overlap_contained)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
				 struct pcache_cache_subtree_walk_ctx *ctx);

	int (*walk_finally)(struct pcache_cache_subtree_walk_ctx *ctx, int ret);
	bool (*walk_done)(struct pcache_cache_subtree_walk_ctx *ctx);
};
320
int cache_subtree_walk(struct pcache_cache_subtree_walk_ctx *ctx);
/* Finds the walk start node; parentp/newp support subsequent rb insertion */
struct rb_node *cache_subtree_search(struct pcache_cache_subtree *cache_subtree, struct pcache_cache_key *key,
				     struct rb_node **parentp, struct rb_node ***newp,
				     struct list_head *delete_key_list);
/* Seals the current kset and writes it to media */
int cache_kset_close(struct pcache_cache *cache, struct pcache_cache_kset *kset);
void clean_fn(struct work_struct *work);
void kset_flush_fn(struct work_struct *work);
/* Rebuilds the in-memory key tree from the on-media key log at start */
int cache_replay(struct pcache_cache *cache);
int cache_tree_init(struct pcache_cache *cache, struct pcache_cache_tree *cache_tree, u32 n_subtrees);
void cache_tree_clear(struct pcache_cache_tree *cache_tree);
void cache_tree_exit(struct pcache_cache_tree *cache_tree);

/* cache segments */
struct pcache_cache_segment *get_cache_segment(struct pcache_cache *cache);
int cache_seg_init(struct pcache_cache *cache, u32 seg_id, u32 cache_seg_id,
		   bool new_cache);
void cache_seg_get(struct pcache_cache_segment *cache_seg);
void cache_seg_put(struct pcache_cache_segment *cache_seg);
void cache_seg_set_next_seg(struct pcache_cache_segment *cache_seg, u32 seg_id);

/* cache request */
int pcache_cache_flush(struct pcache_cache *cache);
void miss_read_end_work_fn(struct work_struct *work);
int pcache_cache_handle_req(struct pcache_cache *cache, struct pcache_request *pcache_req);

/* gc */
void pcache_cache_gc_fn(struct work_struct *work);

/* writeback */
void cache_writeback_exit(struct pcache_cache *cache);
int cache_writeback_init(struct pcache_cache *cache);
void cache_writeback_fn(struct work_struct *work);
353
354/* inline functions */
355static inline struct pcache_cache_subtree *get_subtree(struct pcache_cache_tree *cache_tree, u64 off)
356{
357 if (cache_tree->n_subtrees == 1)
358 return &cache_tree->subtrees[0];
359
360 return &cache_tree->subtrees[off >> PCACHE_CACHE_SUBTREE_SIZE_SHIFT];
361}
362
363static inline void *cache_pos_addr(struct pcache_cache_pos *pos)
364{
365 return (pos->cache_seg->segment.data + pos->seg_off);
366}
367
368static inline void *get_key_head_addr(struct pcache_cache *cache)
369{
370 return cache_pos_addr(&cache->key_head);
371}
372
/*
 * Map a logical offset to a kset id: the subtree index modulo the number
 * of ksets, so neighbouring subtrees spread across ksets. div_u64_rem()
 * is used because 64-bit division is not available natively on all arches.
 */
static inline u32 get_kset_id(struct pcache_cache *cache, u64 off)
{
	u32 kset_id;

	div_u64_rem(off >> PCACHE_CACHE_SUBTREE_SIZE_SHIFT, cache->n_ksets, &kset_id);

	return kset_id;
}
381
382static inline struct pcache_cache_kset *get_kset(struct pcache_cache *cache, u32 kset_id)
383{
384 return (void *)cache->ksets + PCACHE_KSET_SIZE * kset_id;
385}
386
/*
 * Per-CPU data append head for the current CPU.
 * NOTE(review): this_cpu_ptr() assumes the caller runs with preemption
 * disabled or otherwise pinned — confirm against callers.
 */
static inline struct pcache_cache_data_head *get_data_head(struct pcache_cache *cache)
{
	return this_cpu_ptr(cache->data_heads);
}
391
392static inline bool cache_key_empty(struct pcache_cache_key *key)
393{
394 return key->flags & PCACHE_CACHE_KEY_FLAGS_EMPTY;
395}
396
397static inline bool cache_key_clean(struct pcache_cache_key *key)
398{
399 return key->flags & PCACHE_CACHE_KEY_FLAGS_CLEAN;
400}
401
402static inline void cache_pos_copy(struct pcache_cache_pos *dst, struct pcache_cache_pos *src)
403{
404 memcpy(dst, src, sizeof(struct pcache_cache_pos));
405}
406
407/**
408 * cache_seg_is_ctrl_seg - Checks if a cache segment is a cache ctrl segment.
409 * @cache_seg_id: ID of the cache segment.
410 *
411 * Returns true if the cache segment ID corresponds to a cache ctrl segment.
412 *
413 * Note: We extend the segment control of the first cache segment
414 * (cache segment ID 0) to serve as the cache control (pcache_cache_ctrl)
415 * for the entire PCACHE cache. This function determines whether the given
416 * cache segment is the one storing the pcache_cache_ctrl information.
417 */
418static inline bool cache_seg_is_ctrl_seg(u32 cache_seg_id)
419{
420 return (cache_seg_id == 0);
421}
422
423/**
424 * cache_key_cutfront - Cuts a specified length from the front of a cache key.
425 * @key: Pointer to pcache_cache_key structure.
426 * @cut_len: Length to cut from the front.
427 *
428 * Advances the cache key position by cut_len and adjusts offset and length accordingly.
429 */
430static inline void cache_key_cutfront(struct pcache_cache_key *key, u32 cut_len)
431{
432 if (key->cache_pos.cache_seg)
433 cache_pos_advance(&key->cache_pos, cut_len);
434
435 key->off += cut_len;
436 key->len -= cut_len;
437}
438
/**
 * cache_key_cutback - Cuts a specified length from the back of a cache key.
 * @key: Pointer to pcache_cache_key structure.
 * @cut_len: Length to cut from the back.
 *
 * Reduces the length of the cache key by cut_len; offset and cache
 * position are unchanged since only the tail is trimmed.
 */
static inline void cache_key_cutback(struct pcache_cache_key *key, u32 cut_len)
{
	key->len -= cut_len;
}
450
/*
 * Remove a key from its subtree and drop the tree's reference.
 * The key must currently be linked in a subtree (BUG otherwise); the rb
 * erase happens before the put so the key cannot be found once freed.
 */
static inline void cache_key_delete(struct pcache_cache_key *key)
{
	struct pcache_cache_subtree *cache_subtree;

	cache_subtree = key->cache_subtree;
	BUG_ON(!cache_subtree);

	rb_erase(&key->rb_node, &cache_subtree->root);
	key->flags = 0;
	cache_key_put(key);
}
462
463static inline bool cache_data_crc_on(struct pcache_cache *cache)
464{
465 return (cache->cache_info.flags & PCACHE_CACHE_FLAGS_DATA_CRC);
466}
467
468static inline u32 cache_mode_get(struct pcache_cache *cache)
469{
470 return FIELD_GET(PCACHE_CACHE_FLAGS_CACHE_MODE_MASK, cache->cache_info.flags);
471}
472
473static inline void cache_mode_set(struct pcache_cache *cache, u32 cache_mode)
474{
475 cache->cache_info.flags &= ~PCACHE_CACHE_FLAGS_CACHE_MODE_MASK;
476 cache->cache_info.flags |= FIELD_PREP(PCACHE_CACHE_FLAGS_CACHE_MODE_MASK, cache_mode);
477}
478
/**
 * cache_key_data_crc - Calculates CRC for data in a cache key.
 * @key: Pointer to the pcache_cache_key structure.
 *
 * Returns the crc32c checksum of the key->len bytes of cached data at the
 * key's cache position. Caller must ensure the key actually has backing
 * data (cache_pos.cache_seg set).
 */
static inline u32 cache_key_data_crc(struct pcache_cache_key *key)
{
	void *data;

	data = cache_pos_addr(&key->cache_pos);

	return crc32c(PCACHE_CRC_SEED, data, key->len);
}
493
494static inline u32 cache_kset_crc(struct pcache_cache_kset_onmedia *kset_onmedia)
495{
496 u32 crc_size;
497
498 if (kset_onmedia->flags & PCACHE_KSET_FLAGS_LAST)
499 crc_size = sizeof(struct pcache_cache_kset_onmedia) - 4;
500 else
501 crc_size = struct_size(kset_onmedia, data, kset_onmedia->key_num) - 4;
502
503 return crc32c(PCACHE_CRC_SEED, (void *)kset_onmedia + 4, crc_size);
504}
505
/* On-media size of a kset: fixed header plus key_num on-media keys. */
static inline u32 get_kset_onmedia_size(struct pcache_cache_kset_onmedia *kset_onmedia)
{
	return struct_size_t(struct pcache_cache_kset_onmedia, data, kset_onmedia->key_num);
}
510
511/**
512 * cache_seg_remain - Computes remaining space in a cache segment.
513 * @pos: Pointer to pcache_cache_pos structure.
514 *
515 * Returns the amount of remaining space in the segment data starting from
516 * the current position offset.
517 */
518static inline u32 cache_seg_remain(struct pcache_cache_pos *pos)
519{
520 struct pcache_cache_segment *cache_seg;
521 struct pcache_segment *segment;
522 u32 seg_remain;
523
524 cache_seg = pos->cache_seg;
525 segment = &cache_seg->segment;
526 seg_remain = segment->data_size - pos->seg_off;
527
528 return seg_remain;
529}
530
/**
 * cache_key_invalid - Checks if a cache key is invalid.
 * @key: Pointer to pcache_cache_key structure.
 *
 * Returns true if the cache key is invalid due to its generation being
 * less than the generation of its segment; otherwise returns false.
 *
 * When the GC (garbage collection) thread identifies a segment
 * as reclaimable, it increments the segment's generation (gen). However,
 * it does not immediately remove all related cache keys. When accessing
 * such a cache key, this function can be used to determine if the cache
 * key has already become invalid.
 */
static inline bool cache_key_invalid(struct pcache_cache_key *key)
{
	/* empty keys carry no cached data, so no segment gen applies */
	if (cache_key_empty(key))
		return false;

	return (key->seg_gen < key->cache_pos.cache_seg->gen);
}
551
/**
 * cache_key_lstart - Retrieves the logical start offset of a cache key.
 * @key: Pointer to pcache_cache_key structure.
 *
 * Returns the logical start offset (inclusive) on the backing device.
 */
static inline u64 cache_key_lstart(struct pcache_cache_key *key)
{
	return key->off;
}

/**
 * cache_key_lend - Retrieves the logical end offset of a cache key.
 * @key: Pointer to pcache_cache_key structure.
 *
 * Returns the logical end offset (exclusive) on the backing device.
 */
static inline u64 cache_key_lend(struct pcache_cache_key *key)
{
	return key->off + key->len;
}
573
574static inline void cache_key_copy(struct pcache_cache_key *key_dst, struct pcache_cache_key *key_src)
575{
576 key_dst->off = key_src->off;
577 key_dst->len = key_src->len;
578 key_dst->seg_gen = key_src->seg_gen;
579 key_dst->cache_tree = key_src->cache_tree;
580 key_dst->cache_subtree = key_src->cache_subtree;
581 key_dst->flags = key_src->flags;
582
583 cache_pos_copy(&key_dst->cache_pos, &key_src->cache_pos);
584}
585
/**
 * cache_pos_onmedia_crc - Calculates the CRC for an on-media cache position.
 * @pos_om: Pointer to pcache_cache_pos_onmedia structure.
 *
 * Computes the meta CRC over the whole structure via pcache_meta_crc()
 * (which excludes the crc field inside the header itself).
 * Returns the computed CRC value.
 */
static inline u32 cache_pos_onmedia_crc(struct pcache_cache_pos_onmedia *pos_om)
{
	return pcache_meta_crc(&pos_om->header, sizeof(struct pcache_cache_pos_onmedia));
}

/* Persist a cache position to one of its on-media copies (rotates *index) */
void cache_pos_encode(struct pcache_cache *cache,
		      struct pcache_cache_pos_onmedia *pos_onmedia,
		      struct pcache_cache_pos *pos, u64 seq, u32 *index);
/* Load the latest valid on-media copy of a cache position */
int cache_pos_decode(struct pcache_cache *cache,
		     struct pcache_cache_pos_onmedia *pos_onmedia,
		     struct pcache_cache_pos *pos, u64 *seq, u32 *index);
604
/* Persist key_tail; note the sequence number is bumped as a side effect. */
static inline void cache_encode_key_tail(struct pcache_cache *cache)
{
	cache_pos_encode(cache, cache->cache_ctrl->key_tail_pos,
			 &cache->key_tail, ++cache->key_tail_seq,
			 &cache->key_tail_index);
}

/* Load key_tail from media; returns 0 on success or a negative errno. */
static inline int cache_decode_key_tail(struct pcache_cache *cache)
{
	return cache_pos_decode(cache, cache->cache_ctrl->key_tail_pos,
				&cache->key_tail, &cache->key_tail_seq,
				&cache->key_tail_index);
}

/* Persist dirty_tail; note the sequence number is bumped as a side effect. */
static inline void cache_encode_dirty_tail(struct pcache_cache *cache)
{
	cache_pos_encode(cache, cache->cache_ctrl->dirty_tail_pos,
			 &cache->dirty_tail, ++cache->dirty_tail_seq,
			 &cache->dirty_tail_index);
}

/* Load dirty_tail from media; returns 0 on success or a negative errno. */
static inline int cache_decode_dirty_tail(struct pcache_cache *cache)
{
	return cache_pos_decode(cache, cache->cache_ctrl->dirty_tail_pos,
				&cache->dirty_tail, &cache->dirty_tail_seq,
				&cache->dirty_tail_index);
}
632
633int pcache_cache_init(void);
634void pcache_cache_exit(void);
635#endif /* _PCACHE_CACHE_H */