/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_SUBPAGE_H
#define BTRFS_SUBPAGE_H

#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/sizes.h>
#include "btrfs_inode.h"

struct address_space;
struct folio;

/*
 * Extra info for subpage bitmap.
 *
 * For subpage we pack all uptodate/dirty/writeback/ordered bitmaps into
 * one larger bitmap.
 *
 * This structure records how they are organized in the bitmap:
 *
 *  /- uptodate          /- dirty            /- ordered
 *  |                    |                   |
 *  v                    v                   v
 * |u|u|u|u|........|u|u|d|d|.......|d|d|o|o|.......|o|o|
 * |< sectors_per_page >|
 *
 * Unlike regular macro-like enums, these names are kept lower-case, as
 * they are used by various macros to generate function names.
 */
enum {
	btrfs_bitmap_nr_uptodate = 0,
	btrfs_bitmap_nr_dirty,

	/*
	 * This can be changed to atomic eventually, but that change depends
	 * on the async delalloc range rework for the locked bitmap, as
	 * async delalloc can unlock its range and mark blocks writeback at
	 * any time.
	 */
	btrfs_bitmap_nr_writeback,

	/*
	 * The ordered and checked flags are for COW fixup, already marked
	 * deprecated, and will be removed eventually.
	 */
	btrfs_bitmap_nr_ordered,
	btrfs_bitmap_nr_checked,

	/*
	 * The locked bit is for the async delalloc range (compression).
	 * Currently an async extent is queued with its range locked until
	 * the compression is done, so an async extent can unlock the range
	 * at any time.
	 *
	 * Deprecating this flag will require a rework of the async extent
	 * lifespan (mark writeback and do compression first).
	 */
	btrfs_bitmap_nr_locked,
	btrfs_bitmap_nr_max
};
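
/*
 * To make the packing above concrete, here is a minimal sketch of how
 * the first bit of a given sub-bitmap covering @start could be located
 * inside the packed bitmap.  This is not an in-tree helper; the name
 * and the exact arithmetic are illustrative assumptions only.
 */
static inline unsigned int sketch_calc_start_bit(const struct btrfs_fs_info *fs_info,
						 struct folio *folio,
						 unsigned int bitmap_nr, u64 start)
{
	/* Number of fs blocks (sectors) covered by this folio. */
	const u32 blocks_per_folio = folio_size(folio) >> fs_info->sectorsize_bits;
	/* Block index of @start within the folio. */
	const u32 off = (start - folio_pos(folio)) >> fs_info->sectorsize_bits;

	/* Each sub-bitmap occupies one blocks_per_folio sized slot. */
	return bitmap_nr * blocks_per_folio + off;
}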

/*
 * Structure to trace the status of each sector inside a page, attached to
 * page::private for both data and metadata inodes.
 */
struct btrfs_folio_state {
	/* Common members for both data and metadata pages */
	spinlock_t lock;
	union {
		/*
		 * Members only used by metadata.
		 *
		 * @eb_refs should only be operated under private_lock, as it
		 * manages whether the btrfs_folio_state can be detached.
		 */
		atomic_t eb_refs;

		/*
		 * Members only used by data.
		 *
		 * How many sectors inside the page are locked.
		 */
		atomic_t nr_locked;
	};
	unsigned long bitmaps[];
};
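
/*
 * A minimal sketch of how the flexible bitmaps[] array would be sized,
 * with one blocks_per_folio sized slot per btrfs_bitmap_nr_* entry.
 * The real allocation lives in subpage.c; the helper name, the GFP
 * flags and the availability of <linux/slab.h> are assumptions here.
 */
static inline struct btrfs_folio_state *sketch_alloc_folio_state(u32 blocks_per_folio)
{
	const unsigned int total_bits = btrfs_bitmap_nr_max * blocks_per_folio;
	struct btrfs_folio_state *bfs;

	bfs = kzalloc(struct_size(bfs, bitmaps, BITS_TO_LONGS(total_bits)),
		      GFP_NOFS);
	if (!bfs)
		return NULL;
	spin_lock_init(&bfs->lock);
	return bfs;
}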

enum btrfs_folio_type {
	BTRFS_SUBPAGE_METADATA,
	BTRFS_SUBPAGE_DATA,
};

/*
 * Subpage support for metadata is more complex, as we can have dummy extent
 * buffers, where folios have no mapping to determine the owning inode.
 *
 * Thankfully we only need to check if node size is smaller than page size.
 * Even with larger folio support, we will only allocate a folio as large as
 * node size.
 * Thus if nodesize < PAGE_SIZE, we know metadata needs to go through the
 * subpage routines.
 */
static inline bool btrfs_meta_is_subpage(const struct btrfs_fs_info *fs_info)
{
	return fs_info->nodesize < PAGE_SIZE;
}
static inline bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info,
				    struct folio *folio)
{
	if (folio->mapping && folio->mapping->host)
		ASSERT(is_data_inode(BTRFS_I(folio->mapping->host)));
	return fs_info->sectorsize < folio_size(folio);
}

int btrfs_attach_folio_state(const struct btrfs_fs_info *fs_info,
			     struct folio *folio, enum btrfs_folio_type type);
void btrfs_detach_folio_state(const struct btrfs_fs_info *fs_info, struct folio *folio,
			      enum btrfs_folio_type type);

/* Allocate additional data where page represents more than one sector */
struct btrfs_folio_state *btrfs_alloc_folio_state(const struct btrfs_fs_info *fs_info,
						  size_t fsize, enum btrfs_folio_type type);
static inline void btrfs_free_folio_state(struct btrfs_folio_state *bfs)
{
	kfree(bfs);
}
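
/*
 * Lifecycle sketch (hypothetical caller, not an in-tree function):
 * attach the per-block state when a data folio enters the page cache
 * and detach it when the folio is released.  That the attach side is
 * cheap or a no-op for regular (non-subpage) folios is an assumption,
 * not something this header guarantees.
 */
static inline int sketch_data_folio_lifecycle(const struct btrfs_fs_info *fs_info,
					      struct folio *folio)
{
	int ret;

	ret = btrfs_attach_folio_state(fs_info, folio, BTRFS_SUBPAGE_DATA);
	if (ret < 0)
		return ret;

	/* ... the folio can now be tracked with the btrfs_folio_*() helpers ... */

	btrfs_detach_folio_state(fs_info, folio, BTRFS_SUBPAGE_DATA);
	return 0;
}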

void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);

void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
			  struct folio *folio, u64 start, u32 len);
void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
			  struct folio *folio, u64 start, u32 len);
void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
				 struct folio *folio, unsigned long bitmap);
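
/*
 * Locking sketch (hypothetical, for illustration only): the locked
 * bitmap pairs btrfs_folio_set_lock() with btrfs_folio_end_lock() (or
 * btrfs_folio_end_lock_bitmap()) around work on a delalloc range.
 */
static inline void sketch_process_locked_range(const struct btrfs_fs_info *fs_info,
					       struct folio *folio, u64 start, u32 len)
{
	btrfs_folio_set_lock(fs_info, folio, start, len);

	/* ... hand the range off to (possibly async) processing ... */

	btrfs_folio_end_lock(fs_info, folio, start, len);
}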

/*
 * Template for subpage related operations.
 *
 * btrfs_subpage_*() are for call sites where the folio has the subpage
 * state attached and the range is ensured to be inside the folio's
 * single page.
 *
 * btrfs_folio_*() are for call sites where the folio can be either a
 * subpage folio or a regular one.  The functions handle both cases, but
 * the range still needs to be inside one single page.
 *
 * btrfs_folio_clamp_*() are similar to btrfs_folio_*(), except the range
 * doesn't need to be inside the page.  Those functions will truncate the
 * range automatically.
 *
 * Both btrfs_folio_*() and btrfs_folio_clamp_*() are for data folios.
 *
 * For metadata, one should use the btrfs_meta_folio_*() helpers instead,
 * and there is no clamp version for them, as we either go subpage
 * (nodesize < PAGE_SIZE) or go regular folio helpers (nodesize >= PAGE_SIZE,
 * and our folio is never larger than nodesize).
 */
#define DECLARE_BTRFS_SUBPAGE_OPS(name) \
void btrfs_subpage_set_##name(const struct btrfs_fs_info *fs_info, \
			      struct folio *folio, u64 start, u32 len); \
void btrfs_subpage_clear_##name(const struct btrfs_fs_info *fs_info, \
				struct folio *folio, u64 start, u32 len); \
bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info, \
			       struct folio *folio, u64 start, u32 len); \
void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info, \
			    struct folio *folio, u64 start, u32 len); \
void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info, \
			      struct folio *folio, u64 start, u32 len); \
bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info, \
			     struct folio *folio, u64 start, u32 len); \
void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info, \
				  struct folio *folio, u64 start, u32 len); \
void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
				    struct folio *folio, u64 start, u32 len); \
bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
				   struct folio *folio, u64 start, u32 len); \
void btrfs_meta_folio_set_##name(struct folio *folio, const struct extent_buffer *eb); \
void btrfs_meta_folio_clear_##name(struct folio *folio, const struct extent_buffer *eb); \
bool btrfs_meta_folio_test_##name(struct folio *folio, const struct extent_buffer *eb);

DECLARE_BTRFS_SUBPAGE_OPS(uptodate);
DECLARE_BTRFS_SUBPAGE_OPS(dirty);
DECLARE_BTRFS_SUBPAGE_OPS(writeback);
DECLARE_BTRFS_SUBPAGE_OPS(ordered);
DECLARE_BTRFS_SUBPAGE_OPS(checked);
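
/*
 * Usage sketch (hypothetical caller): the clamp variants accept a range
 * that may extend beyond this folio; only the intersecting part is
 * updated, so a caller walking a multi-folio range does not have to
 * trim @start/@len per folio.
 */
static inline void sketch_dirty_range(const struct btrfs_fs_info *fs_info,
				      struct folio *folio, u64 start, u32 len)
{
	/* Safe even if [start, start + len) crosses the folio boundary. */
	btrfs_folio_clamp_set_dirty(fs_info, folio, start, len);
}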

/*
 * Helper for error cleanup, where a folio will have its dirty flag cleared,
 * with writeback started and finished.
 */
static inline void btrfs_folio_clamp_finish_io(struct btrfs_fs_info *fs_info,
					       struct folio *locked_folio,
					       u64 start, u32 len)
{
	btrfs_folio_clamp_clear_dirty(fs_info, locked_folio, start, len);
	btrfs_folio_clamp_set_writeback(fs_info, locked_folio, start, len);
	btrfs_folio_clamp_clear_writeback(fs_info, locked_folio, start, len);
}
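
/*
 * Error-path sketch (hypothetical caller; the mapping_set_error() call
 * is an illustrative assumption, not taken from this header): fail a
 * write range by cycling the writeback state so waiters are woken.
 */
static inline void sketch_fail_write_range(struct btrfs_fs_info *fs_info,
					   struct folio *folio, u64 start, u32 len)
{
	btrfs_folio_clamp_finish_io(fs_info, folio, start, len);
	/* Record the IO error on the owning mapping (assumes it is set). */
	mapping_set_error(folio->mapping, -EIO);
}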

bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info,
					struct folio *folio, u64 start, u32 len);

void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
				  struct folio *folio, u64 start, u32 len);
bool btrfs_meta_folio_clear_and_test_dirty(struct folio *folio, const struct extent_buffer *eb);
void btrfs_get_subpage_dirty_bitmap(struct btrfs_fs_info *fs_info,
				    struct folio *folio,
				    unsigned long *ret_bitmap);
void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info,
				      struct folio *folio, u64 start, u32 len);

#endif