// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/data.c
 *
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#include "internal.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>

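/*
 * bio completion callback: propagate the final I/O status to every page
 * in the bio, marking each one up-to-date on success (or errored on
 * failure) before unlocking it for waiting readers.
 */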
static inline void read_endio(struct bio *bio)
{
	struct super_block *const sb = bio->bi_private;
	struct bio_vec *bvec;
	blk_status_t err = bio->bi_status;
	struct bvec_iter_all iter_all;

	if (time_to_inject(EROFS_SB(sb), FAULT_READ_IO)) {
		erofs_show_injection_info(FAULT_READ_IO);
		err = BLK_STS_IOERR;
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		/* page is already locked */
		DBG_BUGON(PageUptodate(page));

		if (unlikely(err))
			SetPageError(page);
		else
			SetPageUptodate(page);

		unlock_page(page);
		/* page could be reclaimed now */
	}
	bio_put(bio);
}

/*
 * Read one metadata block at @blkaddr through the block device's page
 * cache. @prio is true for directory blocks, which are submitted with
 * REQ_PRIO; @nofail requests a __GFP_NOFAIL allocation plus bounded I/O
 * retries instead of failing outright.
 */
struct page *__erofs_get_meta_page(struct super_block *sb,
				   erofs_blk_t blkaddr, bool prio, bool nofail)
{
	struct inode *const bd_inode = sb->s_bdev->bd_inode;
	struct address_space *const mapping = bd_inode->i_mapping;
	/* prefer retrying in the allocator to blindly looping below */
	const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) |
		(nofail ? __GFP_NOFAIL : 0);
	unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0;
	struct page *page;
	int err;

repeat:
	page = find_or_create_page(mapping, blkaddr, gfp);
	if (unlikely(!page)) {
		DBG_BUGON(nofail);
		return ERR_PTR(-ENOMEM);
	}
	DBG_BUGON(!PageLocked(page));

	if (!PageUptodate(page)) {
		struct bio *bio;

		bio = erofs_grab_bio(sb, blkaddr, 1, sb, read_endio, nofail);
		if (IS_ERR(bio)) {
			DBG_BUGON(nofail);
			err = PTR_ERR(bio);
			goto err_out;
		}

		err = bio_add_page(bio, page, PAGE_SIZE, 0);
		if (unlikely(err != PAGE_SIZE)) {
			err = -EFAULT;
			goto err_out;
		}

		__submit_bio(bio, REQ_OP_READ,
			     REQ_META | (prio ? REQ_PRIO : 0));

		lock_page(page);

		/* this page has been truncated by others */
		if (unlikely(page->mapping != mapping)) {
unlock_repeat:
			unlock_page(page);
			put_page(page);
			goto repeat;
		}

		/* more likely a read error */
		if (unlikely(!PageUptodate(page))) {
			if (io_retries) {
				--io_retries;
				goto unlock_repeat;
			}
			err = -EIO;
			goto err_out;
		}
	}
	return page;

err_out:
	unlock_page(page);
	put_page(page);
	return ERR_PTR(err);
}

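/*
 * Map a logical file offset to its physical location for non-compressed
 * (flat) inodes: plain blocks are addressed relative to ->raw_blkaddr,
 * while the inline tail (if any) lives in the metadata block right after
 * the on-disk inode and its xattrs.
 */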
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_vnode *vi = EROFS_V(inode);

	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
	lastblk = nblocks - is_inode_layout_inline(inode);

	if (unlikely(offset >= inode->i_size)) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (is_inode_layout_inline(inode)) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in one meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
			DBG_BUGON(1);
			err = -EIO;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
		      vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

out:
	map->m_llen = map->m_plen;

err_out:
	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
	return err;
}

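/*
 * Dispatch to the compressed mapper (z_erofs_map_blocks_iter) for
 * compressed inodes and to the flatmode mapper otherwise; any metadata
 * page cached in map->mpage by the compressed path is released here.
 */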
int erofs_map_blocks(struct inode *inode,
		     struct erofs_map_blocks *map, int flags)
{
	if (unlikely(is_inode_layout_compression(inode))) {
		int err = z_erofs_map_blocks_iter(inode, map, flags);

		if (map->mpage) {
			put_page(map->mpage);
			map->mpage = NULL;
		}
		return err;
	}
	return erofs_map_blocks_flatmode(inode, map, flags);
}

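/*
 * Read one page of a non-compressed file, merging physically contiguous
 * pages into the bio passed in (and handed back) so that ->readpages can
 * build large requests; *last_block tracks the last block queued, and a
 * non-contiguous page forces the pending bio to be submitted first.
 */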
static inline struct bio *erofs_read_raw_page(struct bio *bio,
					      struct address_space *mapping,
					      struct page *page,
					      erofs_off_t *last_block,
					      unsigned int nblocks,
					      bool ra)
{
	struct inode *const inode = mapping->host;
	struct super_block *const sb = inode->i_sb;
	erofs_off_t current_block = (erofs_off_t)page->index;
	int err;

	DBG_BUGON(!nblocks);

	if (PageUptodate(page)) {
		err = 0;
		goto has_updated;
	}

	if (cleancache_get_page(page) == 0) {
		err = 0;
		SetPageUptodate(page);
		goto has_updated;
	}

	/* note that bio is also NULL for the readpage case */
	if (bio &&
	    /* not continuous */
	    *last_block + 1 != current_block) {
submit_bio_retry:
		__submit_bio(bio, REQ_OP_READ, 0);
		bio = NULL;
	}

	if (!bio) {
		struct erofs_map_blocks map = {
			.m_la = blknr_to_addr(current_block),
		};
		erofs_blk_t blknr;
		unsigned int blkoff;

		err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (unlikely(err))
			goto err_out;

		/* zero out the page lying in a hole */
		if (unlikely(!(map.m_flags & EROFS_MAP_MAPPED))) {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* for RAW access mode, m_plen must be equal to m_llen */
		DBG_BUGON(map.m_plen != map.m_llen);

		blknr = erofs_blknr(map.m_pa);
		blkoff = erofs_blkoff(map.m_pa);

		/* deal with inline page */
		if (map.m_flags & EROFS_MAP_META) {
			void *vsrc, *vto;
			struct page *ipage;

			DBG_BUGON(map.m_plen > PAGE_SIZE);

			ipage = erofs_get_meta_page(inode->i_sb, blknr, 0);

			if (IS_ERR(ipage)) {
				err = PTR_ERR(ipage);
				goto err_out;
			}

			vsrc = kmap_atomic(ipage);
			vto = kmap_atomic(page);
			memcpy(vto, vsrc + blkoff, map.m_plen);
			memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
			kunmap_atomic(vto);
			kunmap_atomic(vsrc);
			flush_dcache_page(page);

			SetPageUptodate(page);
			/* TODO: could we unlock the page earlier? */
			unlock_page(ipage);
			put_page(ipage);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* pa must be block-aligned for raw reading */
		DBG_BUGON(erofs_blkoff(map.m_pa));

		/* max # of continuous pages */
		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
			nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
		if (nblocks > BIO_MAX_PAGES)
			nblocks = BIO_MAX_PAGES;

		bio = erofs_grab_bio(sb, blknr, nblocks, sb,
				     read_endio, false);
		if (IS_ERR(bio)) {
			err = PTR_ERR(bio);
			bio = NULL;
			goto err_out;
		}
	}

	err = bio_add_page(bio, page, PAGE_SIZE, 0);
	/* out of the extent or bio is full */
	if (err < PAGE_SIZE)
		goto submit_bio_retry;

	*last_block = current_block;

	/* submit the bio in advance in case it is followed by too many gaps */
	if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
		/* err holds the bio_add_page return value; reset it to 0 */
		err = 0;
		goto submit_bio_out;
	}

	return bio;

err_out:
	/* for sync reading, set page error immediately */
	if (!ra) {
		SetPageError(page);
		ClearPageUptodate(page);
	}
has_updated:
	unlock_page(page);

	/* if updated manually, continuous pages have a gap */
	if (bio)
submit_bio_out:
		__submit_bio(bio, REQ_OP_READ, 0);

	return unlikely(err) ? ERR_PTR(err) : NULL;
}

/*
 * since there are no write or truncate flows, no inode
 * lock needs to be held at the moment
 */
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
	erofs_off_t last_block;
	struct bio *bio;

	trace_erofs_readpage(page, true);

	bio = erofs_read_raw_page(NULL, page->mapping,
				  page, &last_block, 1, false);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	DBG_BUGON(bio);	/* since we have only one bio -- must be NULL */
	return 0;
}

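/*
 * ->readpages: walk the readahead page list, adding each page to the
 * page cache and chaining physically contiguous pages into a single bio;
 * per-page errors during readahead are logged and ignored rather than
 * propagated.
 */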
static int erofs_raw_access_readpages(struct file *filp,
				      struct address_space *mapping,
				      struct list_head *pages,
				      unsigned int nr_pages)
{
	erofs_off_t last_block;
	struct bio *bio = NULL;
	gfp_t gfp = readahead_gfp_mask(mapping);
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_erofs_readpages(mapping->host, page, nr_pages, true);

	for (; nr_pages; --nr_pages) {
		page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);

		if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			bio = erofs_read_raw_page(bio, mapping, page,
						  &last_block, nr_pages, true);

			/* all the page errors are ignored during readahead */
			if (IS_ERR(bio)) {
				pr_err("%s, readahead error at page %lu of nid %llu\n",
				       __func__, page->index,
				       EROFS_V(mapping->host)->nid);

				bio = NULL;
			}
		}

		/* pages could still be locked */
		put_page(page);
	}
	DBG_BUGON(!list_empty(pages));

	/* the rare case (end in gaps) */
	if (unlikely(bio))
		__submit_bio(bio, REQ_OP_READ, 0);
	return 0;
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_raw_access_readpage,
	.readpages = erofs_raw_access_readpages,
};