// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2024 Alibaba Cloud
 */
#include "compress.h"
#include <linux/lz4.h>

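/*
 * LZ4 matches may reference data up to LZ4_DISTANCE_MAX (64 KiB - 1 by
 * default) bytes back, so one extra page is reserved: a maximum-distance
 * window that isn't page-aligned can straddle one more page.
 */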
#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)

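/*
 * Parse the on-disk LZ4 configuration and derive the per-sb limits: the
 * largest physical cluster size (in fs blocks) and the number of pages
 * needed to cover the LZ4 match window; finally, grow the global
 * decompression buffers accordingly. For reference, the payload is
 * expected to look roughly like (cf. struct z_erofs_lz4_cfgs):
 *	__le16 max_distance;		LZ4 window size in bytes
 *	__le16 max_pclusterblks;	pcluster limit in fs blocks
 */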
static int z_erofs_load_lz4_config(struct super_block *sb,
                                   struct erofs_super_block *dsb, void *data, int size)
{
        struct erofs_sb_info *sbi = EROFS_SB(sb);
        struct z_erofs_lz4_cfgs *lz4 = data;
        u16 distance;

        if (lz4) {
                if (size < sizeof(struct z_erofs_lz4_cfgs)) {
                        erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
                        return -EINVAL;
                }
                distance = le16_to_cpu(lz4->max_distance);

                sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
                if (!sbi->lz4.max_pclusterblks) {
                        sbi->lz4.max_pclusterblks = 1;  /* reserved case */
                } else if (sbi->lz4.max_pclusterblks >
                           erofs_blknr(sb, Z_EROFS_PCLUSTER_MAX_SIZE)) {
                        erofs_err(sb, "too large lz4 pclusterblks %u",
                                  sbi->lz4.max_pclusterblks);
                        return -EINVAL;
                }
        } else {
                distance = le16_to_cpu(dsb->u1.lz4_max_distance);
                sbi->lz4.max_pclusterblks = 1;
        }

        sbi->lz4.max_distance_pages = distance ?
                                      DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
                                      LZ4_MAX_DISTANCE_PAGES;
        return z_erofs_gbuf_growsize(sbi->lz4.max_pclusterblks);
}

/*
 * Fill all gaps with bounce pages if it's a sparse page list. Also check if
 * all physical pages are consecutive, which is common at moderate compression
 * ratios.
 */
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_decompress_req *rq,
                                        struct page **pagepool)
{
        struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
        unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
                                           BITS_PER_LONG)] = { 0 };
        unsigned int lz4_max_distance_pages =
                                EROFS_SB(rq->sb)->lz4.max_distance_pages;
        void *kaddr = NULL;
        unsigned int i, j, top;

        top = 0;
        for (i = j = 0; i < rq->outpages; ++i, ++j) {
                struct page *const page = rq->out[i];
                struct page *victim;

                if (j >= lz4_max_distance_pages)
                        j = 0;

                /* 'valid' bounced can only be tested after a complete round */
                if (!rq->fillgaps && test_bit(j, bounced)) {
                        DBG_BUGON(i < lz4_max_distance_pages);
                        DBG_BUGON(top >= lz4_max_distance_pages);
                        availables[top++] = rq->out[i - lz4_max_distance_pages];
                }

                if (page) {
                        __clear_bit(j, bounced);
                        if (!PageHighMem(page)) {
                                if (!i) {
                                        kaddr = page_address(page);
                                        continue;
                                }
                                if (kaddr &&
                                    kaddr + PAGE_SIZE == page_address(page)) {
                                        kaddr += PAGE_SIZE;
                                        continue;
                                }
                        }
                        kaddr = NULL;
                        continue;
                }
                kaddr = NULL;
                __set_bit(j, bounced);

                if (top) {
                        victim = availables[--top];
                } else {
                        victim = __erofs_allocpage(pagepool, rq->gfp, true);
                        if (!victim)
                                return -ENOMEM;
                        set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
                }
                rq->out[i] = victim;
        }
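        /*
         * kaddr remains valid only if every output page turned out to be
         * physically consecutive in low memory; return 1 in that case so
         * the caller can use page_address() directly instead of vmapping.
         */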
        return kaddr ? 1 : 0;
}

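/*
 * Map the compressed input for decompression; *maptype records how the
 * mapping was obtained so that the caller can undo it afterwards:
 *	0 - the (already kmapped) head page is used directly;
 *	1 - all input pages were mapped contiguously via erofs_vm_map_ram();
 *	2 - potentially overlapping input was copied into a global buffer;
 *	3 - decompress in place at the tail of the output buffer itself.
 */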
static void *z_erofs_lz4_handle_overlap(const struct z_erofs_decompress_req *rq,
                                        void *inpage, void *out, unsigned int *inputmargin,
                                        int *maptype, bool may_inplace)
{
        unsigned int oend, omargin, cnt, i;
        struct page **in;
        void *src;

        /*
         * If in-place I/O isn't used (e.g. the compressed data is held in
         * bounce pages of the compressed cache for incomplete read requests),
         * just map the compressed buffer as well and decompress directly.
         */
        if (!rq->inplace_io) {
                if (rq->inpages <= 1) {
                        *maptype = 0;
                        return inpage;
                }
                kunmap_local(inpage);
                src = erofs_vm_map_ram(rq->in, rq->inpages);
                if (!src)
                        return ERR_PTR(-ENOMEM);
                *maptype = 1;
                return src;
        }
        /*
         * Then, deal with in-place I/Os. In-place I/O is useful because:
         * (1) it minimizes the memory footprint during I/O submission,
         * which helps slow storage (including network devices and low-end
         * HDDs/eMMCs) with many in-flight I/Os; (2) if in-place
         * decompression can be applied too, the single buffer is reused so
         * that no extra CPU D-cache is polluted with temporary compressed
         * data, for extreme performance.
         */
        oend = rq->pageofs_out + rq->outputsize;
        omargin = PAGE_ALIGN(oend) - oend;
        if (!rq->partial_decoding && may_inplace &&
            omargin >= LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize)) {
                for (i = 0; i < rq->inpages; ++i)
                        if (rq->out[rq->outpages - rq->inpages + i] !=
                            rq->in[i])
                                break;
                if (i >= rq->inpages) {
                        kunmap_local(inpage);
                        *maptype = 3;
                        return out + ((rq->outpages - rq->inpages) << PAGE_SHIFT);
                }
        }
        /*
         * If in-place decompression can't be applied, copy the compressed
         * data that may overlap during decompression into a global buffer.
         */
        src = z_erofs_get_gbuf(rq->inpages);
        if (!src) {
                DBG_BUGON(1);
                kunmap_local(inpage);
                return ERR_PTR(-EFAULT);
        }

        for (i = 0, in = rq->in; i < rq->inputsize; i += cnt, ++in) {
                cnt = min_t(u32, rq->inputsize - i, PAGE_SIZE - *inputmargin);
                if (!inpage)
                        inpage = kmap_local_page(*in);
                memcpy(src + i, inpage + *inputmargin, cnt);
                kunmap_local(inpage);
                inpage = NULL;
                *inputmargin = 0;
        }
        *maptype = 2;
        return src;
}

/*
 * Get the exact on-disk size of the compressed data:
 * - For LZ4, this only applies if the zero_padding feature is on (Linux 5.3+);
 * - For all other algorithms, zero_padding is always enabled.
 */
const char *z_erofs_fixup_insize(struct z_erofs_decompress_req *rq,
                                 const char *padbuf, unsigned int padbufsize)
{
        const char *padend;

        padend = memchr_inv(padbuf, 0, padbufsize);
        if (!padend)
                return "compressed data start not found";
        rq->inputsize -= padend - padbuf;
        rq->pageofs_in += padend - padbuf;
        return NULL;
}

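/*
 * Decompress a single LZ4 pcluster into the virtually contiguous buffer
 * @dst. With zero_padding, the exact compressed size is recovered first
 * (a prerequisite for safe in-place decompression); otherwise, fall back
 * to partial decoding since legacy pclusters may carry trailing data.
 */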
static int z_erofs_lz4_decompress_mem(struct z_erofs_decompress_req *rq, u8 *dst)
{
        bool support_0padding = false, may_inplace = false;
        unsigned int inputmargin;
        u8 *out, *headpage, *src;
        const char *reason;
        int ret, maptype;

        DBG_BUGON(*rq->in == NULL);
        headpage = kmap_local_page(*rq->in);

        /* LZ4 decompression inplace is only safe if zero_padding is enabled */
        if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
                support_0padding = true;
                reason = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
                                min_t(unsigned int, rq->inputsize,
                                      rq->sb->s_blocksize - rq->pageofs_in));
                if (reason) {
                        kunmap_local(headpage);
                        return IS_ERR(reason) ? PTR_ERR(reason) : -EFSCORRUPTED;
                }
                may_inplace = !((rq->pageofs_in + rq->inputsize) &
                                (rq->sb->s_blocksize - 1));
        }

        inputmargin = rq->pageofs_in;
        src = z_erofs_lz4_handle_overlap(rq, headpage, dst, &inputmargin,
                                         &maptype, may_inplace);
        if (IS_ERR(src))
                return PTR_ERR(src);

        out = dst + rq->pageofs_out;
        /* legacy format could compress extra data in a pcluster. */
        if (rq->partial_decoding || !support_0padding)
                ret = LZ4_decompress_safe_partial(src + inputmargin, out,
                                rq->inputsize, rq->outputsize, rq->outputsize);
        else
                ret = LZ4_decompress_safe(src + inputmargin, out,
                                          rq->inputsize, rq->outputsize);

        if (ret != rq->outputsize) {
                if (ret >= 0)
                        memset(out + ret, 0, rq->outputsize - ret);
                ret = -EFSCORRUPTED;
        } else {
                ret = 0;
        }

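        /* undo the input mapping according to the maptype chosen above */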
        if (maptype == 0) {
                kunmap_local(headpage);
        } else if (maptype == 1) {
                vm_unmap_ram(src, rq->inpages);
        } else if (maptype == 2) {
                z_erofs_put_gbuf(src);
        } else if (maptype != 3) {
                DBG_BUGON(1);
                return -EFAULT;
        }
        return ret;
}

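/*
 * LZ4 entry point: set up a virtually contiguous destination mapping
 * (dst_maptype: 0 = kmap of a single page, 1 = page_address() of
 * physically consecutive low pages, 2 = erofs_vm_map_ram()) and then
 * decompress into it.
 */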
static const char *z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
                                          struct page **pagepool)
{
        unsigned int dst_maptype;
        void *dst;
        int ret;

        /* only one optimized fast path for non-bigpcluster cases for now */
        if (rq->inpages == 1 && rq->outpages == 1 && !rq->inplace_io) {
                DBG_BUGON(!*rq->out);
                dst = kmap_local_page(*rq->out);
                dst_maptype = 0;
        } else {
                /* general decoding path which can be used for all cases */
                ret = z_erofs_lz4_prepare_dstpages(rq, pagepool);
                if (ret < 0)
                        return ERR_PTR(ret);
                if (ret > 0) {
                        dst = page_address(*rq->out);
                        dst_maptype = 1;
                } else {
                        dst = erofs_vm_map_ram(rq->out, rq->outpages);
                        if (!dst)
                                return ERR_PTR(-ENOMEM);
                        dst_maptype = 2;
                }
        }
        ret = z_erofs_lz4_decompress_mem(rq, dst);
        if (!dst_maptype)
                kunmap_local(dst);
        else if (dst_maptype == 2)
                vm_unmap_ram(dst, rq->outpages);
        return ERR_PTR(ret);
}

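/*
 * Handle uncompressed ("plain") pclusters. In the interlaced format, the
 * record is stored rotated: the bytes that belong to the head of the output
 * block (from pageofs_out up to the block end) live at the tail of the
 * input, which is what makes an in-place copy feasible; the shifted format
 * is a straightforward forward copy with an offset.
 */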
static const char *z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
                                           struct page **pagepool)
{
        const unsigned int nrpages_in = rq->inpages, nrpages_out = rq->outpages;
        const unsigned int bs = rq->sb->s_blocksize;
        unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt;
        u8 *kin;

        if (rq->outputsize > rq->inputsize)
                return ERR_PTR(-EOPNOTSUPP);
        if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) {
                cur = bs - (rq->pageofs_out & (bs - 1));
                pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK;
                cur = min(cur, rq->outputsize);
                if (cur && rq->out[0]) {
                        kin = kmap_local_page(rq->in[nrpages_in - 1]);
                        if (rq->out[0] == rq->in[nrpages_in - 1])
                                memmove(kin + rq->pageofs_out, kin + pi, cur);
                        else
                                memcpy_to_page(rq->out[0], rq->pageofs_out,
                                               kin + pi, cur);
                        kunmap_local(kin);
                }
                rq->outputsize -= cur;
        }

        for (; rq->outputsize; rq->pageofs_in = 0, cur += insz, ni++) {
                insz = min(PAGE_SIZE - rq->pageofs_in, rq->outputsize);
                rq->outputsize -= insz;
                if (!rq->in[ni])
                        continue;
                kin = kmap_local_page(rq->in[ni]);
                pi = 0;
                do {
                        no = (rq->pageofs_out + cur + pi) >> PAGE_SHIFT;
                        po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK;
                        DBG_BUGON(no >= nrpages_out);
                        cnt = min(insz - pi, PAGE_SIZE - po);
                        if (rq->out[no] == rq->in[ni])
                                memmove(kin + po,
                                        kin + rq->pageofs_in + pi, cnt);
                        else if (rq->out[no])
                                memcpy_to_page(rq->out[no], po,
                                               kin + rq->pageofs_in + pi, cnt);
                        pi += cnt;
                } while (pi < insz);
                kunmap_local(kin);
        }
        DBG_BUGON(ni > nrpages_in);
        return NULL;
}

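/*
 * Shared helper for the streaming decompressors (LZMA/DEFLATE/ZSTD): once
 * the current output page is filled up or the current input page is fully
 * consumed, move the window to the next page and remap *dst/*src, dealing
 * with overlapping in-place I/O pages along the way. Returns NULL on
 * success, or an error string / ERR_PTR() on failure.
 */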
const char *z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx,
                                       void **dst, void **src, struct page **pgpl)
{
        struct z_erofs_decompress_req *rq = dctx->rq;
        struct page **pgo, *tmppage;
        unsigned int j;

        if (!dctx->avail_out) {
                if (++dctx->no >= rq->outpages || !rq->outputsize)
                        return "insufficient space for decompressed data";

                if (dctx->kout)
                        kunmap_local(dctx->kout);
                dctx->avail_out = min(rq->outputsize, PAGE_SIZE - rq->pageofs_out);
                rq->outputsize -= dctx->avail_out;
                pgo = &rq->out[dctx->no];
                if (!*pgo && rq->fillgaps) {    /* deduped */
                        *pgo = erofs_allocpage(pgpl, rq->gfp);
                        if (!*pgo) {
                                dctx->kout = NULL;
                                return ERR_PTR(-ENOMEM);
                        }
                        set_page_private(*pgo, Z_EROFS_SHORTLIVED_PAGE);
                }
                if (*pgo) {
                        dctx->kout = kmap_local_page(*pgo);
                        *dst = dctx->kout + rq->pageofs_out;
                } else {
                        *dst = dctx->kout = NULL;
                }
                rq->pageofs_out = 0;
        }

        if (dctx->inbuf_pos == dctx->inbuf_sz && rq->inputsize) {
                if (++dctx->ni >= rq->inpages)
                        return "invalid compressed data";
                if (dctx->kout) /* kmap_local mappings must be unmapped in reverse order */
                        kunmap_local(dctx->kout);
                kunmap_local(dctx->kin);

                dctx->inbuf_sz = min_t(u32, rq->inputsize, PAGE_SIZE);
                rq->inputsize -= dctx->inbuf_sz;
                dctx->kin = kmap_local_page(rq->in[dctx->ni]);
                *src = dctx->kin;
                dctx->bounced = false;
                if (dctx->kout) {
                        j = (u8 *)*dst - dctx->kout;
                        dctx->kout = kmap_local_page(rq->out[dctx->no]);
                        *dst = dctx->kout + j;
                }
                dctx->inbuf_pos = 0;
        }

        /*
         * Handle overlapping: use the given bounce buffer if the input page
         * is currently being processed; otherwise duplicate overlapping
         * in-place I/O pages with short-lived pages from the on-stack page
         * pool, which are shared within the same request. Note that only a
         * few in-place I/O pages actually need to be duplicated.
         */
        if (!dctx->bounced && rq->out[dctx->no] == rq->in[dctx->ni]) {
                memcpy(dctx->bounce, *src, dctx->inbuf_sz);
                *src = dctx->bounce;
                dctx->bounced = true;
        }

        for (j = dctx->ni + 1; j < rq->inpages; ++j) {
                if (rq->out[dctx->no] != rq->in[j])
                        continue;
                tmppage = erofs_allocpage(pgpl, rq->gfp);
                if (!tmppage)
                        return ERR_PTR(-ENOMEM);
                set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
                copy_highpage(tmppage, rq->in[j]);
                rq->in[j] = tmppage;
        }
        return NULL;
}

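/*
 * Decompressor table, indexed by algorithm ID; entries are NULL when the
 * corresponding decompressor is compiled out (see the #ifdef blocks below).
 */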
const struct z_erofs_decompressor *z_erofs_decomp[] = {
        [Z_EROFS_COMPRESSION_SHIFTED] = &(const struct z_erofs_decompressor) {
                .decompress = z_erofs_transform_plain,
                .name = "shifted"
        },
        [Z_EROFS_COMPRESSION_INTERLACED] = &(const struct z_erofs_decompressor) {
                .decompress = z_erofs_transform_plain,
                .name = "interlaced"
        },
        [Z_EROFS_COMPRESSION_LZ4] = &(const struct z_erofs_decompressor) {
                .config = z_erofs_load_lz4_config,
                .decompress = z_erofs_lz4_decompress,
                .init = z_erofs_gbuf_init,
                .exit = z_erofs_gbuf_exit,
                .name = "lz4"
        },
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
        [Z_EROFS_COMPRESSION_LZMA] = &z_erofs_lzma_decomp,
#endif
#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
        [Z_EROFS_COMPRESSION_DEFLATE] = &z_erofs_deflate_decomp,
#endif
#ifdef CONFIG_EROFS_FS_ZIP_ZSTD
        [Z_EROFS_COMPRESSION_ZSTD] = &z_erofs_zstd_decomp,
#endif
};

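/*
 * Read the per-algorithm configuration records that follow the superblock
 * (compr_cfgs feature) and feed each one to its decompressor's ->config()
 * hook; without compr_cfgs, only LZ4 with the legacy defaults is assumed.
 */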
int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
{
        struct erofs_sb_info *sbi = EROFS_SB(sb);
        struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
        unsigned int algs, alg;
        erofs_off_t offset;
        int size, ret = 0;

        if (!erofs_sb_has_compr_cfgs(sbi)) {
                sbi->available_compr_algs = 1 << Z_EROFS_COMPRESSION_LZ4;
                return z_erofs_load_lz4_config(sb, dsb, NULL, 0);
        }

        sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
        if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
                erofs_err(sb, "unidentified algorithms %x, please upgrade kernel",
                          sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
                return -EOPNOTSUPP;
        }

        (void)erofs_init_metabuf(&buf, sb, false);
        offset = EROFS_SUPER_OFFSET + sbi->sb_size;
        alg = 0;
        for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
                const struct z_erofs_decompressor *dec = z_erofs_decomp[alg];
                void *data;

                if (!(algs & 1))
                        continue;

                data = erofs_read_metadata(sb, &buf, &offset, &size);
                if (IS_ERR(data)) {
                        ret = PTR_ERR(data);
                        break;
                }

                if (alg < Z_EROFS_COMPRESSION_MAX && dec && dec->config) {
                        ret = dec->config(sb, dsb, data, size);
                } else {
                        erofs_err(sb, "algorithm %d isn't enabled on this kernel",
                                  alg);
                        ret = -EOPNOTSUPP;
                }
                kfree(data);
                if (ret)
                        break;
        }
        erofs_put_metabuf(&buf);
        return ret;
}

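/*
 * Only on-disk algorithms (IDs below Z_EROFS_COMPRESSION_MAX) provide
 * ->init()/->exit() hooks; the shifted/interlaced transforms sit past
 * Z_EROFS_COMPRESSION_MAX in the table, so the loops below never touch
 * them. On failure, already-initialized decompressors are unwound in
 * reverse order.
 */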
int __init z_erofs_init_decompressor(void)
{
        int i, err;

        for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i) {
                err = z_erofs_decomp[i] ? z_erofs_decomp[i]->init() : 0;
                if (err) {
                        while (i--)
                                if (z_erofs_decomp[i])
                                        z_erofs_decomp[i]->exit();
                        return err;
                }
        }
        return 0;
}

void z_erofs_exit_decompressor(void)
{
        int i;

        for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i)
                if (z_erofs_decomp[i])
                        z_erofs_decomp[i]->exit();
}