Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2008 Oracle. All rights reserved.
4 */
5
6#include <linux/kernel.h>
7#include <linux/bio.h>
8#include <linux/file.h>
9#include <linux/fs.h>
10#include <linux/pagemap.h>
11#include <linux/highmem.h>
12#include <linux/time.h>
13#include <linux/init.h>
14#include <linux/string.h>
15#include <linux/backing-dev.h>
16#include <linux/writeback.h>
17#include <linux/slab.h>
18#include <linux/sched/mm.h>
19#include <linux/log2.h>
20#include <crypto/hash.h>
21#include "misc.h"
22#include "ctree.h"
23#include "disk-io.h"
24#include "transaction.h"
25#include "btrfs_inode.h"
26#include "volumes.h"
27#include "ordered-data.h"
28#include "compression.h"
29#include "extent_io.h"
30#include "extent_map.h"
31
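/*
 * Indexed by enum btrfs_compression_type; BTRFS_COMPRESS_NONE maps to the
 * empty string.
 */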
32static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
33
34const char* btrfs_compress_type2str(enum btrfs_compression_type type)
35{
36 switch (type) {
37 case BTRFS_COMPRESS_ZLIB:
38 case BTRFS_COMPRESS_LZO:
39 case BTRFS_COMPRESS_ZSTD:
40 case BTRFS_COMPRESS_NONE:
41 return btrfs_compress_types[type];
42 default:
43 break;
44 }
45
46 return NULL;
47}
48
49bool btrfs_compress_is_valid_type(const char *str, size_t len)
50{
51 int i;
52
53 for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
54 size_t comp_len = strlen(btrfs_compress_types[i]);
55
56 if (len < comp_len)
57 continue;
58
59 if (!strncmp(btrfs_compress_types[i], str, comp_len))
60 return true;
61 }
62 return false;
63}
64
65static int compression_compress_pages(int type, struct list_head *ws,
66 struct address_space *mapping, u64 start, struct page **pages,
67 unsigned long *out_pages, unsigned long *total_in,
68 unsigned long *total_out)
69{
70 switch (type) {
71 case BTRFS_COMPRESS_ZLIB:
72 return zlib_compress_pages(ws, mapping, start, pages,
73 out_pages, total_in, total_out);
74 case BTRFS_COMPRESS_LZO:
75 return lzo_compress_pages(ws, mapping, start, pages,
76 out_pages, total_in, total_out);
77 case BTRFS_COMPRESS_ZSTD:
78 return zstd_compress_pages(ws, mapping, start, pages,
79 out_pages, total_in, total_out);
80 case BTRFS_COMPRESS_NONE:
81 default:
82 /*
83 * This can't happen, the type is validated several times
84 * before we get here. As a sane fallback, return what the
85 * callers will understand as 'no compression happened'.
86 */
87 return -E2BIG;
88 }
89}
90
91static int compression_decompress_bio(int type, struct list_head *ws,
92 struct compressed_bio *cb)
93{
94 switch (type) {
95 case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
96 case BTRFS_COMPRESS_LZO: return lzo_decompress_bio(ws, cb);
97 case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
98 case BTRFS_COMPRESS_NONE:
99 default:
100 /*
101 * This can't happen, the type is validated several times
102 * before we get here.
103 */
104 BUG();
105 }
106}
107
108static int compression_decompress(int type, struct list_head *ws,
109 unsigned char *data_in, struct page *dest_page,
110 unsigned long start_byte, size_t srclen, size_t destlen)
111{
112 switch (type) {
113 case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
114 start_byte, srclen, destlen);
115 case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_page,
116 start_byte, srclen, destlen);
117 case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
118 start_byte, srclen, destlen);
119 case BTRFS_COMPRESS_NONE:
120 default:
121 /*
122 * This can't happen, the type is validated several times
123 * before we get here.
124 */
125 BUG();
126 }
127}
128
129static int btrfs_decompress_bio(struct compressed_bio *cb);
130
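/*
 * Allocation size of a compressed_bio: the struct itself plus one inline
 * checksum per sector of compressed data.
 */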
131static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
132 unsigned long disk_size)
133{
134 return sizeof(struct compressed_bio) +
135 (DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * fs_info->csum_size;
136}
137
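/*
 * Verify the checksums of the compressed data in @bio against the sums
 * stored in @cb, one checksum per sector, and return -EIO on the first
 * mismatch (0 if everything matches or checksums are not in use).
 */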
138static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
139 u64 disk_start)
140{
141 struct btrfs_fs_info *fs_info = inode->root->fs_info;
142 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
143 const u32 csum_size = fs_info->csum_size;
144 const u32 sectorsize = fs_info->sectorsize;
145 struct page *page;
146 unsigned long i;
147 char *kaddr;
148 u8 csum[BTRFS_CSUM_SIZE];
149 struct compressed_bio *cb = bio->bi_private;
150 u8 *cb_sum = cb->sums;
151
152 if (!fs_info->csum_root || (inode->flags & BTRFS_INODE_NODATASUM))
153 return 0;
154
155 shash->tfm = fs_info->csum_shash;
156
157 for (i = 0; i < cb->nr_pages; i++) {
158 u32 pg_offset;
159 u32 bytes_left = PAGE_SIZE;
160 page = cb->compressed_pages[i];
161
162 /* Determine the remaining bytes inside the page first */
163 if (i == cb->nr_pages - 1)
164 bytes_left = cb->compressed_len - i * PAGE_SIZE;
165
166 /* Hash through the page sector by sector */
167 for (pg_offset = 0; pg_offset < bytes_left;
168 pg_offset += sectorsize) {
169 kaddr = kmap_atomic(page);
170 crypto_shash_digest(shash, kaddr + pg_offset,
171 sectorsize, csum);
172 kunmap_atomic(kaddr);
173
174 if (memcmp(&csum, cb_sum, csum_size) != 0) {
175 btrfs_print_data_csum_error(inode, disk_start,
176 csum, cb_sum, cb->mirror_num);
177 if (btrfs_io_bio(bio)->device)
178 btrfs_dev_stat_inc_and_print(
179 btrfs_io_bio(bio)->device,
180 BTRFS_DEV_STAT_CORRUPTION_ERRS);
181 return -EIO;
182 }
183 cb_sum += csum_size;
184 disk_start += sectorsize;
185 }
186 }
187 return 0;
188}
189
/*
 * When we finish reading compressed pages from the disk, we decompress
 * them and then run the bio end_io routines on the decompressed pages
 * (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally.
 *
 * The compressed pages are freed here, and this must run in process
 * context.
 */
200static void end_compressed_bio_read(struct bio *bio)
201{
202 struct compressed_bio *cb = bio->bi_private;
203 struct inode *inode;
204 struct page *page;
205 unsigned long index;
206 unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
207 int ret = 0;
208
209 if (bio->bi_status)
210 cb->errors = 1;
211
212 /* if there are more bios still pending for this compressed
213 * extent, just exit
214 */
215 if (!refcount_dec_and_test(&cb->pending_bios))
216 goto out;
217
218 /*
219 * Record the correct mirror_num in cb->orig_bio so that
220 * read-repair can work properly.
221 */
222 btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
223 cb->mirror_num = mirror;
224
	/*
	 * Some IO in this cb has failed; skip the checksum as there is
	 * no way it could be correct.
	 */
229 if (cb->errors == 1)
230 goto csum_failed;
231
232 inode = cb->inode;
233 ret = check_compressed_csum(BTRFS_I(inode), bio,
234 bio->bi_iter.bi_sector << 9);
235 if (ret)
236 goto csum_failed;
237
	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
241 ret = btrfs_decompress_bio(cb);
242
243csum_failed:
244 if (ret)
245 cb->errors = 1;
246
247 /* release the compressed pages */
248 index = 0;
249 for (index = 0; index < cb->nr_pages; index++) {
250 page = cb->compressed_pages[index];
251 page->mapping = NULL;
252 put_page(page);
253 }
254
255 /* do io completion on the original bio */
256 if (cb->errors) {
257 bio_io_error(cb->orig_bio);
258 } else {
259 struct bio_vec *bvec;
260 struct bvec_iter_all iter_all;
261
262 /*
263 * we have verified the checksum already, set page
264 * checked so the end_io handlers know about it
265 */
266 ASSERT(!bio_flagged(bio, BIO_CLONED));
267 bio_for_each_segment_all(bvec, cb->orig_bio, iter_all)
268 SetPageChecked(bvec->bv_page);
269
270 bio_endio(cb->orig_bio);
271 }
272
273 /* finally free the cb struct */
274 kfree(cb->compressed_pages);
275 kfree(cb);
276out:
277 bio_put(bio);
278}
279
280/*
281 * Clear the writeback bits on all of the file
282 * pages for a compressed write
283 */
284static noinline void end_compressed_writeback(struct inode *inode,
285 const struct compressed_bio *cb)
286{
287 unsigned long index = cb->start >> PAGE_SHIFT;
288 unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
289 struct page *pages[16];
290 unsigned long nr_pages = end_index - index + 1;
291 int i;
292 int ret;
293
294 if (cb->errors)
295 mapping_set_error(inode->i_mapping, -EIO);
296
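	/* End writeback on the range in batches of up to ARRAY_SIZE(pages) pages. */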
297 while (nr_pages > 0) {
298 ret = find_get_pages_contig(inode->i_mapping, index,
299 min_t(unsigned long,
300 nr_pages, ARRAY_SIZE(pages)), pages);
301 if (ret == 0) {
302 nr_pages -= 1;
303 index += 1;
304 continue;
305 }
306 for (i = 0; i < ret; i++) {
307 if (cb->errors)
308 SetPageError(pages[i]);
309 end_page_writeback(pages[i]);
310 put_page(pages[i]);
311 }
312 nr_pages -= ret;
313 index += ret;
314 }
315 /* the inode may be gone now */
316}
317
318/*
319 * do the cleanup once all the compressed pages hit the disk.
320 * This will clear writeback on the file pages and free the compressed
321 * pages.
322 *
323 * This also calls the writeback end hooks for the file pages so that
324 * metadata and checksums can be updated in the file.
325 */
326static void end_compressed_bio_write(struct bio *bio)
327{
328 struct compressed_bio *cb = bio->bi_private;
329 struct inode *inode;
330 struct page *page;
331 unsigned long index;
332
333 if (bio->bi_status)
334 cb->errors = 1;
335
336 /* if there are more bios still pending for this compressed
337 * extent, just exit
338 */
339 if (!refcount_dec_and_test(&cb->pending_bios))
340 goto out;
341
342 /* ok, we're the last bio for this extent, step one is to
343 * call back into the FS and do all the end_io operations
344 */
345 inode = cb->inode;
346 cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
347 btrfs_writepage_endio_finish_ordered(cb->compressed_pages[0],
348 cb->start, cb->start + cb->len - 1,
349 bio->bi_status == BLK_STS_OK);
350 cb->compressed_pages[0]->mapping = NULL;
351
352 end_compressed_writeback(inode, cb);
353 /* note, our inode could be gone now */
354
355 /*
356 * release the compressed pages, these came from alloc_page and
357 * are not attached to the inode at all
358 */
359 index = 0;
360 for (index = 0; index < cb->nr_pages; index++) {
361 page = cb->compressed_pages[index];
362 page->mapping = NULL;
363 put_page(page);
364 }
365
366 /* finally free the cb struct */
367 kfree(cb->compressed_pages);
368 kfree(cb);
369out:
370 bio_put(bio);
371}
372
373/*
374 * worker function to build and submit bios for previously compressed pages.
375 * The corresponding pages in the inode should be marked for writeback
376 * and the compressed pages should have a reference on them for dropping
377 * when the IO is complete.
378 *
379 * This also checksums the file bytes and gets things ready for
380 * the end io hooks.
381 */
382blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
383 unsigned long len, u64 disk_start,
384 unsigned long compressed_len,
385 struct page **compressed_pages,
386 unsigned long nr_pages,
387 unsigned int write_flags,
388 struct cgroup_subsys_state *blkcg_css)
389{
390 struct btrfs_fs_info *fs_info = inode->root->fs_info;
391 struct bio *bio = NULL;
392 struct compressed_bio *cb;
393 unsigned long bytes_left;
394 int pg_index = 0;
395 struct page *page;
396 u64 first_byte = disk_start;
397 blk_status_t ret;
398 int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
399
400 WARN_ON(!PAGE_ALIGNED(start));
401 cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
402 if (!cb)
403 return BLK_STS_RESOURCE;
404 refcount_set(&cb->pending_bios, 0);
405 cb->errors = 0;
406 cb->inode = &inode->vfs_inode;
407 cb->start = start;
408 cb->len = len;
409 cb->mirror_num = 0;
410 cb->compressed_pages = compressed_pages;
411 cb->compressed_len = compressed_len;
412 cb->orig_bio = NULL;
413 cb->nr_pages = nr_pages;
414
415 bio = btrfs_bio_alloc(first_byte);
416 bio->bi_opf = REQ_OP_WRITE | write_flags;
417 bio->bi_private = cb;
418 bio->bi_end_io = end_compressed_bio_write;
419
420 if (blkcg_css) {
421 bio->bi_opf |= REQ_CGROUP_PUNT;
422 kthread_associate_blkcg(blkcg_css);
423 }
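	/*
	 * The initial reference is dropped by the completion of the final bio
	 * submitted after the loop below; each bio submitted inside the loop
	 * takes its own reference before submission.
	 */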
424 refcount_set(&cb->pending_bios, 1);
425
426 /* create and submit bios for the compressed pages */
427 bytes_left = compressed_len;
428 for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
429 int submit = 0;
430
431 page = compressed_pages[pg_index];
432 page->mapping = inode->vfs_inode.i_mapping;
433 if (bio->bi_iter.bi_size)
434 submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
435 0);
436
437 page->mapping = NULL;
438 if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
439 PAGE_SIZE) {
			/*
			 * Increment the count before we submit the bio so
			 * the end IO handler can't run and free the cb
			 * before we're done setting it up.
			 */
446 refcount_inc(&cb->pending_bios);
447 ret = btrfs_bio_wq_end_io(fs_info, bio,
448 BTRFS_WQ_ENDIO_DATA);
449 BUG_ON(ret); /* -ENOMEM */
450
451 if (!skip_sum) {
452 ret = btrfs_csum_one_bio(inode, bio, start, 1);
453 BUG_ON(ret); /* -ENOMEM */
454 }
455
456 ret = btrfs_map_bio(fs_info, bio, 0);
457 if (ret) {
458 bio->bi_status = ret;
459 bio_endio(bio);
460 }
461
462 bio = btrfs_bio_alloc(first_byte);
463 bio->bi_opf = REQ_OP_WRITE | write_flags;
464 bio->bi_private = cb;
465 bio->bi_end_io = end_compressed_bio_write;
466 if (blkcg_css)
467 bio->bi_opf |= REQ_CGROUP_PUNT;
468 bio_add_page(bio, page, PAGE_SIZE, 0);
469 }
470 if (bytes_left < PAGE_SIZE) {
471 btrfs_info(fs_info,
472 "bytes left %lu compress len %lu nr %lu",
473 bytes_left, cb->compressed_len, cb->nr_pages);
474 }
475 bytes_left -= PAGE_SIZE;
476 first_byte += PAGE_SIZE;
477 cond_resched();
478 }
479
480 ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
481 BUG_ON(ret); /* -ENOMEM */
482
483 if (!skip_sum) {
484 ret = btrfs_csum_one_bio(inode, bio, start, 1);
485 BUG_ON(ret); /* -ENOMEM */
486 }
487
488 ret = btrfs_map_bio(fs_info, bio, 0);
489 if (ret) {
490 bio->bi_status = ret;
491 bio_endio(bio);
492 }
493
494 if (blkcg_css)
495 kthread_associate_blkcg(NULL);
496
497 return 0;
498}
499
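/* Return the file offset of the first byte past the last bvec in @bio. */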
500static u64 bio_end_offset(struct bio *bio)
501{
502 struct bio_vec *last = bio_last_bvec_all(bio);
503
504 return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
505}
506
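/*
 * Try to add pages from the inode's page cache for the range following the
 * original bio (up to the end of the compressed extent) to cb->orig_bio, so
 * that a single read of the compressed extent also satisfies readahead.
 * Gives up once too many pages are already cached or a page no longer maps
 * to this compressed extent on disk.
 */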
507static noinline int add_ra_bio_pages(struct inode *inode,
508 u64 compressed_end,
509 struct compressed_bio *cb)
510{
511 unsigned long end_index;
512 unsigned long pg_index;
513 u64 last_offset;
514 u64 isize = i_size_read(inode);
515 int ret;
516 struct page *page;
517 unsigned long nr_pages = 0;
518 struct extent_map *em;
519 struct address_space *mapping = inode->i_mapping;
520 struct extent_map_tree *em_tree;
521 struct extent_io_tree *tree;
522 u64 end;
523 int misses = 0;
524
525 last_offset = bio_end_offset(cb->orig_bio);
526 em_tree = &BTRFS_I(inode)->extent_tree;
527 tree = &BTRFS_I(inode)->io_tree;
528
529 if (isize == 0)
530 return 0;
531
532 end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
533
534 while (last_offset < compressed_end) {
535 pg_index = last_offset >> PAGE_SHIFT;
536
537 if (pg_index > end_index)
538 break;
539
540 page = xa_load(&mapping->i_pages, pg_index);
541 if (page && !xa_is_value(page)) {
542 misses++;
543 if (misses > 4)
544 break;
545 goto next;
546 }
547
548 page = __page_cache_alloc(mapping_gfp_constraint(mapping,
549 ~__GFP_FS));
550 if (!page)
551 break;
552
553 if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
554 put_page(page);
555 goto next;
556 }
557
558 /*
559 * at this point, we have a locked page in the page cache
560 * for these bytes in the file. But, we have to make
561 * sure they map to this compressed extent on disk.
562 */
563 ret = set_page_extent_mapped(page);
564 if (ret < 0) {
565 unlock_page(page);
566 put_page(page);
567 break;
568 }
569
570 end = last_offset + PAGE_SIZE - 1;
571 lock_extent(tree, last_offset, end);
572 read_lock(&em_tree->lock);
573 em = lookup_extent_mapping(em_tree, last_offset,
574 PAGE_SIZE);
575 read_unlock(&em_tree->lock);
576
577 if (!em || last_offset < em->start ||
578 (last_offset + PAGE_SIZE > extent_map_end(em)) ||
579 (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
580 free_extent_map(em);
581 unlock_extent(tree, last_offset, end);
582 unlock_page(page);
583 put_page(page);
584 break;
585 }
586 free_extent_map(em);
587
588 if (page->index == end_index) {
589 char *userpage;
590 size_t zero_offset = offset_in_page(isize);
591
592 if (zero_offset) {
593 int zeros;
594 zeros = PAGE_SIZE - zero_offset;
595 userpage = kmap_atomic(page);
596 memset(userpage + zero_offset, 0, zeros);
597 flush_dcache_page(page);
598 kunmap_atomic(userpage);
599 }
600 }
601
602 ret = bio_add_page(cb->orig_bio, page,
603 PAGE_SIZE, 0);
604
605 if (ret == PAGE_SIZE) {
606 nr_pages++;
607 put_page(page);
608 } else {
609 unlock_extent(tree, last_offset, end);
610 unlock_page(page);
611 put_page(page);
612 break;
613 }
614next:
615 last_offset += PAGE_SIZE;
616 }
617 return 0;
618}
619
/*
 * For a compressed read, the bio we get passed has all the inode pages
 * in it. We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then run the bio end_io handlers.
 */
631blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
632 int mirror_num, unsigned long bio_flags)
633{
634 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
635 struct extent_map_tree *em_tree;
636 struct compressed_bio *cb;
637 unsigned long compressed_len;
638 unsigned long nr_pages;
639 unsigned long pg_index;
640 struct page *page;
641 struct bio *comp_bio;
642 u64 cur_disk_byte = bio->bi_iter.bi_sector << 9;
643 u64 em_len;
644 u64 em_start;
645 struct extent_map *em;
646 blk_status_t ret = BLK_STS_RESOURCE;
647 int faili = 0;
648 u8 *sums;
649
650 em_tree = &BTRFS_I(inode)->extent_tree;
651
652 /* we need the actual starting offset of this extent in the file */
653 read_lock(&em_tree->lock);
654 em = lookup_extent_mapping(em_tree,
655 page_offset(bio_first_page_all(bio)),
656 fs_info->sectorsize);
657 read_unlock(&em_tree->lock);
658 if (!em)
659 return BLK_STS_IOERR;
660
661 compressed_len = em->block_len;
662 cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
663 if (!cb)
664 goto out;
665
666 refcount_set(&cb->pending_bios, 0);
667 cb->errors = 0;
668 cb->inode = inode;
669 cb->mirror_num = mirror_num;
670 sums = cb->sums;
671
672 cb->start = em->orig_start;
673 em_len = em->len;
674 em_start = em->start;
675
676 free_extent_map(em);
677 em = NULL;
678
679 cb->len = bio->bi_iter.bi_size;
680 cb->compressed_len = compressed_len;
681 cb->compress_type = extent_compress_type(bio_flags);
682 cb->orig_bio = bio;
683
684 nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
685 cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
686 GFP_NOFS);
687 if (!cb->compressed_pages)
688 goto fail1;
689
690 for (pg_index = 0; pg_index < nr_pages; pg_index++) {
691 cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
692 __GFP_HIGHMEM);
693 if (!cb->compressed_pages[pg_index]) {
694 faili = pg_index - 1;
695 ret = BLK_STS_RESOURCE;
696 goto fail2;
697 }
698 }
699 faili = nr_pages - 1;
700 cb->nr_pages = nr_pages;
701
702 add_ra_bio_pages(inode, em_start + em_len, cb);
703
	/* include any pages we added in add_ra_bio_pages */
705 cb->len = bio->bi_iter.bi_size;
706
707 comp_bio = btrfs_bio_alloc(cur_disk_byte);
708 comp_bio->bi_opf = REQ_OP_READ;
709 comp_bio->bi_private = cb;
710 comp_bio->bi_end_io = end_compressed_bio_read;
711 refcount_set(&cb->pending_bios, 1);
712
713 for (pg_index = 0; pg_index < nr_pages; pg_index++) {
714 u32 pg_len = PAGE_SIZE;
715 int submit = 0;
716
717 /*
718 * To handle subpage case, we need to make sure the bio only
719 * covers the range we need.
720 *
721 * If we're at the last page, truncate the length to only cover
722 * the remaining part.
723 */
724 if (pg_index == nr_pages - 1)
725 pg_len = min_t(u32, PAGE_SIZE,
726 compressed_len - pg_index * PAGE_SIZE);
727
728 page = cb->compressed_pages[pg_index];
729 page->mapping = inode->i_mapping;
730 page->index = em_start >> PAGE_SHIFT;
731
732 if (comp_bio->bi_iter.bi_size)
733 submit = btrfs_bio_fits_in_stripe(page, pg_len,
734 comp_bio, 0);
735
736 page->mapping = NULL;
737 if (submit || bio_add_page(comp_bio, page, pg_len, 0) < pg_len) {
738 unsigned int nr_sectors;
739
740 ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
741 BTRFS_WQ_ENDIO_DATA);
742 BUG_ON(ret); /* -ENOMEM */
743
			/*
			 * Increment the count before we submit the bio so
			 * the end IO handler can't run and free the cb
			 * before we're done setting it up.
			 */
750 refcount_inc(&cb->pending_bios);
751
752 ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
753 BUG_ON(ret); /* -ENOMEM */
754
755 nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
756 fs_info->sectorsize);
757 sums += fs_info->csum_size * nr_sectors;
758
759 ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
760 if (ret) {
761 comp_bio->bi_status = ret;
762 bio_endio(comp_bio);
763 }
764
765 comp_bio = btrfs_bio_alloc(cur_disk_byte);
766 comp_bio->bi_opf = REQ_OP_READ;
767 comp_bio->bi_private = cb;
768 comp_bio->bi_end_io = end_compressed_bio_read;
769
770 bio_add_page(comp_bio, page, pg_len, 0);
771 }
772 cur_disk_byte += pg_len;
773 }
774
775 ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
776 BUG_ON(ret); /* -ENOMEM */
777
778 ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
779 BUG_ON(ret); /* -ENOMEM */
780
781 ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
782 if (ret) {
783 comp_bio->bi_status = ret;
784 bio_endio(comp_bio);
785 }
786
787 return 0;
788
789fail2:
790 while (faili >= 0) {
791 __free_page(cb->compressed_pages[faili]);
792 faili--;
793 }
794
795 kfree(cb->compressed_pages);
796fail1:
797 kfree(cb);
798out:
799 free_extent_map(em);
800 return ret;
801}
802
/*
 * The heuristic uses systematic sampling to collect data from the input data
 * range; the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
 * @SAMPLING_INTERVAL - distance between the starts of consecutive samples
 */
810#define SAMPLING_READ_SIZE (16)
811#define SAMPLING_INTERVAL (256)
812
813/*
814 * For statistical analysis of the input data we consider bytes that form a
815 * Galois Field of 256 objects. Each object has an attribute count, ie. how
816 * many times the object appeared in the sample.
817 */
818#define BUCKET_SIZE (256)
819
820/*
821 * The size of the sample is based on a statistical sampling rule of thumb.
822 * The common way is to perform sampling tests as long as the number of
823 * elements in each cell is at least 5.
824 *
825 * Instead of 5, we choose 32 to obtain more accurate results.
826 * If the data contain the maximum number of symbols, which is 256, we obtain a
827 * sample size bound by 8192.
828 *
829 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
830 * from up to 512 locations.
831 */
832#define MAX_SAMPLE_SIZE (BTRFS_MAX_UNCOMPRESSED * \
833 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
834
835struct bucket_item {
836 u32 count;
837};
838
839struct heuristic_ws {
840 /* Partial copy of input data */
841 u8 *sample;
842 u32 sample_size;
843 /* Buckets store counters for each byte value */
844 struct bucket_item *bucket;
845 /* Sorting buffer */
846 struct bucket_item *bucket_b;
847 struct list_head list;
848};
849
850static struct workspace_manager heuristic_wsm;
851
852static void free_heuristic_ws(struct list_head *ws)
853{
854 struct heuristic_ws *workspace;
855
856 workspace = list_entry(ws, struct heuristic_ws, list);
857
858 kvfree(workspace->sample);
859 kfree(workspace->bucket);
860 kfree(workspace->bucket_b);
861 kfree(workspace);
862}
863
864static struct list_head *alloc_heuristic_ws(unsigned int level)
865{
866 struct heuristic_ws *ws;
867
868 ws = kzalloc(sizeof(*ws), GFP_KERNEL);
869 if (!ws)
870 return ERR_PTR(-ENOMEM);
871
872 ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
873 if (!ws->sample)
874 goto fail;
875
876 ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
877 if (!ws->bucket)
878 goto fail;
879
880 ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
881 if (!ws->bucket_b)
882 goto fail;
883
884 INIT_LIST_HEAD(&ws->list);
885 return &ws->list;
886fail:
887 free_heuristic_ws(&ws->list);
888 return ERR_PTR(-ENOMEM);
889}
890
891const struct btrfs_compress_op btrfs_heuristic_compress = {
892 .workspace_manager = &heuristic_wsm,
893};
894
895static const struct btrfs_compress_op * const btrfs_compress_op[] = {
896 /* The heuristic is represented as compression type 0 */
897 &btrfs_heuristic_compress,
898 &btrfs_zlib_compress,
899 &btrfs_lzo_compress,
900 &btrfs_zstd_compress,
901};
902
903static struct list_head *alloc_workspace(int type, unsigned int level)
904{
905 switch (type) {
906 case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
907 case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
908 case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace(level);
909 case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
910 default:
911 /*
912 * This can't happen, the type is validated several times
913 * before we get here.
914 */
915 BUG();
916 }
917}
918
919static void free_workspace(int type, struct list_head *ws)
920{
921 switch (type) {
922 case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
923 case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
924 case BTRFS_COMPRESS_LZO: return lzo_free_workspace(ws);
925 case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
926 default:
927 /*
928 * This can't happen, the type is validated several times
929 * before we get here.
930 */
931 BUG();
932 }
933}
934
935static void btrfs_init_workspace_manager(int type)
936{
937 struct workspace_manager *wsm;
938 struct list_head *workspace;
939
940 wsm = btrfs_compress_op[type]->workspace_manager;
941 INIT_LIST_HEAD(&wsm->idle_ws);
942 spin_lock_init(&wsm->ws_lock);
943 atomic_set(&wsm->total_ws, 0);
944 init_waitqueue_head(&wsm->ws_wait);
945
946 /*
947 * Preallocate one workspace for each compression type so we can
948 * guarantee forward progress in the worst case
949 */
950 workspace = alloc_workspace(type, 0);
951 if (IS_ERR(workspace)) {
952 pr_warn(
953 "BTRFS: cannot preallocate compression workspace, will try later\n");
954 } else {
955 atomic_set(&wsm->total_ws, 1);
956 wsm->free_ws = 1;
957 list_add(workspace, &wsm->idle_ws);
958 }
959}
960
961static void btrfs_cleanup_workspace_manager(int type)
962{
963 struct workspace_manager *wsman;
964 struct list_head *ws;
965
966 wsman = btrfs_compress_op[type]->workspace_manager;
967 while (!list_empty(&wsman->idle_ws)) {
968 ws = wsman->idle_ws.next;
969 list_del(ws);
970 free_workspace(type, ws);
971 atomic_dec(&wsman->total_ws);
972 }
973}
974
/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, it waits until one is available.
 * Preallocation provides a forward progress guarantee, so we do not return
 * errors.
 */
981struct list_head *btrfs_get_workspace(int type, unsigned int level)
982{
983 struct workspace_manager *wsm;
984 struct list_head *workspace;
985 int cpus = num_online_cpus();
986 unsigned nofs_flag;
987 struct list_head *idle_ws;
988 spinlock_t *ws_lock;
989 atomic_t *total_ws;
990 wait_queue_head_t *ws_wait;
991 int *free_ws;
992
993 wsm = btrfs_compress_op[type]->workspace_manager;
994 idle_ws = &wsm->idle_ws;
995 ws_lock = &wsm->ws_lock;
996 total_ws = &wsm->total_ws;
997 ws_wait = &wsm->ws_wait;
998 free_ws = &wsm->free_ws;
999
1000again:
1001 spin_lock(ws_lock);
1002 if (!list_empty(idle_ws)) {
1003 workspace = idle_ws->next;
1004 list_del(workspace);
1005 (*free_ws)--;
1006 spin_unlock(ws_lock);
1007 return workspace;
1008
1009 }
1010 if (atomic_read(total_ws) > cpus) {
1011 DEFINE_WAIT(wait);
1012
1013 spin_unlock(ws_lock);
1014 prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
1015 if (atomic_read(total_ws) > cpus && !*free_ws)
1016 schedule();
1017 finish_wait(ws_wait, &wait);
1018 goto again;
1019 }
1020 atomic_inc(total_ws);
1021 spin_unlock(ws_lock);
1022
	/*
	 * Allocation helpers call vmalloc, which can't honor GFP_NOFS, so
	 * enter a NOFS scope here because we might get called from the
	 * restricted context of btrfs_compress_bio/btrfs_compress_pages.
	 */
1028 nofs_flag = memalloc_nofs_save();
1029 workspace = alloc_workspace(type, level);
1030 memalloc_nofs_restore(nofs_flag);
1031
1032 if (IS_ERR(workspace)) {
1033 atomic_dec(total_ws);
1034 wake_up(ws_wait);
1035
1036 /*
1037 * Do not return the error but go back to waiting. There's a
1038 * workspace preallocated for each type and the compression
1039 * time is bounded so we get to a workspace eventually. This
1040 * makes our caller's life easier.
1041 *
1042 * To prevent silent and low-probability deadlocks (when the
1043 * initial preallocation fails), check if there are any
1044 * workspaces at all.
1045 */
1046 if (atomic_read(total_ws) == 0) {
1047 static DEFINE_RATELIMIT_STATE(_rs,
1048 /* once per minute */ 60 * HZ,
1049 /* no burst */ 1);
1050
1051 if (__ratelimit(&_rs)) {
1052 pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
1053 }
1054 }
1055 goto again;
1056 }
1057 return workspace;
1058}
1059
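/*
 * zlib and zstd have their own workspace lookup helpers; the heuristic and
 * lzo go through the generic btrfs_get_workspace() above.
 */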
1060static struct list_head *get_workspace(int type, int level)
1061{
1062 switch (type) {
1063 case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
1064 case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
1065 case BTRFS_COMPRESS_LZO: return btrfs_get_workspace(type, level);
1066 case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
1067 default:
1068 /*
1069 * This can't happen, the type is validated several times
1070 * before we get here.
1071 */
1072 BUG();
1073 }
1074}
1075
1076/*
1077 * put a workspace struct back on the list or free it if we have enough
1078 * idle ones sitting around
1079 */
1080void btrfs_put_workspace(int type, struct list_head *ws)
1081{
1082 struct workspace_manager *wsm;
1083 struct list_head *idle_ws;
1084 spinlock_t *ws_lock;
1085 atomic_t *total_ws;
1086 wait_queue_head_t *ws_wait;
1087 int *free_ws;
1088
1089 wsm = btrfs_compress_op[type]->workspace_manager;
1090 idle_ws = &wsm->idle_ws;
1091 ws_lock = &wsm->ws_lock;
1092 total_ws = &wsm->total_ws;
1093 ws_wait = &wsm->ws_wait;
1094 free_ws = &wsm->free_ws;
1095
1096 spin_lock(ws_lock);
1097 if (*free_ws <= num_online_cpus()) {
1098 list_add(ws, idle_ws);
1099 (*free_ws)++;
1100 spin_unlock(ws_lock);
1101 goto wake;
1102 }
1103 spin_unlock(ws_lock);
1104
1105 free_workspace(type, ws);
1106 atomic_dec(total_ws);
1107wake:
1108 cond_wake_up(ws_wait);
1109}
1110
1111static void put_workspace(int type, struct list_head *ws)
1112{
1113 switch (type) {
1114 case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
1115 case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
1116 case BTRFS_COMPRESS_LZO: return btrfs_put_workspace(type, ws);
1117 case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
1118 default:
1119 /*
1120 * This can't happen, the type is validated several times
1121 * before we get here.
1122 */
1123 BUG();
1124 }
1125}
1126
/*
 * Adjust @level according to the limits of the compression algorithm or
 * fall back to the default.
 */
1131static unsigned int btrfs_compress_set_level(int type, unsigned level)
1132{
1133 const struct btrfs_compress_op *ops = btrfs_compress_op[type];
1134
1135 if (level == 0)
1136 level = ops->default_level;
1137 else
1138 level = min(level, ops->max_level);
1139
1140 return level;
1141}
1142
/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is the encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - the compression algorithm is in bits 0-3
 * - the level is in bits 4-7
 *
 * @out_pages is an in/out parameter, holds the maximum number of pages to
 * allocate and returns the number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read. It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we crossed the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * also be used to return the total number of compressed bytes
 *
 * @max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
1166int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
1167 u64 start, struct page **pages,
1168 unsigned long *out_pages,
1169 unsigned long *total_in,
1170 unsigned long *total_out)
1171{
1172 int type = btrfs_compress_type(type_level);
1173 int level = btrfs_compress_level(type_level);
1174 struct list_head *workspace;
1175 int ret;
1176
1177 level = btrfs_compress_set_level(type, level);
1178 workspace = get_workspace(type, level);
1179 ret = compression_compress_pages(type, workspace, mapping, start, pages,
1180 out_pages, total_in, total_out);
1181 put_workspace(type, workspace);
1182 return ret;
1183}
1184
1185/*
1186 * pages_in is an array of pages with compressed data.
1187 *
1188 * disk_start is the starting logical offset of this array in the file
1189 *
1190 * orig_bio contains the pages from the file that we want to decompress into
1191 *
1192 * srclen is the number of bytes in pages_in
1193 *
1194 * The basic idea is that we have a bio that was created by readpages.
1195 * The pages in the bio are for the uncompressed data, and they may not
1196 * be contiguous. They all correspond to the range of bytes covered by
1197 * the compressed extent.
1198 */
1199static int btrfs_decompress_bio(struct compressed_bio *cb)
1200{
1201 struct list_head *workspace;
1202 int ret;
1203 int type = cb->compress_type;
1204
1205 workspace = get_workspace(type, 0);
1206 ret = compression_decompress_bio(type, workspace, cb);
1207 put_workspace(type, workspace);
1208
1209 return ret;
1210}
1211
1212/*
1213 * a less complex decompression routine. Our compressed data fits in a
1214 * single page, and we want to read a single page out of it.
1215 * start_byte tells us the offset into the compressed data we're interested in
1216 */
1217int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
1218 unsigned long start_byte, size_t srclen, size_t destlen)
1219{
1220 struct list_head *workspace;
1221 int ret;
1222
1223 workspace = get_workspace(type, 0);
1224 ret = compression_decompress(type, workspace, data_in, dest_page,
1225 start_byte, srclen, destlen);
1226 put_workspace(type, workspace);
1227
1228 return ret;
1229}
1230
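/*
 * zstd manages its own workspaces (see zstd_init_workspace_manager()), hence
 * the separate init and cleanup calls below.
 */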
1231void __init btrfs_init_compress(void)
1232{
1233 btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
1234 btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
1235 btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
1236 zstd_init_workspace_manager();
1237}
1238
1239void __cold btrfs_exit_compress(void)
1240{
1241 btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
1242 btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
1243 btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
1244 zstd_cleanup_workspace_manager();
1245}
1246
/*
 * Copy uncompressed data from the working buffer to pages.
 *
 * buf_start is the byte offset of the start of our working buffer within the
 * decompressed data.
 *
 * total_out is the decompressed offset of the end of the buffer.
 */
1254int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
1255 unsigned long total_out, u64 disk_start,
1256 struct bio *bio)
1257{
1258 unsigned long buf_offset;
1259 unsigned long current_buf_start;
1260 unsigned long start_byte;
1261 unsigned long prev_start_byte;
1262 unsigned long working_bytes = total_out - buf_start;
1263 unsigned long bytes;
1264 struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
1265
	/*
	 * start_byte is the offset of the first byte of the page we're
	 * currently copying into, relative to the start of the data in this
	 * compressed extent.
	 */
1270 start_byte = page_offset(bvec.bv_page) - disk_start;
1271
1272 /* we haven't yet hit data corresponding to this page */
1273 if (total_out <= start_byte)
1274 return 1;
1275
1276 /*
1277 * the start of the data we care about is offset into
1278 * the middle of our working buffer
1279 */
1280 if (total_out > start_byte && buf_start < start_byte) {
1281 buf_offset = start_byte - buf_start;
1282 working_bytes -= buf_offset;
1283 } else {
1284 buf_offset = 0;
1285 }
1286 current_buf_start = buf_start;
1287
1288 /* copy bytes from the working buffer into the pages */
1289 while (working_bytes > 0) {
1290 bytes = min_t(unsigned long, bvec.bv_len,
1291 PAGE_SIZE - (buf_offset % PAGE_SIZE));
1292 bytes = min(bytes, working_bytes);
1293
1294 memcpy_to_page(bvec.bv_page, bvec.bv_offset, buf + buf_offset,
1295 bytes);
1296 flush_dcache_page(bvec.bv_page);
1297
1298 buf_offset += bytes;
1299 working_bytes -= bytes;
1300 current_buf_start += bytes;
1301
1302 /* check if we need to pick another page */
1303 bio_advance(bio, bytes);
1304 if (!bio->bi_iter.bi_size)
1305 return 0;
1306 bvec = bio_iter_iovec(bio, bio->bi_iter);
1307 prev_start_byte = start_byte;
1308 start_byte = page_offset(bvec.bv_page) - disk_start;
1309
		/*
		 * We need to make sure we're only adjusting
		 * our offset into the compression working buffer when
		 * we're switching pages. Otherwise we can incorrectly
		 * keep copying when we were actually done.
		 */
1316 if (start_byte != prev_start_byte) {
1317 /*
1318 * make sure our new page is covered by this
1319 * working buffer
1320 */
1321 if (total_out <= start_byte)
1322 return 1;
1323
1324 /*
1325 * the next page in the biovec might not be adjacent
1326 * to the last page, but it might still be found
1327 * inside this working buffer. bump our offset pointer
1328 */
1329 if (total_out > start_byte &&
1330 current_buf_start < start_byte) {
1331 buf_offset = start_byte - buf_start;
1332 working_bytes = total_out - start_byte;
1333 current_buf_start = buf_start + buf_offset;
1334 }
1335 }
1336 }
1337
1338 return 1;
1339}
1340
/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of amount of
 * bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below that threshold, the sample has low byte
 * entropy and is compressible with high probability
 *
 * @ENTROPY_LVL_HIGH - with high probability, the data is not compressible
 *
 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
 */
1358#define ENTROPY_LVL_ACEPTABLE (65)
1359#define ENTROPY_LVL_HIGH (80)
1360
/*
 * For increased precision in the shannon_entropy calculation,
 * let's do pow(n, M) to save more digits after the decimal point:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE) -> 13
 * - 13 * 4 = 52 < 64 -> M = 4
 *
 * So use pow(n, 4).
 */
1371static inline u32 ilog2_w(u64 n)
1372{
1373 return ilog2(n * n * n * n);
1374}
1375
1376static u32 shannon_entropy(struct heuristic_ws *ws)
1377{
1378 const u32 entropy_max = 8 * ilog2_w(2);
1379 u32 entropy_sum = 0;
1380 u32 p, p_base, sz_base;
1381 u32 i;
1382
1383 sz_base = ilog2_w(ws->sample_size);
1384 for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1385 p = ws->bucket[i].count;
1386 p_base = ilog2_w(p);
1387 entropy_sum += p * (sz_base - p_base);
1388 }
1389
1390 entropy_sum /= ws->sample_size;
1391 return entropy_sum * 100 / entropy_max;
1392}
1393
1394#define RADIX_BASE 4U
1395#define COUNTERS_SIZE (1U << RADIX_BASE)
1396
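/*
 * Return the 4-bit digit of @num at bit offset @shift, inverted so that
 * radix_sort() ends up sorting in descending order.
 */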
1397static u8 get4bits(u64 num, int shift) {
1398 u8 low4bits;
1399
1400 num >>= shift;
1401 /* Reverse order */
1402 low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
1403 return low4bits;
1404}
1405
1406/*
1407 * Use 4 bits as radix base
1408 * Use 16 u32 counters for calculating new position in buf array
1409 *
1410 * @array - array that will be sorted
1411 * @array_buf - buffer array to store sorting results
1412 * must be equal in size to @array
1413 * @num - array size
1414 */
1415static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
1416 int num)
1417{
1418 u64 max_num;
1419 u64 buf_num;
1420 u32 counters[COUNTERS_SIZE];
1421 u32 new_addr;
1422 u32 addr;
1423 int bitlen;
1424 int shift;
1425 int i;
1426
	/*
	 * Try to avoid useless loop iterations for small numbers stored in big
	 * counters. Example: 48 33 4 ... in a 64bit array
	 */
1431 max_num = array[0].count;
1432 for (i = 1; i < num; i++) {
1433 buf_num = array[i].count;
1434 if (buf_num > max_num)
1435 max_num = buf_num;
1436 }
1437
1438 buf_num = ilog2(max_num);
1439 bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1440
1441 shift = 0;
1442 while (shift < bitlen) {
1443 memset(counters, 0, sizeof(counters));
1444
1445 for (i = 0; i < num; i++) {
1446 buf_num = array[i].count;
1447 addr = get4bits(buf_num, shift);
1448 counters[addr]++;
1449 }
1450
1451 for (i = 1; i < COUNTERS_SIZE; i++)
1452 counters[i] += counters[i - 1];
1453
1454 for (i = num - 1; i >= 0; i--) {
1455 buf_num = array[i].count;
1456 addr = get4bits(buf_num, shift);
1457 counters[addr]--;
1458 new_addr = counters[addr];
1459 array_buf[new_addr] = array[i];
1460 }
1461
1462 shift += RADIX_BASE;
1463
		/*
		 * A normal radix sort would move the data from the temporary
		 * array back to the main one, which costs some CPU time.
		 * Avoid that by doing another sort iteration into the original
		 * array instead of a memcpy().
		 */
1470 memset(counters, 0, sizeof(counters));
1471
1472 for (i = 0; i < num; i ++) {
1473 buf_num = array_buf[i].count;
1474 addr = get4bits(buf_num, shift);
1475 counters[addr]++;
1476 }
1477
1478 for (i = 1; i < COUNTERS_SIZE; i++)
1479 counters[i] += counters[i - 1];
1480
1481 for (i = num - 1; i >= 0; i--) {
1482 buf_num = array_buf[i].count;
1483 addr = get4bits(buf_num, shift);
1484 counters[addr]--;
1485 new_addr = counters[addr];
1486 array[new_addr] = array_buf[i];
1487 }
1488
1489 shift += RADIX_BASE;
1490 }
1491}
1492
/*
 * Size of the core byte set - how many bytes cover 90% of the sample
 *
 * There are several types of structured binary data that use nearly all byte
 * values. The distribution can be uniform and counts in all buckets will be
 * nearly the same (eg. encrypted data). Unlikely to be compressible.
 *
 * The other possibility is a normal (Gaussian) distribution, where the data
 * could be potentially compressible, but we have to take a few more steps to
 * decide how much.
 *
 * @BYTE_CORE_SET_LOW - the main part of the byte values repeats frequently,
 * a compression algorithm can easily fix that
 * @BYTE_CORE_SET_HIGH - the data has a uniform distribution and with high
 * probability is not compressible
 */
1509#define BYTE_CORE_SET_LOW (64)
1510#define BYTE_CORE_SET_HIGH (200)
1511
1512static int byte_core_set_size(struct heuristic_ws *ws)
1513{
1514 u32 i;
1515 u32 coreset_sum = 0;
1516 const u32 core_set_threshold = ws->sample_size * 90 / 100;
1517 struct bucket_item *bucket = ws->bucket;
1518
1519 /* Sort in reverse order */
1520 radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
1521
1522 for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1523 coreset_sum += bucket[i].count;
1524
1525 if (coreset_sum > core_set_threshold)
1526 return i;
1527
1528 for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1529 coreset_sum += bucket[i].count;
1530 if (coreset_sum > core_set_threshold)
1531 break;
1532 }
1533
1534 return i;
1535}
1536
/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * Because in most text-like data the byte set is restricted to a limited
 * number of possible characters, and that restriction in most cases makes
 * the data easy to compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 * less - compressible
 * more - needs additional analysis
 */
1548#define BYTE_SET_THRESHOLD (64)
1549
1550static u32 byte_set_size(const struct heuristic_ws *ws)
1551{
1552 u32 i;
1553 u32 byte_set_size = 0;
1554
1555 for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1556 if (ws->bucket[i].count > 0)
1557 byte_set_size++;
1558 }
1559
	/*
	 * Continue collecting the count of byte values in buckets. If the byte
	 * set size is bigger than the threshold, it's pointless to continue,
	 * the detection technique would fail for this type of data.
	 */
1565 for (; i < BUCKET_SIZE; i++) {
1566 if (ws->bucket[i].count > 0) {
1567 byte_set_size++;
1568 if (byte_set_size > BYTE_SET_THRESHOLD)
1569 return byte_set_size;
1570 }
1571 }
1572
1573 return byte_set_size;
1574}
1575
1576static bool sample_repeated_patterns(struct heuristic_ws *ws)
1577{
1578 const u32 half_of_sample = ws->sample_size / 2;
1579 const u8 *data = ws->sample;
1580
1581 return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1582}
1583
1584static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
1585 struct heuristic_ws *ws)
1586{
1587 struct page *page;
1588 u64 index, index_end;
1589 u32 i, curr_sample_pos;
1590 u8 *in_data;
1591
1592 /*
1593 * Compression handles the input data by chunks of 128KiB
1594 * (defined by BTRFS_MAX_UNCOMPRESSED)
1595 *
1596 * We do the same for the heuristic and loop over the whole range.
1597 *
1598 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
1599 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
1600 */
1601 if (end - start > BTRFS_MAX_UNCOMPRESSED)
1602 end = start + BTRFS_MAX_UNCOMPRESSED;
1603
1604 index = start >> PAGE_SHIFT;
1605 index_end = end >> PAGE_SHIFT;
1606
1607 /* Don't miss unaligned end */
1608 if (!IS_ALIGNED(end, PAGE_SIZE))
1609 index_end++;
1610
1611 curr_sample_pos = 0;
1612 while (index < index_end) {
1613 page = find_get_page(inode->i_mapping, index);
1614 in_data = kmap(page);
1615 /* Handle case where the start is not aligned to PAGE_SIZE */
1616 i = start % PAGE_SIZE;
1617 while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
1618 /* Don't sample any garbage from the last page */
1619 if (start > end - SAMPLING_READ_SIZE)
1620 break;
1621 memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1622 SAMPLING_READ_SIZE);
1623 i += SAMPLING_INTERVAL;
1624 start += SAMPLING_INTERVAL;
1625 curr_sample_pos += SAMPLING_READ_SIZE;
1626 }
1627 kunmap(page);
1628 put_page(page);
1629
1630 index++;
1631 }
1632
1633 ws->sample_size = curr_sample_pos;
1634}
1635
/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true'; we'll extend the logic to
 * quickly (compared to direct compression) detect data characteristics
 * (compressible/incompressible) to avoid wasting CPU time on incompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
1651int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
1652{
1653 struct list_head *ws_list = get_workspace(0, 0);
1654 struct heuristic_ws *ws;
1655 u32 i;
1656 u8 byte;
1657 int ret = 0;
1658
1659 ws = list_entry(ws_list, struct heuristic_ws, list);
1660
1661 heuristic_collect_sample(inode, start, end, ws);
1662
1663 if (sample_repeated_patterns(ws)) {
1664 ret = 1;
1665 goto out;
1666 }
1667
1668 memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
1669
1670 for (i = 0; i < ws->sample_size; i++) {
1671 byte = ws->sample[i];
1672 ws->bucket[byte].count++;
1673 }
1674
1675 i = byte_set_size(ws);
1676 if (i < BYTE_SET_THRESHOLD) {
1677 ret = 2;
1678 goto out;
1679 }
1680
1681 i = byte_core_set_size(ws);
1682 if (i <= BYTE_CORE_SET_LOW) {
1683 ret = 3;
1684 goto out;
1685 }
1686
1687 if (i >= BYTE_CORE_SET_HIGH) {
1688 ret = 0;
1689 goto out;
1690 }
1691
1692 i = shannon_entropy(ws);
1693 if (i <= ENTROPY_LVL_ACEPTABLE) {
1694 ret = 4;
1695 goto out;
1696 }
1697
1698 /*
1699 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
1700 * needed to give green light to compression.
1701 *
1702 * For now just assume that compression at that level is not worth the
1703 * resources because:
1704 *
1705 * 1. it is possible to defrag the data later
1706 *
1707 * 2. the data would turn out to be hardly compressible, eg. 150 byte
1708 * values, every bucket has counter at level ~54. The heuristic would
1709 * be confused. This can happen when data have some internal repeated
1710 * patterns like "abbacbbc...". This can be detected by analyzing
1711 * pairs of bytes, which is too costly.
1712 */
1713 if (i < ENTROPY_LVL_HIGH) {
1714 ret = 5;
1715 goto out;
1716 } else {
1717 ret = 0;
1718 goto out;
1719 }
1720
1721out:
1722 put_workspace(0, ws_list);
1723 return ret;
1724}
1725
/*
 * Convert the compression suffix (eg. after "zlib", starting with ":") to a
 * level; an unrecognized string selects the default level (eg. "zlib:9"
 * yields level 9).
 */
1730unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
1731{
1732 unsigned int level = 0;
1733 int ret;
1734
1735 if (!type)
1736 return 0;
1737
1738 if (str[0] == ':') {
1739 ret = kstrtouint(str + 1, 10, &level);
1740 if (ret)
1741 level = 0;
1742 }
1743
1744 level = btrfs_compress_set_level(type, level);
1745
1746 return level;
1747}