
btrfs: use memzero_page() instead of open coded kmap pattern

There are many places where the kmap/memset/kunmap pattern occurs.

Use the newly lifted memzero_page() to eliminate direct uses of kmap and
leverage the new core function's use of kmap_local_page().
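
For reference, memzero_page() is expected to behave roughly like the
following sketch (a reconstruction for illustration only; the authoritative
definition lives in include/linux/highmem.h):

	/* Sketch only -- see include/linux/highmem.h for the real helper. */
	static inline void memzero_page(struct page *page, size_t offset, size_t len)
	{
		char *addr = kmap_local_page(page); /* short-lived, CPU-local mapping */

		memset(addr + offset, 0, len);
		kunmap_local(addr);
	}

A typical conversion in this patch therefore collapses three calls into one:

	/* before */
	kaddr = kmap_atomic(page);
	memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start);
	kunmap_atomic(kaddr);

	/* after */
	memzero_page(page, zero_start, PAGE_SIZE - zero_start);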

The development of this patch was aided by the following Coccinelle
script:

// <smpl>
// SPDX-License-Identifier: GPL-2.0-only
// Find kmap/memset/kunmap pattern and replace with memset*page calls
//
// NOTE: Offsets and other expressions may be more complex than what the script
// will automatically generate. Therefore a catchall rule is provided to find
// the pattern which then must be evaluated by hand.
//
// Confidence: Low
// Copyright: (C) 2021 Intel Corporation
// URL: http://coccinelle.lip6.fr/
// Comments:
// Options:

//
// Then the memset pattern
//
@ memset_rule1 @
expression page, V, L, Off;
identifier ptr;
type VP;
@@

(
-VP ptr = kmap(page);
|
-ptr = kmap(page);
|
-VP ptr = kmap_atomic(page);
|
-ptr = kmap_atomic(page);
)
<+...
(
-memset(ptr, 0, L);
+memzero_page(page, 0, L);
|
-memset(ptr + Off, 0, L);
+memzero_page(page, Off, L);
|
-memset(ptr, V, L);
+memset_page(page, V, 0, L);
|
-memset(ptr + Off, V, L);
+memset_page(page, V, Off, L);
)
...+>
(
-kunmap(page);
|
-kunmap_atomic(ptr);
)

// Remove any pointers left unused
@
depends on memset_rule1
@
identifier memset_rule1.ptr;
type VP, VP1;
@@

-VP ptr;
... when != ptr;
? VP1 ptr;

//
// Catch all
//
@ memset_rule2 @
expression page;
identifier ptr;
expression GenTo, GenSize, GenValue;
type VP;
@@

(
-VP ptr = kmap(page);
|
-ptr = kmap(page);
|
-VP ptr = kmap_atomic(page);
|
-ptr = kmap_atomic(page);
)
<+...
(
//
// Some call sites have complex expressions within the memset/memcpy.
// The following are catch-alls which need to be evaluated by hand.
//
-memset(GenTo, 0, GenSize);
+memzero_pageExtra(page, GenTo, GenSize);
|
-memset(GenTo, GenValue, GenSize);
+memset_pageExtra(page, GenValue, GenTo, GenSize);
)
...+>
(
-kunmap(page);
|
-kunmap_atomic(ptr);
)

// Remove any pointers left unused
@
depends on memset_rule2
@
identifier memset_rule2.ptr;
type VP, VP1;
@@

-VP ptr;
... when != ptr;
? VP1 ptr;

// </smpl>
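
The script can be applied with Coccinelle's spatch tool; a plausible
invocation (the .cocci file name here is hypothetical) is:

	spatch --sp-file memzero_page.cocci --dir fs/btrfs --in-place

Note that memzero_pageExtra() and memset_pageExtra() in the catchall rules
are not real kernel functions; the bogus names appear to be chosen so that
the flagged call sites cannot compile unnoticed and must be converted by
hand, as the script's comments indicate.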

Link: https://lkml.kernel.org/r/20210309212137.2610186-4-ira.weiny@intel.com
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Cc: Chris Mason <clm@fb.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Ira Weiny and committed by Linus Torvalds
commit d048b9c2, parent 28961998
6 files changed, 18 insertions(+), 58 deletions(-)

fs/btrfs/compression.c (+1 -4)

@@ -591,16 +591,13 @@
 		free_extent_map(em);
 
 		if (page->index == end_index) {
-			char *userpage;
 			size_t zero_offset = offset_in_page(isize);
 
 			if (zero_offset) {
 				int zeros;
 				zeros = PAGE_SIZE - zero_offset;
-				userpage = kmap_atomic(page);
-				memset(userpage + zero_offset, 0, zeros);
+				memzero_page(page, zero_offset, zeros);
 				flush_dcache_page(page);
-				kunmap_atomic(userpage);
 			}
 		}
 

fs/btrfs/extent_io.c (+4 -18)

@@ -3421,15 +3421,12 @@
 	}
 
 	if (page->index == last_byte >> PAGE_SHIFT) {
-		char *userpage;
 		size_t zero_offset = offset_in_page(last_byte);
 
 		if (zero_offset) {
 			iosize = PAGE_SIZE - zero_offset;
-			userpage = kmap_atomic(page);
-			memset(userpage + zero_offset, 0, iosize);
+			memzero_page(page, zero_offset, iosize);
 			flush_dcache_page(page);
-			kunmap_atomic(userpage);
 		}
 	}
 	begin_page_read(fs_info, page);
@@ -3438,14 +3435,11 @@
 		u64 disk_bytenr;
 
 		if (cur >= last_byte) {
-			char *userpage;
 			struct extent_state *cached = NULL;
 
 			iosize = PAGE_SIZE - pg_offset;
-			userpage = kmap_atomic(page);
-			memset(userpage + pg_offset, 0, iosize);
+			memzero_page(page, pg_offset, iosize);
 			flush_dcache_page(page);
-			kunmap_atomic(userpage);
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
 					    &cached, GFP_NOFS);
 			unlock_extent_cached(tree, cur,
@@ -3528,13 +3522,10 @@
 
 		/* we've found a hole, just zero and go on */
 		if (block_start == EXTENT_MAP_HOLE) {
-			char *userpage;
 			struct extent_state *cached = NULL;
 
-			userpage = kmap_atomic(page);
-			memset(userpage + pg_offset, 0, iosize);
+			memzero_page(page, pg_offset, iosize);
 			flush_dcache_page(page);
-			kunmap_atomic(userpage);
 
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
 					    &cached, GFP_NOFS);
@@ -3845,11 +3836,6 @@
 	}
 
 	if (page->index == end_index) {
-		char *userpage;
-
-		userpage = kmap_atomic(page);
-		memset(userpage + pg_offset, 0,
-		       PAGE_SIZE - pg_offset);
-		kunmap_atomic(userpage);
+		memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
 		flush_dcache_page(page);
 	}
 

fs/btrfs/inode.c (+10 -23)

@@ -646,16 +646,11 @@
 		if (!ret) {
 			unsigned long offset = offset_in_page(total_compressed);
 			struct page *page = pages[nr_pages - 1];
-			char *kaddr;
 
 			/* zero the tail end of the last page, we might be
 			 * sending it down to disk
 			 */
-			if (offset) {
-				kaddr = kmap_atomic(page);
-				memset(kaddr + offset, 0,
-				       PAGE_SIZE - offset);
-				kunmap_atomic(kaddr);
-			}
+			if (offset)
+				memzero_page(page, offset, PAGE_SIZE - offset);
 			will_compress = 1;
 		}
@@ -4833,7 +4828,6 @@
 	struct btrfs_ordered_extent *ordered;
 	struct extent_state *cached_state = NULL;
 	struct extent_changeset *data_reserved = NULL;
-	char *kaddr;
 	bool only_release_metadata = false;
 	u32 blocksize = fs_info->sectorsize;
 	pgoff_t index = from >> PAGE_SHIFT;
@@ -4925,15 +4919,13 @@
 	if (offset != blocksize) {
 		if (!len)
 			len = blocksize - offset;
-		kaddr = kmap(page);
 		if (front)
-			memset(kaddr + (block_start - page_offset(page)),
-			       0, offset);
+			memzero_page(page, (block_start - page_offset(page)),
+				     offset);
 		else
-			memset(kaddr + (block_start - page_offset(page)) + offset,
-			       0, len);
+			memzero_page(page, (block_start - page_offset(page)) + offset,
+				     len);
 		flush_dcache_page(page);
-		kunmap(page);
 	}
 	ClearPageChecked(page);
 	set_page_dirty(page);
@@ -6832,11 +6824,9 @@
 	 * cover that region here.
 	 */
 
-	if (max_size + pg_offset < PAGE_SIZE) {
-		char *map = kmap(page);
-		memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset);
-		kunmap(page);
-	}
+	if (max_size + pg_offset < PAGE_SIZE)
+		memzero_page(page, pg_offset + max_size,
+			     PAGE_SIZE - max_size - pg_offset);
 	kfree(tmp);
 	return ret;
 }
@@ -8506,7 +8496,6 @@
 	struct btrfs_ordered_extent *ordered;
 	struct extent_state *cached_state = NULL;
 	struct extent_changeset *data_reserved = NULL;
-	char *kaddr;
 	unsigned long zero_start;
 	loff_t size;
 	vm_fault_t ret;
@@ -8620,10 +8609,8 @@
 		zero_start = PAGE_SIZE;
 
 	if (zero_start != PAGE_SIZE) {
-		kaddr = kmap(page);
-		memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start);
+		memzero_page(page, zero_start, PAGE_SIZE - zero_start);
 		flush_dcache_page(page);
-		kunmap(page);
 	}
 	ClearPageChecked(page);
 	set_page_dirty(page);

fs/btrfs/reflink.c (+1 -5)

@@ -129,12 +129,8 @@
 	 * So what's in the range [500, 4095] corresponds to zeroes.
 	 */
 	if (datal < block_size) {
-		char *map;
-
-		map = kmap(page);
-		memset(map + datal, 0, block_size - datal);
+		memzero_page(page, datal, block_size - datal);
 		flush_dcache_page(page);
-		kunmap(page);
 	}
 
 	SetPageUptodate(page);

fs/btrfs/zlib.c (+1 -4)

@@ -375,7 +375,6 @@
 	unsigned long bytes_left;
 	unsigned long total_out = 0;
 	unsigned long pg_offset = 0;
-	char *kaddr;
 
 	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
 	bytes_left = destlen;
@@ -455,9 +454,7 @@
 	 * end of the inline extent (destlen) to the end of the page
 	 */
 	if (pg_offset < destlen) {
-		kaddr = kmap_atomic(dest_page);
-		memset(kaddr + pg_offset, 0, destlen - pg_offset);
-		kunmap_atomic(kaddr);
+		memzero_page(dest_page, pg_offset, destlen - pg_offset);
 	}
 	return ret;
 }

fs/btrfs/zstd.c (+1 -4)

@@ -631,7 +631,6 @@
 	size_t ret2;
 	unsigned long total_out = 0;
 	unsigned long pg_offset = 0;
-	char *kaddr;
 
 	stream = ZSTD_initDStream(
 		ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
@@ -696,9 +695,7 @@
 	ret = 0;
 finish:
 	if (pg_offset < destlen) {
-		kaddr = kmap_atomic(dest_page);
-		memset(kaddr + pg_offset, 0, destlen - pg_offset);
-		kunmap_atomic(kaddr);
+		memzero_page(dest_page, pg_offset, destlen - pg_offset);
 	}
 	return ret;
 }