
mm: introduce page_size()

Patch series "Make working with compound pages easier", v2.

These three patches add three helpers and convert the appropriate
places to use them.

This patch (of 3):

It's unnecessarily hard to find out the size of a potentially huge page.
Replace 'PAGE_SIZE << compound_order(page)' with page_size(page).
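For illustration only, here is a minimal sketch of how a call site changes once the helper exists. zero_whole_page() is a hypothetical function invented for this example and is not part of the series; memset(), page_address(), compound_order() and PAGE_SIZE are the usual kernel APIs, and page_size() is the helper added to include/linux/mm.h by this patch.

#include <linux/mm.h>		/* struct page, page_address(), page_size() after this patch */
#include <linux/string.h>	/* memset() */

/*
 * Hypothetical caller, for illustration only. After this patch,
 * page_size(page) expands to PAGE_SIZE << compound_order(page),
 * so the two forms below are equivalent.
 */
static void zero_whole_page(struct page *page)
{
	/* before: open-coded size of a potentially compound page */
	memset(page_address(page), 0, PAGE_SIZE << compound_order(page));

	/* after: the intent is explicit and harder to get wrong */
	memset(page_address(page), 0, page_size(page));
}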

Link: http://lkml.kernel.org/r/20190721104612.19120-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Matthew Wilcox (Oracle), committed by Linus Torvalds (commit a50b854e, parent 1f18b296).

+35 -38
+1 -2
arch/arm/mm/flush.c
@@ -204,8 +204,7 @@
 	 * coherent with the kernels mapping.
 	 */
 	if (!PageHighMem(page)) {
-		size_t page_size = PAGE_SIZE << compound_order(page);
-		__cpuc_flush_dcache_area(page_address(page), page_size);
+		__cpuc_flush_dcache_area(page_address(page), page_size(page));
 	} else {
 		unsigned long i;
 		if (cache_is_vipt_nonaliasing()) {
+1 -2
arch/arm64/mm/flush.c
@@ -56,8 +56,7 @@
 	struct page *page = pte_page(pte);
 
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
-		sync_icache_aliases(page_address(page),
-				    PAGE_SIZE << compound_order(page));
+		sync_icache_aliases(page_address(page), page_size(page));
 }
 EXPORT_SYMBOL_GPL(__sync_icache_dcache);
 
+1 -1
arch/ia64/mm/init.c
@@ -64,7 +64,7 @@
 	if (test_bit(PG_arch_1, &page->flags))
 		return;				/* i-cache is already coherent with d-cache */
 
-	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
+	flush_icache_range(addr, addr + page_size(page));
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
 
+2 -3
drivers/crypto/chelsio/chtls/chtls_io.c
@@ -1078,7 +1078,7 @@
 			bool merge;
 
 			if (page)
-				pg_size <<= compound_order(page);
+				pg_size = page_size(page);
 			if (off < pg_size &&
 			    skb_can_coalesce(skb, i, page, off)) {
 				merge = 1;
@@ -1105,8 +1105,7 @@
 							   __GFP_NORETRY,
 							   order);
 					if (page)
-						pg_size <<=
-							compound_order(page);
+						pg_size <<= order;
 				}
 				if (!page) {
 					page = alloc_page(gfp);
+2 -2
drivers/staging/android/ion/ion_system_heap.c
@@ -120,7 +120,7 @@
 		if (!page)
 			goto free_pages;
 		list_add_tail(&page->lru, &pages);
-		size_remaining -= PAGE_SIZE << compound_order(page);
+		size_remaining -= page_size(page);
 		max_order = compound_order(page);
 		i++;
 	}
@@ -133,7 +133,7 @@
 
 	sg = table->sgl;
 	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
-		sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
+		sg_set_page(sg, page, page_size(page), 0);
 		sg = sg_next(sg);
 		list_del(&page->lru);
 	}
+1 -2
drivers/target/tcm_fc/tfc_io.c
@@ -136,8 +136,7 @@
 					   page, off_in_page, tlen);
 			fr_len(fp) += tlen;
 			fp_skb(fp)->data_len += tlen;
-			fp_skb(fp)->truesize +=
-					PAGE_SIZE << compound_order(page);
+			fp_skb(fp)->truesize += page_size(page);
 		} else {
 			BUG_ON(!page);
 			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
+1 -1
fs/io_uring.c
@@ -3319,7 +3319,7 @@
 	}
 
 	page = virt_to_head_page(ptr);
-	if (sz > (PAGE_SIZE << compound_order(page)))
+	if (sz > page_size(page))
 		return -EINVAL;
 
 	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
+1 -1
include/linux/hugetlb.h
@@ -454,7 +454,7 @@
 static inline struct hstate *page_hstate(struct page *page)
 {
 	VM_BUG_ON_PAGE(!PageHuge(page), page);
-	return size_to_hstate(PAGE_SIZE << compound_order(page));
+	return size_to_hstate(page_size(page));
 }
 
 static inline unsigned hstate_index_to_shift(unsigned index)
+6
include/linux/mm.h
@@ -805,6 +805,12 @@
 	page[1].compound_order = order;
 }
 
+/* Returns the number of bytes in this potentially compound page. */
+static inline unsigned long page_size(struct page *page)
+{
+	return PAGE_SIZE << compound_order(page);
+}
+
 void free_compound_page(struct page *page);
 
 #ifdef CONFIG_MMU
+1 -1
lib/iov_iter.c
@@ -878,7 +878,7 @@
 	head = compound_head(page);
 	v += (page - head) << PAGE_SHIFT;
 
-	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
+	if (likely(n <= v && v <= (page_size(head))))
 		return true;
 	WARN_ON(1);
 	return false;
+3 -5
mm/kasan/common.c
@@ -338,7 +338,6 @@
 
 	for (i = 0; i < (1 << compound_order(page)); i++)
 		page_kasan_tag_reset(page + i);
-	kasan_poison_shadow(page_address(page),
-			PAGE_SIZE << compound_order(page),
+	kasan_poison_shadow(page_address(page), page_size(page),
 			KASAN_KMALLOC_REDZONE);
 }
 
@@ -541,7 +542,7 @@
 	page = virt_to_page(ptr);
 	redzone_start = round_up((unsigned long)(ptr + size),
 				KASAN_SHADOW_SCALE_SIZE);
-	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
+	redzone_end = (unsigned long)ptr + page_size(page);
 
 	kasan_unpoison_shadow(ptr, size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
@@ -577,8 +578,7 @@
 			kasan_report_invalid_free(ptr, ip);
 			return;
 		}
-		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
-				KASAN_FREE_PAGE);
+		kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
 	} else {
 		__kasan_slab_free(page->slab_cache, ptr, ip, false);
 	}
+1 -1
mm/nommu.c
@@ -108,7 +108,7 @@
 	 * The ksize() function is only guaranteed to work for pointers
 	 * returned by kmalloc(). So handle arbitrary pointers here.
 	 */
-	return PAGE_SIZE << compound_order(page);
+	return page_size(page);
 }
 
 /**
+1 -2
mm/page_vma_mapped.c
@@ -153,8 +153,7 @@
 
 	if (unlikely(PageHuge(pvmw->page))) {
 		/* when pud is not present, pte will be NULL */
-		pvmw->pte = huge_pte_offset(mm, pvmw->address,
-					    PAGE_SIZE << compound_order(page));
+		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
 		if (!pvmw->pte)
 			return false;
 
+2 -4
mm/rmap.c
@@ -898,8 +898,7 @@
 	 */
 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
 				0, vma, vma->vm_mm, address,
-				min(vma->vm_end, address +
-				    (PAGE_SIZE << compound_order(page))));
+				min(vma->vm_end, address + page_size(page)));
 	mmu_notifier_invalidate_range_start(&range);
 
 	while (page_vma_mapped_walk(&pvmw)) {
@@ -1371,8 +1372,7 @@
 	 */
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
 				address,
-				min(vma->vm_end, address +
-				    (PAGE_SIZE << compound_order(page))));
+				min(vma->vm_end, address + page_size(page)));
 	if (PageHuge(page)) {
 		/*
 		 * If sharing is possible, start and end will be adjusted
+1 -1
mm/slob.c
@@ -539,7 +539,7 @@
 
 	sp = virt_to_page(block);
 	if (unlikely(!PageSlab(sp)))
-		return PAGE_SIZE << compound_order(sp);
+		return page_size(sp);
 
 	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 	m = (unsigned int *)(block - align);
+9 -9
mm/slub.c
@@ -829,7 +829,7 @@
 		return 1;
 
 	start = page_address(page);
-	length = PAGE_SIZE << compound_order(page);
+	length = page_size(page);
 	end = start + length;
 	remainder = length % s->size;
 	if (!remainder)
@@ -1074,13 +1074,14 @@
 	init_tracking(s, object);
 }
 
-static void setup_page_debug(struct kmem_cache *s, void *addr, int order)
+static
+void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
 {
 	if (!(s->flags & SLAB_POISON))
 		return;
 
 	metadata_access_enable();
-	memset(addr, POISON_INUSE, PAGE_SIZE << order);
+	memset(addr, POISON_INUSE, page_size(page));
 	metadata_access_disable();
 }
 
@@ -1341,8 +1340,8 @@
 #else /* !CONFIG_SLUB_DEBUG */
 static inline void setup_object_debug(struct kmem_cache *s,
 			struct page *page, void *object) {}
-static inline void setup_page_debug(struct kmem_cache *s,
-			void *addr, int order) {}
+static inline
+void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {}
 
 static inline int alloc_debug_processing(struct kmem_cache *s,
 	struct page *page, void *object, unsigned long addr) { return 0; }
@@ -1640,7 +1639,7 @@
 	struct kmem_cache_order_objects oo = s->oo;
 	gfp_t alloc_gfp;
 	void *start, *p, *next;
-	int idx, order;
+	int idx;
 	bool shuffle;
 
 	flags &= gfp_allowed_mask;
@@ -1674,7 +1673,6 @@
 
 	page->objects = oo_objects(oo);
 
-	order = compound_order(page);
 	page->slab_cache = s;
 	__SetPageSlab(page);
 	if (page_is_pfmemalloc(page))
@@ -1683,7 +1683,7 @@
 
 	start = page_address(page);
 
-	setup_page_debug(s, start, order);
+	setup_page_debug(s, page, start);
 
 	shuffle = shuffle_freelist(s, page);
 
@@ -3932,7 +3932,7 @@
 
 	if (unlikely(!PageSlab(page))) {
 		WARN_ON(!PageCompound(page));
-		return PAGE_SIZE << compound_order(page);
+		return page_size(page);
 	}
 
 	return slab_ksize(page->slab_cache);
+1 -1
net/xdp/xsk.c
@@ -977,7 +977,7 @@
 	/* Matches the smp_wmb() in xsk_init_queue */
 	smp_rmb();
 	qpg = virt_to_head_page(q->ring);
-	if (size > (PAGE_SIZE << compound_order(qpg)))
+	if (size > page_size(qpg))
 		return -EINVAL;
 
 	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;