Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: remove CONFIG_TRANSPARENT_HUGE_PAGECACHE

Commit e496cf3d7821 ("thp: introduce CONFIG_TRANSPARENT_HUGE_PAGECACHE")
notes that it should be reverted when the PowerPC problem was fixed. The
commit fixing the PowerPC problem (953c66c2b22a) did not revert the
commit; instead it set CONFIG_TRANSPARENT_HUGE_PAGECACHE to the same
value as CONFIG_TRANSPARENT_HUGEPAGE. After checking with Kirill and
Aneesh, this turned out to be an oversight, so remove the Kconfig
symbol and undo the work of commit e496cf3d7821.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Link: http://lkml.kernel.org/r/20200318140253.6141-6-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Matthew Wilcox (Oracle); committed by Linus Torvalds
396bcc52 a0650604

+27 -44
+1 -9
include/linux/shmem_fs.h
··· 78 78 extern int shmem_unuse(unsigned int type, bool frontswap, 79 79 unsigned long *fs_pages_to_unuse); 80 80 81 + extern bool shmem_huge_enabled(struct vm_area_struct *vma); 81 82 extern unsigned long shmem_swap_usage(struct vm_area_struct *vma); 82 83 extern unsigned long shmem_partial_swap_usage(struct address_space *mapping, 83 84 pgoff_t start, pgoff_t end); ··· 114 113 115 114 extern bool shmem_charge(struct inode *inode, long pages); 116 115 extern void shmem_uncharge(struct inode *inode, long pages); 117 - 118 - #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 119 - extern bool shmem_huge_enabled(struct vm_area_struct *vma); 120 - #else 121 - static inline bool shmem_huge_enabled(struct vm_area_struct *vma) 122 - { 123 - return false; 124 - } 125 - #endif 126 116 127 117 #ifdef CONFIG_SHMEM 128 118 extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
+1 -5
mm/Kconfig
··· 420 420 421 421 For selection by architectures with reasonable THP sizes. 422 422 423 - config TRANSPARENT_HUGE_PAGECACHE 424 - def_bool y 425 - depends on TRANSPARENT_HUGEPAGE 426 - 427 423 # 428 424 # UP and nommu archs use km based percpu allocator 429 425 # ··· 710 714 711 715 config READ_ONLY_THP_FOR_FS 712 716 bool "Read-only THP for filesystems (EXPERIMENTAL)" 713 - depends on TRANSPARENT_HUGE_PAGECACHE && SHMEM 717 + depends on TRANSPARENT_HUGEPAGE && SHMEM 714 718 715 719 help 716 720 Allow khugepaged to put read-only file-backed pages in THP.
+1 -1
mm/huge_memory.c
··· 326 326 &defrag_attr.attr, 327 327 &use_zero_page_attr.attr, 328 328 &hpage_pmd_size_attr.attr, 329 - #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) 329 + #ifdef CONFIG_SHMEM 330 330 &shmem_enabled_attr.attr, 331 331 #endif 332 332 #ifdef CONFIG_DEBUG_VM
+4 -8
mm/khugepaged.c
··· 414 414 (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && 415 415 vma->vm_file && 416 416 (vm_flags & VM_DENYWRITE))) { 417 - if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 418 - return false; 419 417 return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff, 420 418 HPAGE_PMD_NR); 421 419 } ··· 1256 1258 } 1257 1259 } 1258 1260 1259 - #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) 1261 + #ifdef CONFIG_SHMEM 1260 1262 /* 1261 1263 * Notify khugepaged that given addr of the mm is pte-mapped THP. Then 1262 1264 * khugepaged should try to collapse the page table. ··· 1971 1973 if (khugepaged_scan.address < hstart) 1972 1974 khugepaged_scan.address = hstart; 1973 1975 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); 1976 + if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma)) 1977 + goto skip; 1974 1978 1975 1979 while (khugepaged_scan.address < hend) { 1976 1980 int ret; ··· 1984 1984 khugepaged_scan.address + HPAGE_PMD_SIZE > 1985 1985 hend); 1986 1986 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) { 1987 - struct file *file; 1987 + struct file *file = get_file(vma->vm_file); 1988 1988 pgoff_t pgoff = linear_page_index(vma, 1989 1989 khugepaged_scan.address); 1990 1990 1991 - if (shmem_file(vma->vm_file) 1992 - && !shmem_huge_enabled(vma)) 1993 - goto skip; 1994 - file = get_file(vma->vm_file); 1995 1991 up_read(&mm->mmap_sem); 1996 1992 ret = 1; 1997 1993 khugepaged_scan_file(mm, file, pgoff, hpage);
+2 -3
mm/memory.c
··· 3373 3373 return 0; 3374 3374 } 3375 3375 3376 - #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 3376 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3377 3377 static void deposit_prealloc_pte(struct vm_fault *vmf) 3378 3378 { 3379 3379 struct vm_area_struct *vma = vmf->vma; ··· 3475 3475 pte_t entry; 3476 3476 vm_fault_t ret; 3477 3477 3478 - if (pmd_none(*vmf->pmd) && PageTransCompound(page) && 3479 - IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) { 3478 + if (pmd_none(*vmf->pmd) && PageTransCompound(page)) { 3480 3479 /* THP on COW? */ 3481 3480 VM_BUG_ON_PAGE(memcg, page); 3482 3481
+1 -1
mm/rmap.c
··· 933 933 set_pte_at(vma->vm_mm, address, pte, entry); 934 934 ret = 1; 935 935 } else { 936 - #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 936 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 937 937 pmd_t *pmd = pvmw.pmd; 938 938 pmd_t entry; 939 939
+17 -17
mm/shmem.c
··· 410 410 #define SHMEM_HUGE_DENY (-1) 411 411 #define SHMEM_HUGE_FORCE (-2) 412 412 413 - #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 413 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 414 414 /* ifdef here to avoid bloating shmem.o when not necessary */ 415 415 416 416 static int shmem_huge __read_mostly; ··· 580 580 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 581 581 return READ_ONCE(sbinfo->shrinklist_len); 582 582 } 583 - #else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */ 583 + #else /* !CONFIG_TRANSPARENT_HUGEPAGE */ 584 584 585 585 #define shmem_huge SHMEM_HUGE_DENY 586 586 ··· 589 589 { 590 590 return 0; 591 591 } 592 - #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */ 592 + #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 593 593 594 594 static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo) 595 595 { 596 - if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && 596 + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && 597 597 (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) && 598 598 shmem_huge != SHMEM_HUGE_DENY) 599 599 return true; ··· 1059 1059 * Part of the huge page can be beyond i_size: subject 1060 1060 * to shrink under memory pressure. 1061 1061 */ 1062 - if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) { 1062 + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { 1063 1063 spin_lock(&sbinfo->shrinklist_lock); 1064 1064 /* 1065 1065 * _careful to defend against unlocked access to ··· 1510 1510 int nr; 1511 1511 int err = -ENOSPC; 1512 1512 1513 - if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 1513 + if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 1514 1514 huge = false; 1515 1515 nr = huge ? HPAGE_PMD_NR : 1; 1516 1516 ··· 2093 2093 get_area = current->mm->get_unmapped_area; 2094 2094 addr = get_area(file, uaddr, len, pgoff, flags); 2095 2095 2096 - if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 2096 + if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 2097 2097 return addr; 2098 2098 if (IS_ERR_VALUE(addr)) 2099 2099 return addr; ··· 2232 2232 2233 2233 file_accessed(file); 2234 2234 vma->vm_ops = &shmem_vm_ops; 2235 - if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && 2235 + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && 2236 2236 ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < 2237 2237 (vma->vm_end & HPAGE_PMD_MASK)) { 2238 2238 khugepaged_enter(vma, vma->vm_flags); ··· 3459 3459 case Opt_huge: 3460 3460 ctx->huge = result.uint_32; 3461 3461 if (ctx->huge != SHMEM_HUGE_NEVER && 3462 - !(IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && 3462 + !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && 3463 3463 has_transparent_hugepage())) 3464 3464 goto unsupported_parameter; 3465 3465 ctx->seen |= SHMEM_SEEN_HUGE; ··· 3605 3605 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) 3606 3606 seq_printf(seq, ",gid=%u", 3607 3607 from_kgid_munged(&init_user_ns, sbinfo->gid)); 3608 - #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 3608 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3609 3609 /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */ 3610 3610 if (sbinfo->huge) 3611 3611 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); ··· 3850 3850 .evict_inode = shmem_evict_inode, 3851 3851 .drop_inode = generic_delete_inode, 3852 3852 .put_super = shmem_put_super, 3853 - #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 3853 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3854 3854 .nr_cached_objects = shmem_unused_huge_count, 3855 3855 .free_cached_objects = shmem_unused_huge_scan, 3856 3856 #endif ··· 3912 3912 goto out1; 3913 3913 } 3914 3914 3915 - #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 3915 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3916 3916 if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY) 3917 3917 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; 3918 3918 else ··· 3928 3928 return error; 3929 3929 } 3930 3930 3931 - #if defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && defined(CONFIG_SYSFS) 3931 + #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS) 3932 3932 static ssize_t shmem_enabled_show(struct kobject *kobj, 3933 3933 struct kobj_attribute *attr, char *buf) 3934 3934 { ··· 3980 3980 3981 3981 struct kobj_attribute shmem_enabled_attr = 3982 3982 __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store); 3983 - #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */ 3983 + #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */ 3984 3984 3985 - #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 3985 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3986 3986 bool shmem_huge_enabled(struct vm_area_struct *vma) 3987 3987 { 3988 3988 struct inode *inode = file_inode(vma->vm_file); ··· 4017 4017 return false; 4018 4018 } 4019 4019 } 4020 - #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */ 4020 + #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 4021 4021 4022 4022 #else /* !CONFIG_SHMEM */ 4023 4023 ··· 4186 4186 vma->vm_file = file; 4187 4187 vma->vm_ops = &shmem_vm_ops; 4188 4188 4189 - if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && 4189 + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && 4190 4190 ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < 4191 4191 (vma->vm_end & HPAGE_PMD_MASK)) { 4192 4192 khugepaged_enter(vma, vma->vm_flags);