Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: drop hugetlb_get_unmapped_area{_*} functions

Hugetlb mappings are now handled through normal channels just like any
other mapping, so we no longer need hugetlb_get_unmapped_area* specific
functions.

Link: https://lkml.kernel.org/r/20241007075037.267650-8-osalvador@suse.de
Signed-off-by: Oscar Salvador <osalvador@suse.de>
Cc: David Hildenbrand <david@redhat.com>
Cc: Donet Tom <donettom@linux.ibm.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Peter Xu <peterx@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Oscar Salvador and committed by Andrew Morton.
cc92882e 7bd3f1e1

+6 -422
-21
arch/parisc/mm/hugetlbpage.c
··· 21 21 #include <asm/mmu_context.h> 22 22 23 23 24 - unsigned long 25 - hugetlb_get_unmapped_area(struct file *file, unsigned long addr, 26 - unsigned long len, unsigned long pgoff, unsigned long flags) 27 - { 28 - struct hstate *h = hstate_file(file); 29 - 30 - if (len & ~huge_page_mask(h)) 31 - return -EINVAL; 32 - if (len > TASK_SIZE) 33 - return -ENOMEM; 34 - 35 - if (flags & MAP_FIXED) 36 - if (prepare_hugepage_range(file, addr, len)) 37 - return -EINVAL; 38 - 39 - if (addr) 40 - addr = ALIGN(addr, huge_page_size(h)); 41 - 42 - /* we need to make sure the colouring is OK */ 43 - return arch_get_unmapped_area(file, addr, len, pgoff, flags, 0); 44 - } 45 24 46 25 47 26 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
-10
arch/powerpc/mm/book3s64/slice.c
··· 814 814 815 815 return 1UL << mmu_psize_to_shift(get_slice_psize(vma->vm_mm, vma->vm_start)); 816 816 } 817 - 818 - unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, 819 - unsigned long len, unsigned long pgoff, 820 - unsigned long flags) 821 - { 822 - if (radix_enabled()) 823 - return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags); 824 - 825 - return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1); 826 - } 827 817 #endif
-85
arch/s390/mm/hugetlbpage.c
··· 242 242 else 243 243 return false; 244 244 } 245 - 246 - static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, 247 - unsigned long addr, unsigned long len, 248 - unsigned long pgoff, unsigned long flags) 249 - { 250 - struct hstate *h = hstate_file(file); 251 - struct vm_unmapped_area_info info = {}; 252 - 253 - info.length = len; 254 - info.low_limit = current->mm->mmap_base; 255 - info.high_limit = TASK_SIZE; 256 - info.align_mask = PAGE_MASK & ~huge_page_mask(h); 257 - return vm_unmapped_area(&info); 258 - } 259 - 260 - static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, 261 - unsigned long addr0, unsigned long len, 262 - unsigned long pgoff, unsigned long flags) 263 - { 264 - struct hstate *h = hstate_file(file); 265 - struct vm_unmapped_area_info info = {}; 266 - unsigned long addr; 267 - 268 - info.flags = VM_UNMAPPED_AREA_TOPDOWN; 269 - info.length = len; 270 - info.low_limit = PAGE_SIZE; 271 - info.high_limit = current->mm->mmap_base; 272 - info.align_mask = PAGE_MASK & ~huge_page_mask(h); 273 - addr = vm_unmapped_area(&info); 274 - 275 - /* 276 - * A failed mmap() very likely causes application failure, 277 - * so fall back to the bottom-up function here. This scenario 278 - * can happen with large stack limits and large mmap() 279 - * allocations. 
280 - */ 281 - if (addr & ~PAGE_MASK) { 282 - VM_BUG_ON(addr != -ENOMEM); 283 - info.flags = 0; 284 - info.low_limit = TASK_UNMAPPED_BASE; 285 - info.high_limit = TASK_SIZE; 286 - addr = vm_unmapped_area(&info); 287 - } 288 - 289 - return addr; 290 - } 291 - 292 - unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, 293 - unsigned long len, unsigned long pgoff, unsigned long flags) 294 - { 295 - struct hstate *h = hstate_file(file); 296 - struct mm_struct *mm = current->mm; 297 - struct vm_area_struct *vma; 298 - 299 - if (len & ~huge_page_mask(h)) 300 - return -EINVAL; 301 - if (len > TASK_SIZE - mmap_min_addr) 302 - return -ENOMEM; 303 - 304 - if (flags & MAP_FIXED) { 305 - if (prepare_hugepage_range(file, addr, len)) 306 - return -EINVAL; 307 - goto check_asce_limit; 308 - } 309 - 310 - if (addr) { 311 - addr = ALIGN(addr, huge_page_size(h)); 312 - vma = find_vma(mm, addr); 313 - if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && 314 - (!vma || addr + len <= vm_start_gap(vma))) 315 - goto check_asce_limit; 316 - } 317 - 318 - if (!test_bit(MMF_TOPDOWN, &mm->flags)) 319 - addr = hugetlb_get_unmapped_area_bottomup(file, addr, len, 320 - pgoff, flags); 321 - else 322 - addr = hugetlb_get_unmapped_area_topdown(file, addr, len, 323 - pgoff, flags); 324 - if (offset_in_page(addr)) 325 - return addr; 326 - 327 - check_asce_limit: 328 - return check_asce_limit(mm, addr, len); 329 - }
-108
arch/sparc/mm/hugetlbpage.c
··· 19 19 #include <asm/cacheflush.h> 20 20 #include <asm/mmu_context.h> 21 21 22 - /* Slightly simplified from the non-hugepage variant because by 23 - * definition we don't have to worry about any page coloring stuff 24 - */ 25 - 26 - static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, 27 - unsigned long addr, 28 - unsigned long len, 29 - unsigned long pgoff, 30 - unsigned long flags) 31 - { 32 - struct hstate *h = hstate_file(filp); 33 - unsigned long task_size = TASK_SIZE; 34 - struct vm_unmapped_area_info info = {}; 35 - 36 - if (test_thread_flag(TIF_32BIT)) 37 - task_size = STACK_TOP32; 38 - 39 - info.length = len; 40 - info.low_limit = TASK_UNMAPPED_BASE; 41 - info.high_limit = min(task_size, VA_EXCLUDE_START); 42 - info.align_mask = PAGE_MASK & ~huge_page_mask(h); 43 - addr = vm_unmapped_area(&info); 44 - 45 - if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) { 46 - VM_BUG_ON(addr != -ENOMEM); 47 - info.low_limit = VA_EXCLUDE_END; 48 - info.high_limit = task_size; 49 - addr = vm_unmapped_area(&info); 50 - } 51 - 52 - return addr; 53 - } 54 - 55 - static unsigned long 56 - hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, 57 - const unsigned long len, 58 - const unsigned long pgoff, 59 - const unsigned long flags) 60 - { 61 - struct hstate *h = hstate_file(filp); 62 - struct mm_struct *mm = current->mm; 63 - unsigned long addr = addr0; 64 - struct vm_unmapped_area_info info = {}; 65 - 66 - /* This should only ever run for 32-bit processes. */ 67 - BUG_ON(!test_thread_flag(TIF_32BIT)); 68 - 69 - info.flags = VM_UNMAPPED_AREA_TOPDOWN; 70 - info.length = len; 71 - info.low_limit = PAGE_SIZE; 72 - info.high_limit = mm->mmap_base; 73 - info.align_mask = PAGE_MASK & ~huge_page_mask(h); 74 - addr = vm_unmapped_area(&info); 75 - 76 - /* 77 - * A failed mmap() very likely causes application failure, 78 - * so fall back to the bottom-up function here. 
This scenario 79 - * can happen with large stack limits and large mmap() 80 - * allocations. 81 - */ 82 - if (addr & ~PAGE_MASK) { 83 - VM_BUG_ON(addr != -ENOMEM); 84 - info.flags = 0; 85 - info.low_limit = TASK_UNMAPPED_BASE; 86 - info.high_limit = STACK_TOP32; 87 - addr = vm_unmapped_area(&info); 88 - } 89 - 90 - return addr; 91 - } 92 - 93 - unsigned long 94 - hugetlb_get_unmapped_area(struct file *file, unsigned long addr, 95 - unsigned long len, unsigned long pgoff, unsigned long flags) 96 - { 97 - struct hstate *h = hstate_file(file); 98 - struct mm_struct *mm = current->mm; 99 - struct vm_area_struct *vma; 100 - unsigned long task_size = TASK_SIZE; 101 - 102 - if (test_thread_flag(TIF_32BIT)) 103 - task_size = STACK_TOP32; 104 - 105 - if (len & ~huge_page_mask(h)) 106 - return -EINVAL; 107 - if (len > task_size) 108 - return -ENOMEM; 109 - 110 - if (flags & MAP_FIXED) { 111 - if (prepare_hugepage_range(file, addr, len)) 112 - return -EINVAL; 113 - return addr; 114 - } 115 - 116 - if (addr) { 117 - addr = ALIGN(addr, huge_page_size(h)); 118 - vma = find_vma(mm, addr); 119 - if (task_size - len >= addr && 120 - (!vma || addr + len <= vm_start_gap(vma))) 121 - return addr; 122 - } 123 - if (!test_bit(MMF_TOPDOWN, &mm->flags)) 124 - return hugetlb_get_unmapped_area_bottomup(file, addr, len, 125 - pgoff, flags); 126 - else 127 - return hugetlb_get_unmapped_area_topdown(file, addr, len, 128 - pgoff, flags); 129 - } 130 22 131 23 static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift) 132 24 {
-101
arch/x86/mm/hugetlbpage.c
··· 19 19 #include <asm/tlbflush.h> 20 20 #include <asm/elf.h> 21 21 22 - #ifdef CONFIG_HUGETLB_PAGE 23 - static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, 24 - unsigned long addr, unsigned long len, 25 - unsigned long pgoff, unsigned long flags) 26 - { 27 - struct hstate *h = hstate_file(file); 28 - struct vm_unmapped_area_info info = {}; 29 - 30 - info.length = len; 31 - info.low_limit = get_mmap_base(1); 32 - 33 - /* 34 - * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area 35 - * in the full address space. 36 - */ 37 - info.high_limit = in_32bit_syscall() ? 38 - task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW); 39 - 40 - info.align_mask = PAGE_MASK & ~huge_page_mask(h); 41 - return vm_unmapped_area(&info); 42 - } 43 - 44 - static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, 45 - unsigned long addr, unsigned long len, 46 - unsigned long pgoff, unsigned long flags) 47 - { 48 - struct hstate *h = hstate_file(file); 49 - struct vm_unmapped_area_info info = {}; 50 - 51 - info.flags = VM_UNMAPPED_AREA_TOPDOWN; 52 - info.length = len; 53 - info.low_limit = PAGE_SIZE; 54 - info.high_limit = get_mmap_base(0); 55 - 56 - /* 57 - * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area 58 - * in the full address space. 59 - */ 60 - if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall()) 61 - info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW; 62 - 63 - info.align_mask = PAGE_MASK & ~huge_page_mask(h); 64 - addr = vm_unmapped_area(&info); 65 - 66 - /* 67 - * A failed mmap() very likely causes application failure, 68 - * so fall back to the bottom-up function here. This scenario 69 - * can happen with large stack limits and large mmap() 70 - * allocations. 
71 - */ 72 - if (addr & ~PAGE_MASK) { 73 - VM_BUG_ON(addr != -ENOMEM); 74 - info.flags = 0; 75 - info.low_limit = TASK_UNMAPPED_BASE; 76 - info.high_limit = TASK_SIZE_LOW; 77 - addr = vm_unmapped_area(&info); 78 - } 79 - 80 - return addr; 81 - } 82 - 83 - unsigned long 84 - hugetlb_get_unmapped_area(struct file *file, unsigned long addr, 85 - unsigned long len, unsigned long pgoff, unsigned long flags) 86 - { 87 - struct hstate *h = hstate_file(file); 88 - struct mm_struct *mm = current->mm; 89 - struct vm_area_struct *vma; 90 - 91 - if (len & ~huge_page_mask(h)) 92 - return -EINVAL; 93 - 94 - if (len > TASK_SIZE) 95 - return -ENOMEM; 96 - 97 - /* No address checking. See comment at mmap_address_hint_valid() */ 98 - if (flags & MAP_FIXED) { 99 - if (prepare_hugepage_range(file, addr, len)) 100 - return -EINVAL; 101 - return addr; 102 - } 103 - 104 - if (addr) { 105 - addr &= huge_page_mask(h); 106 - if (!mmap_address_hint_valid(addr, len)) 107 - goto get_unmapped_area; 108 - 109 - vma = find_vma(mm, addr); 110 - if (!vma || addr + len <= vm_start_gap(vma)) 111 - return addr; 112 - } 113 - 114 - get_unmapped_area: 115 - if (!test_bit(MMF_TOPDOWN, &mm->flags)) 116 - return hugetlb_get_unmapped_area_bottomup(file, addr, len, 117 - pgoff, flags); 118 - else 119 - return hugetlb_get_unmapped_area_topdown(file, addr, len, 120 - pgoff, flags); 121 - } 122 - #endif /* CONFIG_HUGETLB_PAGE */ 123 22 124 23 #ifdef CONFIG_X86_64 125 24 bool __init arch_hugetlb_valid_size(unsigned long size)
+5 -91
fs/hugetlbfs/inode.c
··· 171 171 * Called under mmap_write_lock(mm). 172 172 */ 173 173 174 - static unsigned long 175 - hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr, 176 - unsigned long len, unsigned long pgoff, unsigned long flags) 177 - { 178 - struct hstate *h = hstate_file(file); 179 - struct vm_unmapped_area_info info = {}; 180 - 181 - info.length = len; 182 - info.low_limit = current->mm->mmap_base; 183 - info.high_limit = arch_get_mmap_end(addr, len, flags); 184 - info.align_mask = PAGE_MASK & ~huge_page_mask(h); 185 - return vm_unmapped_area(&info); 186 - } 187 - 188 - static unsigned long 189 - hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr, 190 - unsigned long len, unsigned long pgoff, unsigned long flags) 191 - { 192 - struct hstate *h = hstate_file(file); 193 - struct vm_unmapped_area_info info = {}; 194 - 195 - info.flags = VM_UNMAPPED_AREA_TOPDOWN; 196 - info.length = len; 197 - info.low_limit = PAGE_SIZE; 198 - info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base); 199 - info.align_mask = PAGE_MASK & ~huge_page_mask(h); 200 - addr = vm_unmapped_area(&info); 201 - 202 - /* 203 - * A failed mmap() very likely causes application failure, 204 - * so fall back to the bottom-up function here. This scenario 205 - * can happen with large stack limits and large mmap() 206 - * allocations. 
207 - */ 208 - if (unlikely(offset_in_page(addr))) { 209 - VM_BUG_ON(addr != -ENOMEM); 210 - info.flags = 0; 211 - info.low_limit = current->mm->mmap_base; 212 - info.high_limit = arch_get_mmap_end(addr, len, flags); 213 - addr = vm_unmapped_area(&info); 214 - } 215 - 216 - return addr; 217 - } 218 - 219 174 unsigned long 220 - generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr, 221 - unsigned long len, unsigned long pgoff, 222 - unsigned long flags) 223 - { 224 - struct mm_struct *mm = current->mm; 225 - struct vm_area_struct *vma, *prev; 226 - struct hstate *h = hstate_file(file); 227 - const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags); 228 - 229 - if (len & ~huge_page_mask(h)) 230 - return -EINVAL; 231 - if (len > mmap_end - mmap_min_addr) 232 - return -ENOMEM; 233 - 234 - if (flags & MAP_FIXED) { 235 - if (prepare_hugepage_range(file, addr, len)) 236 - return -EINVAL; 237 - return addr; 238 - } 239 - 240 - if (addr) { 241 - addr = ALIGN(addr, huge_page_size(h)); 242 - vma = find_vma_prev(mm, addr, &prev); 243 - if (mmap_end - len >= addr && addr >= mmap_min_addr && 244 - (!vma || addr + len <= vm_start_gap(vma)) && 245 - (!prev || addr >= vm_end_gap(prev))) 246 - return addr; 247 - } 248 - 249 - /* 250 - * Use MMF_TOPDOWN flag as a hint to use topdown routine. 251 - * If architectures have special needs, they should define their own 252 - * version of hugetlb_get_unmapped_area. 
253 - */ 254 - if (test_bit(MMF_TOPDOWN, &mm->flags)) 255 - return hugetlb_get_unmapped_area_topdown(file, addr, len, 256 - pgoff, flags); 257 - return hugetlb_get_unmapped_area_bottomup(file, addr, len, 258 - pgoff, flags); 259 - } 260 - 261 - unsigned long 262 - __hugetlb_get_unmapped_area(struct file *file, unsigned long addr, 263 - unsigned long len, unsigned long flags) 175 + hugetlb_get_unmapped_area(struct file *file, unsigned long addr, 176 + unsigned long len, unsigned long pgoff, 177 + unsigned long flags) 264 178 { 265 179 unsigned long addr0 = 0; 266 180 struct hstate *h = hstate_file(file); ··· 186 272 if (addr) 187 273 addr0 = ALIGN(addr, huge_page_size(h)); 188 274 189 - return mm_get_unmapped_area_vmflags(current->mm, file, addr, len, pgoff, 275 + return mm_get_unmapped_area_vmflags(current->mm, file, addr0, len, pgoff, 190 276 flags, 0); 191 277 } 192 278 ··· 1222 1308 .read_iter = hugetlbfs_read_iter, 1223 1309 .mmap = hugetlbfs_file_mmap, 1224 1310 .fsync = noop_fsync, 1225 - .get_unmapped_area = __hugetlb_get_unmapped_area, 1311 + .get_unmapped_area = hugetlb_get_unmapped_area, 1226 1312 .llseek = default_llseek, 1227 1313 .fallocate = hugetlbfs_fallocate, 1228 1314 .fop_flags = FOP_HUGE_PAGES,
+1 -6
include/linux/hugetlb.h
··· 547 547 #endif /* !CONFIG_HUGETLBFS */ 548 548 549 549 unsigned long 550 - __generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr, 550 + hugetlb_get_unmapped_area(struct file *file, unsigned long addr, 551 551 unsigned long len, unsigned long pgoff, 552 552 unsigned long flags); 553 - 554 - unsigned long 555 - generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr, 556 - unsigned long len, unsigned long pgoff, 557 - unsigned long flags); 558 553 559 554 /* 560 555 * huegtlb page specific state flags. These flags are located in page.private