Merge tag 'mm-hotfixes-stable-2026-02-06-12-37' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull hotfixes from Andrew Morton:
"A couple of late-breaking MM fixes. One against a new-in-this-cycle
patch and the other addresses a locking issue which has been there for
over a year"

* tag 'mm-hotfixes-stable-2026-02-06-12-37' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
mm/memory-failure: reject unsupported non-folio compound page
procfs: avoid fetching build ID while holding VMA lock
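
For context, the procfs fix is on the PROCMAP_QUERY ioctl path of /proc/<pid>/maps
(the struct procmap_query handling patched below). A rough userspace sketch of that
interface follows; the helper name, buffer sizes and error handling are illustrative
only, and field names such as query_addr and vma_start come from the existing uapi
<linux/fs.h>, not from this merge:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/fs.h>		/* struct procmap_query, PROCMAP_QUERY */

	/* Illustrative helper: look up the VMA covering 'addr' in our own
	 * address space and ask the kernel for its name and ELF build ID. */
	static int query_build_id(unsigned long addr)
	{
		char name[4096];
		unsigned char build_id[20];	/* BUILD_ID_SIZE_MAX */
		struct procmap_query q;
		int fd, err;

		fd = open("/proc/self/maps", O_RDONLY);
		if (fd < 0)
			return -1;

		memset(&q, 0, sizeof(q));
		q.size = sizeof(q);		/* uapi struct is versioned by size */
		q.query_addr = addr;
		q.query_flags = 0;		/* 0: VMA must cover query_addr */
		q.vma_name_addr = (uint64_t)(uintptr_t)name;
		q.vma_name_size = sizeof(name);
		q.build_id_addr = (uint64_t)(uintptr_t)build_id;
		q.build_id_size = sizeof(build_id);

		err = ioctl(fd, PROCMAP_QUERY, &q);
		close(fd);
		if (err)
			return -1;		/* no such VMA, or kernel lacks support */

		printf("%llx-%llx %s: %u-byte build id\n",
		       (unsigned long long)q.vma_start, (unsigned long long)q.vma_end,
		       q.vma_name_size ? name : "<anon>", q.build_id_size);
		return 0;
	}

The locking fix matters on exactly this path: build_id_parse() can fault in pages of
the mapped file, so the patch below takes a reference on vma->vm_file and defers the
parse until after query_vma_teardown()/mmput().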

+80 -49
+27 -15
fs/proc/task_mmu.c
···
 	struct proc_maps_locking_ctx lock_ctx = { .mm = mm };
 	struct procmap_query karg;
 	struct vm_area_struct *vma;
+	struct file *vm_file = NULL;
 	const char *name = NULL;
 	char build_id_buf[BUILD_ID_SIZE_MAX], *name_buf = NULL;
 	__u64 usize;
···
 		karg.inode = 0;
 	}
 
-	if (karg.build_id_size) {
-		__u32 build_id_sz;
-
-		err = build_id_parse(vma, build_id_buf, &build_id_sz);
-		if (err) {
-			karg.build_id_size = 0;
-		} else {
-			if (karg.build_id_size < build_id_sz) {
-				err = -ENAMETOOLONG;
-				goto out;
-			}
-			karg.build_id_size = build_id_sz;
-		}
-	}
-
 	if (karg.vma_name_size) {
 		size_t name_buf_sz = min_t(size_t, PATH_MAX, karg.vma_name_size);
 		const struct path *path;
···
 		karg.vma_name_size = name_sz;
 	}
 
+	if (karg.build_id_size && vma->vm_file)
+		vm_file = get_file(vma->vm_file);
+
 	/* unlock vma or mmap_lock, and put mm_struct before copying data to user */
 	query_vma_teardown(&lock_ctx);
 	mmput(mm);
+
+	if (karg.build_id_size) {
+		__u32 build_id_sz;
+
+		if (vm_file)
+			err = build_id_parse_file(vm_file, build_id_buf, &build_id_sz);
+		else
+			err = -ENOENT;
+		if (err) {
+			karg.build_id_size = 0;
+		} else {
+			if (karg.build_id_size < build_id_sz) {
+				err = -ENAMETOOLONG;
+				goto out;
+			}
+			karg.build_id_size = build_id_sz;
+		}
+	}
+
+	if (vm_file)
+		fput(vm_file);
 
 	if (karg.vma_name_size && copy_to_user(u64_to_user_ptr(karg.vma_name_addr),
 					       name, karg.vma_name_size)) {
···
 out:
 	query_vma_teardown(&lock_ctx);
 	mmput(mm);
+	if (vm_file)
+		fput(vm_file);
 	kfree(name_buf);
 	return err;
 }
+3
include/linux/buildid.h
···
 #define BUILD_ID_SIZE_MAX	20
 
 struct vm_area_struct;
+struct file;
+
 int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size);
+int build_id_parse_file(struct file *file, unsigned char *build_id, __u32 *size);
 int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size);
 int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size);
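
The build_id_parse_file() variant declared above exists so the may-fault parse can run
with no VMA or mmap lock held. A minimal sketch of the calling pattern, mirroring the
fs/proc/task_mmu.c hunk earlier in this merge (the wrapper name is illustrative, not a
kernel function):

	/* Sketch only: pin the VMA's file while the VMA is still locked/stable,
	 * drop the lock, then parse the build ID in a faultable context. */
	static int fetch_build_id_unlocked(struct vm_area_struct *vma,
					   unsigned char *buf, __u32 *sz)
	{
		struct file *file = NULL;
		int err = -ENOENT;

		/* still under mmap_lock or the per-VMA lock: only grab a reference */
		if (vma->vm_file)
			file = get_file(vma->vm_file);

		/* ... unlock the VMA / mmap_lock here ... */

		if (file) {
			err = build_id_parse_file(file, buf, sz);	/* may fault */
			fput(file);
		}
		return err;
	}
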
+30 -12
lib/buildid.c
···
 /* enough for Elf64_Ehdr, Elf64_Phdr, and all the smaller requests */
 #define MAX_FREADER_BUF_SZ	64
 
-static int __build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
+static int __build_id_parse(struct file *file, unsigned char *build_id,
 			    __u32 *size, bool may_fault)
 {
 	const Elf32_Ehdr *ehdr;
···
 	char buf[MAX_FREADER_BUF_SZ];
 	int ret;
 
-	/* only works for page backed storage */
-	if (!vma->vm_file)
-		return -EINVAL;
-
-	freader_init_from_file(&r, buf, sizeof(buf), vma->vm_file, may_fault);
+	freader_init_from_file(&r, buf, sizeof(buf), file, may_fault);
 
 	/* fetch first 18 bytes of ELF header for checks */
 	ehdr = freader_fetch(&r, 0, offsetofend(Elf32_Ehdr, e_type));
···
 	return ret;
 }
 
-/*
- * Parse build ID of ELF file mapped to vma
+/**
+ * build_id_parse_nofault() - Parse build ID of ELF file mapped to vma
  * @vma: vma object
  * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
  * @size: returns actual build id size in case of success
···
  */
 int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size)
 {
-	return __build_id_parse(vma, build_id, size, false /* !may_fault */);
+	if (!vma->vm_file)
+		return -EINVAL;
+
+	return __build_id_parse(vma->vm_file, build_id, size, false /* !may_fault */);
 }
 
-/*
- * Parse build ID of ELF file mapped to VMA
+/**
+ * build_id_parse() - Parse build ID of ELF file mapped to VMA
  * @vma: vma object
  * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
  * @size: returns actual build id size in case of success
···
  */
 int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size)
 {
-	return __build_id_parse(vma, build_id, size, true /* may_fault */);
+	if (!vma->vm_file)
+		return -EINVAL;
+
+	return __build_id_parse(vma->vm_file, build_id, size, true /* may_fault */);
+}
+
+/**
+ * build_id_parse_file() - Parse build ID of ELF file
+ * @file: file object
+ * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
+ * @size: returns actual build id size in case of success
+ *
+ * Assumes faultable context and can cause page faults to bring in file data
+ * into page cache.
+ *
+ * Return: 0 on success; negative error, otherwise
+ */
+int build_id_parse_file(struct file *file, unsigned char *build_id, __u32 *size)
+{
+	return __build_id_parse(file, build_id, size, true /* may_fault */);
 }
 
 /**
+20 -22
mm/memory-failure.c
···
 	 * In fact it's dangerous to directly bump up page count from 0,
 	 * that may make page_ref_freeze()/page_ref_unfreeze() mismatch.
 	 */
-	if (!(flags & MF_COUNT_INCREASED)) {
-		res = get_hwpoison_page(p, flags);
-		if (!res) {
-			if (is_free_buddy_page(p)) {
-				if (take_page_off_buddy(p)) {
-					page_ref_inc(p);
-					res = MF_RECOVERED;
-				} else {
-					/* We lost the race, try again */
-					if (retry) {
-						ClearPageHWPoison(p);
-						retry = false;
-						goto try_again;
-					}
-					res = MF_FAILED;
-				}
-				res = action_result(pfn, MF_MSG_BUDDY, res);
+	res = get_hwpoison_page(p, flags);
+	if (!res) {
+		if (is_free_buddy_page(p)) {
+			if (take_page_off_buddy(p)) {
+				page_ref_inc(p);
+				res = MF_RECOVERED;
 			} else {
-				res = action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
+				/* We lost the race, try again */
+				if (retry) {
+					ClearPageHWPoison(p);
+					retry = false;
+					goto try_again;
+				}
+				res = MF_FAILED;
 			}
-			goto unlock_mutex;
-		} else if (res < 0) {
-			res = action_result(pfn, MF_MSG_GET_HWPOISON, MF_IGNORED);
-			goto unlock_mutex;
+			res = action_result(pfn, MF_MSG_BUDDY, res);
+		} else {
+			res = action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
 		}
+		goto unlock_mutex;
+	} else if (res < 0) {
+		res = action_result(pfn, MF_MSG_GET_HWPOISON, MF_IGNORED);
+		goto unlock_mutex;
 	}
 
 	folio = page_folio(p);