Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mm-hotfixes-stable-2022-05-27' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull hotfixes from Andrew Morton:
"Six hotfixes.

The page_table_check one from Miaohe Lin is considered a minor thing
so it isn't marked for -stable. The remainder address pre-5.19 issues
and are cc:stable"

* tag 'mm-hotfixes-stable-2022-05-27' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
mm/page_table_check: fix accessing unmapped ptep
kexec_file: drop weak attribute from arch_kexec_apply_relocations[_add]
mm/page_alloc: always attempt to allocate at least one page during bulk allocation
hugetlb: fix huge_pmd_unshare address update
zsmalloc: fix races between asynchronous zspage free and page migration
Revert "mm/cma.c: remove redundant cma_mutex lock"

+103 -51
+10
arch/s390/include/asm/kexec.h
··· 9 9 #ifndef _S390_KEXEC_H 10 10 #define _S390_KEXEC_H 11 11 12 + #include <linux/module.h> 13 + 12 14 #include <asm/processor.h> 13 15 #include <asm/page.h> 14 16 #include <asm/setup.h> ··· 85 83 extern const struct kexec_file_ops s390_kexec_image_ops; 86 84 extern const struct kexec_file_ops s390_kexec_elf_ops; 87 85 86 + #ifdef CONFIG_KEXEC_FILE 87 + struct purgatory_info; 88 + int arch_kexec_apply_relocations_add(struct purgatory_info *pi, 89 + Elf_Shdr *section, 90 + const Elf_Shdr *relsec, 91 + const Elf_Shdr *symtab); 92 + #define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add 93 + #endif 88 94 #endif /*_S390_KEXEC_H */
+8
arch/x86/include/asm/kexec.h
··· 186 186 extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages); 187 187 #define arch_kexec_pre_free_pages arch_kexec_pre_free_pages 188 188 189 + #ifdef CONFIG_KEXEC_FILE 190 + struct purgatory_info; 191 + int arch_kexec_apply_relocations_add(struct purgatory_info *pi, 192 + Elf_Shdr *section, 193 + const Elf_Shdr *relsec, 194 + const Elf_Shdr *symtab); 195 + #define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add 196 + #endif 189 197 #endif 190 198 191 199 typedef void crash_vmclear_fn(void);
+38 -8
include/linux/kexec.h
··· 193 193 int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, 194 194 unsigned long buf_len); 195 195 void *arch_kexec_kernel_image_load(struct kimage *image); 196 - int arch_kexec_apply_relocations_add(struct purgatory_info *pi, 197 - Elf_Shdr *section, 198 - const Elf_Shdr *relsec, 199 - const Elf_Shdr *symtab); 200 - int arch_kexec_apply_relocations(struct purgatory_info *pi, 201 - Elf_Shdr *section, 202 - const Elf_Shdr *relsec, 203 - const Elf_Shdr *symtab); 204 196 int arch_kimage_file_post_load_cleanup(struct kimage *image); 205 197 #ifdef CONFIG_KEXEC_SIG 206 198 int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, ··· 221 229 unsigned long long mend); 222 230 extern int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map, 223 231 void **addr, unsigned long *sz); 232 + 233 + #ifndef arch_kexec_apply_relocations_add 234 + /* 235 + * arch_kexec_apply_relocations_add - apply relocations of type RELA 236 + * @pi: Purgatory to be relocated. 237 + * @section: Section relocations applying to. 238 + * @relsec: Section containing RELAs. 239 + * @symtab: Corresponding symtab. 240 + * 241 + * Return: 0 on success, negative errno on error. 242 + */ 243 + static inline int 244 + arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section, 245 + const Elf_Shdr *relsec, const Elf_Shdr *symtab) 246 + { 247 + pr_err("RELA relocation unsupported.\n"); 248 + return -ENOEXEC; 249 + } 250 + #endif 251 + 252 + #ifndef arch_kexec_apply_relocations 253 + /* 254 + * arch_kexec_apply_relocations - apply relocations of type REL 255 + * @pi: Purgatory to be relocated. 256 + * @section: Section relocations applying to. 257 + * @relsec: Section containing RELs. 258 + * @symtab: Corresponding symtab. 259 + * 260 + * Return: 0 on success, negative errno on error. 
261 + */ 262 + static inline int 263 + arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section, 264 + const Elf_Shdr *relsec, const Elf_Shdr *symtab) 265 + { 266 + pr_err("REL relocation unsupported.\n"); 267 + return -ENOEXEC; 268 + } 269 + #endif 224 270 #endif /* CONFIG_KEXEC_FILE */ 225 271 226 272 #ifdef CONFIG_KEXEC_ELF
-34
kernel/kexec_file.c
··· 109 109 #endif 110 110 111 111 /* 112 - * arch_kexec_apply_relocations_add - apply relocations of type RELA 113 - * @pi: Purgatory to be relocated. 114 - * @section: Section relocations applying to. 115 - * @relsec: Section containing RELAs. 116 - * @symtab: Corresponding symtab. 117 - * 118 - * Return: 0 on success, negative errno on error. 119 - */ 120 - int __weak 121 - arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section, 122 - const Elf_Shdr *relsec, const Elf_Shdr *symtab) 123 - { 124 - pr_err("RELA relocation unsupported.\n"); 125 - return -ENOEXEC; 126 - } 127 - 128 - /* 129 - * arch_kexec_apply_relocations - apply relocations of type REL 130 - * @pi: Purgatory to be relocated. 131 - * @section: Section relocations applying to. 132 - * @relsec: Section containing RELs. 133 - * @symtab: Corresponding symtab. 134 - * 135 - * Return: 0 on success, negative errno on error. 136 - */ 137 - int __weak 138 - arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section, 139 - const Elf_Shdr *relsec, const Elf_Shdr *symtab) 140 - { 141 - pr_err("REL relocation unsupported.\n"); 142 - return -ENOEXEC; 143 - } 144 - 145 - /* 146 112 * Free up memory used by kernel, initrd, and command line. This is temporary 147 113 * memory allocation which is not needed any more after these buffers have 148 114 * been loaded into separate segments and have been copied elsewhere.
+3 -1
mm/cma.c
··· 37 37 38 38 struct cma cma_areas[MAX_CMA_AREAS]; 39 39 unsigned cma_area_count; 40 + static DEFINE_MUTEX(cma_mutex); 40 41 41 42 phys_addr_t cma_get_base(const struct cma *cma) 42 43 { ··· 469 468 spin_unlock_irq(&cma->lock); 470 469 471 470 pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit); 471 + mutex_lock(&cma_mutex); 472 472 ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, 473 473 GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0)); 474 - 474 + mutex_unlock(&cma_mutex); 475 475 if (ret == 0) { 476 476 page = pfn_to_page(pfn); 477 477 break;
+8 -1
mm/hugetlb.c
··· 6755 6755 pud_clear(pud); 6756 6756 put_page(virt_to_page(ptep)); 6757 6757 mm_dec_nr_pmds(mm); 6758 - *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE; 6758 + /* 6759 + * This update of passed address optimizes loops sequentially 6760 + * processing addresses in increments of huge page size (PMD_SIZE 6761 + * in this case). By clearing the pud, a PUD_SIZE area is unmapped. 6762 + * Update address to the 'last page' in the cleared area so that 6763 + * calling loop can move to first page past this area. 6764 + */ 6765 + *addr |= PUD_SIZE - PMD_SIZE; 6759 6766 return 1; 6760 6767 } 6761 6768
+2 -2
mm/page_alloc.c
··· 5324 5324 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, 5325 5325 pcp, pcp_list); 5326 5326 if (unlikely(!page)) { 5327 - /* Try and get at least one page */ 5328 - if (!nr_populated) 5327 + /* Try and allocate at least one page */ 5328 + if (!nr_account) 5329 5329 goto failed_irq; 5330 5330 break; 5331 5331 }
+1 -1
mm/page_table_check.c
··· 234 234 pte_t *ptep = pte_offset_map(&pmd, addr); 235 235 unsigned long i; 236 236 237 - pte_unmap(ptep); 238 237 for (i = 0; i < PTRS_PER_PTE; i++) { 239 238 __page_table_check_pte_clear(mm, addr, *ptep); 240 239 addr += PAGE_SIZE; 241 240 ptep++; 242 241 } 242 + pte_unmap(ptep - PTRS_PER_PTE); 243 243 } 244 244 }
+33 -4
mm/zsmalloc.c
··· 1718 1718 */ 1719 1719 static void lock_zspage(struct zspage *zspage) 1720 1720 { 1721 - struct page *page = get_first_page(zspage); 1721 + struct page *curr_page, *page; 1722 1722 1723 - do { 1724 - lock_page(page); 1725 - } while ((page = get_next_page(page)) != NULL); 1723 + /* 1724 + * Pages we haven't locked yet can be migrated off the list while we're 1725 + * trying to lock them, so we need to be careful and only attempt to 1726 + * lock each page under migrate_read_lock(). Otherwise, the page we lock 1727 + * may no longer belong to the zspage. This means that we may wait for 1728 + * the wrong page to unlock, so we must take a reference to the page 1729 + * prior to waiting for it to unlock outside migrate_read_lock(). 1730 + */ 1731 + while (1) { 1732 + migrate_read_lock(zspage); 1733 + page = get_first_page(zspage); 1734 + if (trylock_page(page)) 1735 + break; 1736 + get_page(page); 1737 + migrate_read_unlock(zspage); 1738 + wait_on_page_locked(page); 1739 + put_page(page); 1740 + } 1741 + 1742 + curr_page = page; 1743 + while ((page = get_next_page(curr_page))) { 1744 + if (trylock_page(page)) { 1745 + curr_page = page; 1746 + } else { 1747 + get_page(page); 1748 + migrate_read_unlock(zspage); 1749 + wait_on_page_locked(page); 1750 + put_page(page); 1751 + migrate_read_lock(zspage); 1752 + } 1753 + } 1754 + migrate_read_unlock(zspage); 1726 1755 } 1727 1756 1728 1757 static int zs_init_fs_context(struct fs_context *fc)