Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'mm-hotfixes-stable-2023-02-13-13-50' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
"Twelve hotfixes, mostly against mm/.

Five of these fixes are cc:stable"

* tag 'mm-hotfixes-stable-2023-02-13-13-50' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
of: reserved_mem: Have kmemleak ignore dynamically allocated reserved mem
scripts/gdb: fix 'lx-current' for x86
lib: parser: optimize match_NUMBER apis to use local array
mm: shrinkers: fix deadlock in shrinker debugfs
mm: hwpoison: support recovery from ksm_might_need_to_copy()
kasan: fix Oops due to missing calls to kasan_arch_is_ready()
revert "squashfs: harden sanity check in squashfs_read_xattr_id_table"
fsdax: dax_unshare_iter() should return a valid length
mm/gup: add folio to list when folio_isolate_lru() succeed
aio: fix mremap after fork null-deref
mailmap: add entry for Alexander Mikhalitsyn
mm: extend max struct page size for kmsan

+102 -45
+2
.mailmap
···
 Alexander Lobakin <alobakin@pm.me> <alobakin@dlink.ru>
 Alexander Lobakin <alobakin@pm.me> <alobakin@marvell.com>
 Alexander Lobakin <alobakin@pm.me> <bloodyreaper@yandex.ru>
+Alexander Mikhalitsyn <alexander@mihalicyn.com> <alexander.mikhalitsyn@virtuozzo.com>
+Alexander Mikhalitsyn <alexander@mihalicyn.com> <aleksandr.mikhalitsyn@canonical.com>
 Alexandre Belloni <alexandre.belloni@bootlin.com> <alexandre.belloni@free-electrons.com>
 Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com>
 Alexei Starovoitov <ast@kernel.org> <ast@fb.com>
+2 -1
drivers/of/of_reserved_mem.c
···
         err = memblock_mark_nomap(base, size);
         if (err)
             memblock_phys_free(base, size);
-        kmemleak_ignore_phys(base);
     }
+
+    kmemleak_ignore_phys(base);
 
     return err;
 }
+4
fs/aio.c
···
     spin_lock(&mm->ioctx_lock);
     rcu_read_lock();
     table = rcu_dereference(mm->ioctx_table);
+    if (!table)
+        goto out_unlock;
+
     for (i = 0; i < table->nr; i++) {
         struct kioctx *ctx;
 
···
         }
     }
 
+out_unlock:
     rcu_read_unlock();
     spin_unlock(&mm->ioctx_lock);
     return res;
+3 -2
fs/dax.c
···
     if (ret < 0)
         goto out_unlock;
 
-    ret = copy_mc_to_kernel(daddr, saddr, length);
-    if (ret)
+    if (copy_mc_to_kernel(daddr, saddr, length) == 0)
+        ret = length;
+    else
         ret = -EIO;
 
 out_unlock:
+1 -1
fs/squashfs/xattr_id.c
···
     /* Sanity check values */
 
     /* there is always at least one xattr id */
-    if (*xattr_ids <= 0)
+    if (*xattr_ids == 0)
         return ERR_PTR(-EINVAL);
 
     len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
+9 -3
include/linux/mm.h
···
  * define their own version of this macro in <asm/pgtable.h>
  */
 #if BITS_PER_LONG == 64
-/* This function must be updated when the size of struct page grows above 80
+/* This function must be updated when the size of struct page grows above 96
  * or reduces below 56. The idea that compiler optimizes out switch()
  * statement, and only leaves move/store instructions. Also the compiler can
  * combine write statements if they are both assignments and can be reordered,
···
 {
     unsigned long *_pp = (void *)page;
 
-    /* Check that struct page is either 56, 64, 72, or 80 bytes */
+    /* Check that struct page is either 56, 64, 72, 80, 88 or 96 bytes */
     BUILD_BUG_ON(sizeof(struct page) & 7);
     BUILD_BUG_ON(sizeof(struct page) < 56);
-    BUILD_BUG_ON(sizeof(struct page) > 80);
+    BUILD_BUG_ON(sizeof(struct page) > 96);
 
     switch (sizeof(struct page)) {
+    case 96:
+        _pp[11] = 0;
+        fallthrough;
+    case 88:
+        _pp[10] = 0;
+        fallthrough;
     case 80:
         _pp[9] = 0;
         fallthrough;
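The arithmetic behind the two new cases: on a BITS_PER_LONG == 64 build an unsigned long is 8 bytes, so a 96-byte struct page spans 96 / 8 = 12 words and the highest word the switch has to clear is _pp[11]; an 88-byte struct page likewise ends at _pp[10]. A hypothetical compile-time sketch of that assumption, not part of the patch:

    /* Assumes an 8-byte unsigned long, as on 64-bit kernel builds. */
    _Static_assert(96 / sizeof(unsigned long) == 12, "96-byte struct page is 12 words, top index 11");
    _Static_assert(88 / sizeof(unsigned long) == 11, "88-byte struct page is 11 words, top index 10");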
+3 -2
include/linux/shrinker.h
···
 
 #ifdef CONFIG_SHRINKER_DEBUG
 extern int shrinker_debugfs_add(struct shrinker *shrinker);
-extern void shrinker_debugfs_remove(struct shrinker *shrinker);
+extern struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker);
 extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
                                                   const char *fmt, ...);
 #else /* CONFIG_SHRINKER_DEBUG */
···
 {
     return 0;
 }
-static inline void shrinker_debugfs_remove(struct shrinker *shrinker)
+static inline struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
 {
+    return NULL;
 }
 static inline __printf(2, 3)
 int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
+20 -19
lib/parser.c
···
 #include <linux/slab.h>
 #include <linux/string.h>
 
+/*
+ * max size needed by different bases to express U64
+ * HEX: "0xFFFFFFFFFFFFFFFF" --> 18
+ * DEC: "18446744073709551615" --> 20
+ * OCT: "01777777777777777777777" --> 23
+ * pick the max one to define NUMBER_BUF_LEN
+ */
+#define NUMBER_BUF_LEN 24
+
 /**
  * match_one - Determines if a string matches a simple pattern
  * @s: the string to examine for presence of the pattern
···
 static int match_number(substring_t *s, int *result, int base)
 {
     char *endp;
-    char *buf;
+    char buf[NUMBER_BUF_LEN];
     int ret;
     long val;
 
-    buf = match_strdup(s);
-    if (!buf)
-        return -ENOMEM;
-
+    if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN)
+        return -ERANGE;
     ret = 0;
     val = simple_strtol(buf, &endp, base);
     if (endp == buf)
···
         ret = -ERANGE;
     else
         *result = (int) val;
-    kfree(buf);
     return ret;
 }
 
···
  */
 static int match_u64int(substring_t *s, u64 *result, int base)
 {
-    char *buf;
+    char buf[NUMBER_BUF_LEN];
     int ret;
     u64 val;
 
-    buf = match_strdup(s);
-    if (!buf)
-        return -ENOMEM;
-
+    if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN)
+        return -ERANGE;
     ret = kstrtoull(buf, base, &val);
     if (!ret)
         *result = val;
-    kfree(buf);
     return ret;
 }
 
···
  */
 int match_uint(substring_t *s, unsigned int *result)
 {
-    int err = -ENOMEM;
-    char *buf = match_strdup(s);
+    char buf[NUMBER_BUF_LEN];
 
-    if (buf) {
-        err = kstrtouint(buf, 10, result);
-        kfree(buf);
-    }
-    return err;
+    if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN)
+        return -ERANGE;
+
+    return kstrtouint(buf, 10, result);
 }
 EXPORT_SYMBOL(match_uint);
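The NUMBER_BUF_LEN sizing in the new comment can be checked mechanically: the longest textual u64 is the octal spelling at 23 characters, and one extra byte covers the NUL terminator. match_strlcpy() follows strlcpy() semantics and returns the full source length, so a return value >= NUMBER_BUF_LEN means the token was truncated and the callers can reject it with -ERANGE instead of allocating. A hypothetical compile-time check, not part of the patch:

    /* A string literal's size includes its terminating NUL, so the longest
     * u64 spelling (octal, 23 digits) plus NUL is exactly 24 bytes. */
    _Static_assert(sizeof("01777777777777777777777") == 24,
                   "octal u64 plus NUL fits NUMBER_BUF_LEN");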
+1 -1
mm/gup.c
···
             drain_allow = false;
         }
 
-        if (!folio_isolate_lru(folio))
+        if (folio_isolate_lru(folio))
             continue;
 
         list_add_tail(&folio->lru, movable_page_list);
+3
mm/kasan/common.c
···
 
 static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
 {
+    if (!kasan_arch_is_ready())
+        return false;
+
     if (ptr != page_address(virt_to_head_page(ptr))) {
         kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
         return true;
+6 -1
mm/kasan/generic.c
···
 
 bool kasan_byte_accessible(const void *addr)
 {
-    s8 shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));
+    s8 shadow_byte;
+
+    if (!kasan_arch_is_ready())
+        return true;
+
+    shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));
 
     return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
 }
+12
mm/kasan/shadow.c
···
     unsigned long shadow_start, shadow_end;
     int ret;
 
+    if (!kasan_arch_is_ready())
+        return 0;
+
     if (!is_vmalloc_or_module_addr((void *)addr))
         return 0;
 
···
     unsigned long region_start, region_end;
     unsigned long size;
 
+    if (!kasan_arch_is_ready())
+        return;
+
     region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
     region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);
 
···
      * with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
      */
 
+    if (!kasan_arch_is_ready())
+        return (void *)start;
+
     if (!is_vmalloc_or_module_addr(start))
         return (void *)start;
 
···
  */
 void __kasan_poison_vmalloc(const void *start, unsigned long size)
 {
+    if (!kasan_arch_is_ready())
+        return;
+
     if (!is_vmalloc_or_module_addr(start))
         return;
 
+5 -2
mm/ksm.c
···
         new_page = NULL;
     }
     if (new_page) {
-        copy_user_highpage(new_page, page, address, vma);
-
+        if (copy_mc_user_highpage(new_page, page, address, vma)) {
+            put_page(new_page);
+            memory_failure_queue(page_to_pfn(page), 0);
+            return ERR_PTR(-EHWPOISON);
+        }
         SetPageDirty(new_page);
         __SetPageUptodate(new_page);
         __SetPageLocked(new_page);
+3
mm/memory.c
···
         if (unlikely(!page)) {
             ret = VM_FAULT_OOM;
             goto out_page;
+        } else if (unlikely(PTR_ERR(page) == -EHWPOISON)) {
+            ret = VM_FAULT_HWPOISON;
+            goto out_page;
         }
         folio = page_folio(page);
 
+8 -5
mm/shrinker_debug.c
···
 }
 EXPORT_SYMBOL(shrinker_debugfs_rename);
 
-void shrinker_debugfs_remove(struct shrinker *shrinker)
+struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
 {
+    struct dentry *entry = shrinker->debugfs_entry;
+
     lockdep_assert_held(&shrinker_rwsem);
 
     kfree_const(shrinker->name);
     shrinker->name = NULL;
 
-    if (!shrinker->debugfs_entry)
-        return;
+    if (entry) {
+        ida_free(&shrinker_debugfs_ida, shrinker->debugfs_id);
+        shrinker->debugfs_entry = NULL;
+    }
 
-    debugfs_remove_recursive(shrinker->debugfs_entry);
-    ida_free(&shrinker_debugfs_ida, shrinker->debugfs_id);
+    return entry;
 }
 
 static int __init shrinker_debugfs_init(void)
+14 -6
mm/swapfile.c
···
     struct page *swapcache;
     spinlock_t *ptl;
     pte_t *pte, new_pte;
+    bool hwposioned = false;
     int ret = 1;
 
     swapcache = page;
     page = ksm_might_need_to_copy(page, vma, addr);
     if (unlikely(!page))
         return -ENOMEM;
+    else if (unlikely(PTR_ERR(page) == -EHWPOISON))
+        hwposioned = true;
 
     pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
     if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
···
         goto out;
     }
 
-    if (unlikely(!PageUptodate(page))) {
-        pte_t pteval;
+    if (unlikely(hwposioned || !PageUptodate(page))) {
+        swp_entry_t swp_entry;
 
         dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
-        pteval = swp_entry_to_pte(make_swapin_error_entry());
-        set_pte_at(vma->vm_mm, addr, pte, pteval);
-        swap_free(entry);
+        if (hwposioned) {
+            swp_entry = make_hwpoison_entry(swapcache);
+            page = swapcache;
+        } else {
+            swp_entry = make_swapin_error_entry();
+        }
+        new_pte = swp_entry_to_pte(swp_entry);
         ret = 0;
-        goto out;
+        goto setpte;
     }
 
     /* See do_swap_page() */
···
         new_pte = pte_mksoft_dirty(new_pte);
     if (pte_swp_uffd_wp(*pte))
         new_pte = pte_mkuffd_wp(new_pte);
+setpte:
     set_pte_at(vma->vm_mm, addr, pte, new_pte);
     swap_free(entry);
 out:
+5 -1
mm/vmscan.c
···
  */
 void unregister_shrinker(struct shrinker *shrinker)
 {
+    struct dentry *debugfs_entry;
+
     if (!(shrinker->flags & SHRINKER_REGISTERED))
         return;
 
···
     shrinker->flags &= ~SHRINKER_REGISTERED;
     if (shrinker->flags & SHRINKER_MEMCG_AWARE)
         unregister_memcg_shrinker(shrinker);
-    shrinker_debugfs_remove(shrinker);
+    debugfs_entry = shrinker_debugfs_remove(shrinker);
     up_write(&shrinker_rwsem);
 
+    debugfs_remove_recursive(debugfs_entry);
+
     kfree(shrinker->nr_deferred);
     shrinker->nr_deferred = NULL;
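The shrinker pieces above follow a detach-under-lock, release-outside-lock pattern: the dentry is unhooked from the shrinker while shrinker_rwsem is held, but the blocking debugfs_remove_recursive() call only runs after up_write(), so it can no longer deadlock against a debugfs file handler that also takes shrinker_rwsem. A stand-alone userspace sketch of the same pattern, with hypothetical obj/resource names rather than kernel APIs:

    #include <pthread.h>
    #include <stdlib.h>

    struct obj {
        pthread_mutex_t lock;
        char *resource;              /* stands in for the debugfs dentry */
    };

    void obj_teardown(struct obj *o)
    {
        char *detached;

        pthread_mutex_lock(&o->lock);
        detached = o->resource;      /* unhook under the lock, like shrinker_debugfs_remove() */
        o->resource = NULL;
        pthread_mutex_unlock(&o->lock);

        /* Potentially blocking cleanup runs outside the lock,
         * like debugfs_remove_recursive() in unregister_shrinker(). */
        free(detached);
    }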
+1 -1
scripts/gdb/linux/cpus.py
···
     task_ptr_type = task_type.get_type().pointer()
 
     if utils.is_target_arch("x86"):
-        var_ptr = gdb.parse_and_eval("&current_task")
+        var_ptr = gdb.parse_and_eval("&pcpu_hot.current_task")
         return per_cpu(var_ptr, cpu).dereference()
     elif utils.is_target_arch("aarch64"):
         current_task_addr = gdb.parse_and_eval("$SP_EL0")