Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: mte: Avoid the racy walk of the vma list during core dump

The MTE coredump code in arch/arm64/kernel/elfcore.c iterates over the
vma list without the mmap_lock held. This can race with another process
or userfaultfd concurrently modifying the vma list. Change the
for_each_mte_vma macro and its callers to instead use the vma snapshot
taken by dump_vma_snapshot() and stored in the cprm object.

Fixes: 6dd8b1a0b6cb ("arm64: mte: Dump the MTE tags in the core file")
Cc: <stable@vger.kernel.org> # 5.18.x
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Seth Jenkins <sethjenkins@google.com>
Suggested-by: Seth Jenkins <sethjenkins@google.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20221222181251.1345752-4-catalin.marinas@arm.com
Signed-off-by: Will Deacon <will@kernel.org>

Authored by Catalin Marinas; committed by Will Deacon.
4f4c549f 19e183b5

Diffstat: +26 -30 (26 insertions, 30 deletions)
arch/arm64/kernel/elfcore.c
diff --git a/arch/arm64/kernel/elfcore.c b/arch/arm64/kernel/elfcore.c
--- a/arch/arm64/kernel/elfcore.c
+++ b/arch/arm64/kernel/elfcore.c
@@ -8,28 +8,27 @@
 #include <asm/cpufeature.h>
 #include <asm/mte.h>
 
-#define for_each_mte_vma(vmi, vma)					\
+#define for_each_mte_vma(cprm, i, m)					\
 	if (system_supports_mte())					\
-		for_each_vma(vmi, vma)					\
-			if (vma->vm_flags & VM_MTE)
+		for (i = 0, m = cprm->vma_meta;				\
+		     i < cprm->vma_count;				\
+		     i++, m = cprm->vma_meta + i)			\
+			if (m->flags & VM_MTE)
 
-static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
+static unsigned long mte_vma_tag_dump_size(struct core_vma_metadata *m)
 {
-	if (vma->vm_flags & VM_DONTDUMP)
-		return 0;
-
-	return vma_pages(vma) * MTE_PAGE_TAG_STORAGE;
+	return (m->dump_size >> PAGE_SHIFT) * MTE_PAGE_TAG_STORAGE;
 }
 
 /* Derived from dump_user_range(); start/end must be page-aligned */
 static int mte_dump_tag_range(struct coredump_params *cprm,
-			      unsigned long start, unsigned long end)
+			      unsigned long start, unsigned long len)
 {
 	int ret = 1;
 	unsigned long addr;
 	void *tags = NULL;
 
-	for (addr = start; addr < end; addr += PAGE_SIZE) {
+	for (addr = start; addr < start + len; addr += PAGE_SIZE) {
 		struct page *page = get_dump_page(addr);
 
 		/*
@@ -77,11 +78,11 @@
 
 Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm)
 {
-	struct vm_area_struct *vma;
+	int i;
+	struct core_vma_metadata *m;
 	int vma_count = 0;
-	VMA_ITERATOR(vmi, current->mm, 0);
 
-	for_each_mte_vma(vmi, vma)
+	for_each_mte_vma(cprm, i, m)
 		vma_count++;
 
 	return vma_count;
@@ -89,18 +90,18 @@
 
 int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
 {
-	struct vm_area_struct *vma;
-	VMA_ITERATOR(vmi, current->mm, 0);
+	int i;
+	struct core_vma_metadata *m;
 
-	for_each_mte_vma(vmi, vma) {
+	for_each_mte_vma(cprm, i, m) {
 		struct elf_phdr phdr;
 
 		phdr.p_type = PT_AARCH64_MEMTAG_MTE;
 		phdr.p_offset = offset;
-		phdr.p_vaddr = vma->vm_start;
+		phdr.p_vaddr = m->start;
 		phdr.p_paddr = 0;
-		phdr.p_filesz = mte_vma_tag_dump_size(vma);
-		phdr.p_memsz = vma->vm_end - vma->vm_start;
+		phdr.p_filesz = mte_vma_tag_dump_size(m);
+		phdr.p_memsz = m->end - m->start;
 		offset += phdr.p_filesz;
 		phdr.p_flags = 0;
 		phdr.p_align = 0;
@@ -114,26 +115,23 @@
 
 size_t elf_core_extra_data_size(struct coredump_params *cprm)
 {
-	struct vm_area_struct *vma;
+	int i;
+	struct core_vma_metadata *m;
 	size_t data_size = 0;
-	VMA_ITERATOR(vmi, current->mm, 0);
 
-	for_each_mte_vma(vmi, vma)
-		data_size += mte_vma_tag_dump_size(vma);
+	for_each_mte_vma(cprm, i, m)
+		data_size += mte_vma_tag_dump_size(m);
 
 	return data_size;
 }
 
 int elf_core_write_extra_data(struct coredump_params *cprm)
 {
-	struct vm_area_struct *vma;
-	VMA_ITERATOR(vmi, current->mm, 0);
+	int i;
+	struct core_vma_metadata *m;
 
-	for_each_mte_vma(vmi, vma) {
-		if (vma->vm_flags & VM_DONTDUMP)
-			continue;
-
-		if (!mte_dump_tag_range(cprm, vma->vm_start, vma->vm_end))
+	for_each_mte_vma(cprm, i, m) {
+		if (!mte_dump_tag_range(cprm, m->start, m->dump_size))
 			return 0;
 	}
 