Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

mm/fault: convert remaining simple cases to lock_mm_and_find_vma()

This does the simple pattern conversion of alpha, arc, csky, hexagon,
loongarch, nios2, sh, sparc32, and xtensa to the lock_mm_and_find_vma()
helper. They all have the regular fault handling pattern without odd
special cases.
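
For reference, this is the shape of the conversion (a condensed sketch
pieced together from the diffs below; variable and label names vary
slightly per architecture):

	/* Before: each architecture open-coded the lock, lookup and
	 * stack-expansion sequence.
	 */
	retry:
		mmap_read_lock(mm);
		vma = find_vma(mm, address);
		if (!vma)
			goto bad_area;
		if (vma->vm_start > address) {
			if (!(vma->vm_flags & VM_GROWSDOWN))
				goto bad_area;
			if (expand_stack(vma, address))
				goto bad_area;
		}
		/* ... permission checks, then handle_mm_fault() ... */

	/* After: lock_mm_and_find_vma() does the locking, the lookup and
	 * the stack expansion.  On success it returns the vma with the
	 * mmap lock held for read; on failure it returns NULL with the
	 * lock already dropped, hence the new "bad_area_nosemaphore"
	 * error path in each converted handler.
	 */
	retry:
		vma = lock_mm_and_find_vma(mm, address, regs);
		if (unlikely(!vma))
			goto bad_area_nosemaphore;
		/* ... permission checks, then handle_mm_fault() ... */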

The remaining architectures all have something that keeps us from a
straightforward conversion: ia64 and parisc have stacks that can grow
both up and down (and ia64 has special address region checks).
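
To illustrate the grow-up case (a simplified sketch only, not the
literal ia64/parisc code): when the faulting address is not inside any
vma, those handlers must also check whether it sits just above a
VM_GROWSUP vma and expand that one upwards instead:

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma || address < vma->vm_start) {
		/* Not in a vma: the address may still be just above a
		 * grow-up stack segment (illustrative check only).
		 */
		if (!prev_vma || !(prev_vma->vm_flags & VM_GROWSUP) ||
		    expand_upwards(prev_vma, address))
			goto bad_area;
		vma = prev_vma;
	}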

And m68k, microblaze, openrisc, sparc64, and um end up having extra
rules about only expanding the stack down a limited amount below the
user space stack pointer. That is something that x86 used to do too
(long long ago), and it probably could just be skipped, but it still
makes the conversion less than trivial.
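
That check looks roughly like this (paraphrased from m68k, where
rdusp() reads the user stack pointer and the 256-byte slack allows for
instructions that pre-decrement the stack pointer; the constant and
the stack pointer access differ on the other architectures):

	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (user_mode(regs)) {
		/* Expanding the stack for an access far below the user
		 * stack pointer is almost certainly a bug, so refuse.
		 */
		if (address + 256 < rdusp())
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;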

Note that this conversion was done manually and, with the exception of
alpha, without any build testing, because I have a fairly limited
cross-building environment. The cases are all simple, and I went
through the changes several times, but...

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

+45 -124
+1
arch/alpha/Kconfig
···
 	select HAS_IOPORT
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_MOD_ARCH_SPECIFIC
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select ODD_RT_SIGACTION
 	select OLD_SIGSUSPEND
+3 -10
arch/alpha/mm/fault.c
···
 	flags |= FAULT_FLAG_USER;
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-	mmap_read_lock(mm);
-	vma = find_vma(mm, address);
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
+		goto bad_area_nosemaphore;
 
 	/* Ok, we have a good vm_area for this memory access, so
 	   we can handle it.  */
- good_area:
 	si_code = SEGV_ACCERR;
 	if (cause < 0) {
 		if (!(vma->vm_flags & VM_EXEC))
···
 bad_area:
 	mmap_read_unlock(mm);
 
+bad_area_nosemaphore:
 	if (user_mode(regs))
 		goto do_sigsegv;
+1
arch/arc/Kconfig
···
 	select HAVE_PERF_EVENTS
 	select HAVE_SYSCALL_TRACEPOINTS
 	select IRQ_DOMAIN
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select OF
 	select OF_EARLY_FLATTREE
+3 -8
arch/arc/mm/fault.c
···
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-	mmap_read_lock(mm);
-
-	vma = find_vma(mm, address);
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (!vma)
-		goto bad_area;
-	if (unlikely(address < vma->vm_start)) {
-		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
-			goto bad_area;
-	}
+		goto bad_area_nosemaphore;
 
 	/*
 	 * vm_area is good, now check permissions for this memory access
···
 bad_area:
 	mmap_read_unlock(mm);
 
+bad_area_nosemaphore:
 	/*
 	 * Major/minor page fault accounting
 	 * (in case of retry we only land here once)
+1
arch/csky/Kconfig
···
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
+	select LOCK_MM_AND_FIND_VMA
 	select MAY_HAVE_SPARSE_IRQ
 	select MODULES_USE_ELF_RELA if MODULES
 	select OF
+5 -17
arch/csky/mm/fault.c
···
 	BUG();
 }
 
-static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
+static inline void bad_area_nosemaphore(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
 {
 	/*
 	 * Something tried to access memory that isn't in our memory map.
 	 * Fix it, but check if it's kernel or user first.
 	 */
-	mmap_read_unlock(mm);
 	/* User mode accesses just cause a SIGSEGV */
 	if (user_mode(regs)) {
 		do_trap(regs, SIGSEGV, code, addr);
···
 	if (is_write(regs))
 		flags |= FAULT_FLAG_WRITE;
 retry:
-	mmap_read_lock(mm);
-	vma = find_vma(mm, addr);
+	vma = lock_mm_and_find_vma(mm, addr, regs);
 	if (unlikely(!vma)) {
-		bad_area(regs, mm, code, addr);
-		return;
-	}
-	if (likely(vma->vm_start <= addr))
-		goto good_area;
-	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-		bad_area(regs, mm, code, addr);
-		return;
-	}
-	if (unlikely(expand_stack(vma, addr))) {
-		bad_area(regs, mm, code, addr);
+		bad_area_nosemaphore(regs, mm, code, addr);
 		return;
 	}
 
···
 	 * Ok, we have a good vm_area for this memory access, so
 	 * we can handle it.
 	 */
-good_area:
 	code = SEGV_ACCERR;
 
 	if (unlikely(access_error(regs, vma))) {
-		bad_area(regs, mm, code, addr);
+		mmap_read_unlock(mm);
+		bad_area_nosemaphore(regs, mm, code, addr);
 		return;
 	}
+1
arch/hexagon/Kconfig
···
 	select GENERIC_SMP_IDLE_THREAD
 	select STACKTRACE_SUPPORT
 	select GENERIC_CLOCKEVENTS_BROADCAST
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select GENERIC_CPU_DEVICES
 	select ARCH_WANT_LD_ORPHAN_WARN
+4 -14
arch/hexagon/mm/vm_fault.c
···
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-	mmap_read_lock(mm);
-	vma = find_vma(mm, address);
-	if (!vma)
-		goto bad_area;
+	vma = lock_mm_and_find_vma(mm, address, regs);
+	if (unlikely(!vma))
+		goto bad_area_nosemaphore;
 
-	if (vma->vm_start <= address)
-		goto good_area;
-
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-
-	if (expand_stack(vma, address))
-		goto bad_area;
-
-good_area:
 	/* Address space is OK.  Now check access rights. */
 	si_code = SEGV_ACCERR;
···
 bad_area:
 	mmap_read_unlock(mm);
 
+bad_area_nosemaphore:
 	if (user_mode(regs)) {
 		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
 		return;
+1
arch/loongarch/Kconfig
···
 	select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
 	select IRQ_FORCED_THREADING
 	select IRQ_LOONGARCH_CPU
+	select LOCK_MM_AND_FIND_VMA
 	select MMU_GATHER_MERGE_VMAS if MMU
 	select MODULES_USE_ELF_RELA if MODULES
 	select NEED_PER_CPU_EMBED_FIRST_CHUNK
+6 -10
arch/loongarch/mm/fault.c
···
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-	mmap_read_lock(mm);
-	vma = find_vma(mm, address);
-	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (!expand_stack(vma, address))
-		goto good_area;
+	vma = lock_mm_and_find_vma(mm, address, regs);
+	if (unlikely(!vma))
+		goto bad_area_nosemaphore;
+	goto good_area;
+
 /*
  * Something tried to access memory that isn't in our memory map..
  * Fix it, but check if it's kernel or user first..
  */
 bad_area:
 	mmap_read_unlock(mm);
+bad_area_nosemaphore:
 	do_sigsegv(regs, write, address, si_code);
 	return;
+1
arch/nios2/Kconfig
···
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_KGDB
 	select IRQ_DOMAIN
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select OF
 	select OF_EARLY_FLATTREE
+2 -15
arch/nios2/mm/fault.c
···
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
-	if (!mmap_read_trylock(mm)) {
-		if (!user_mode(regs) && !search_exception_tables(regs->ea))
-			goto bad_area_nosemaphore;
 retry:
-		mmap_read_lock(mm);
-	}
-
-	vma = find_vma(mm, address);
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
+		goto bad_area_nosemaphore;
 	/*
 	 * Ok, we have a good vm_area for this memory access, so
 	 * we can handle it..
 	 */
-good_area:
 	code = SEGV_ACCERR;
 
 	switch (cause) {
+1
arch/sh/Kconfig
···
 	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
 	select IRQ_FORCED_THREADING
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select NEED_SG_DMA_LENGTH
 	select NO_DMA if !MMU && !DMA_COHERENT
+2 -15
arch/sh/mm/fault.c
···
 	}
 
 retry:
-	mmap_read_lock(mm);
-
-	vma = find_vma(mm, address);
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (unlikely(!vma)) {
-		bad_area(regs, error_code, address);
-		return;
-	}
-	if (likely(vma->vm_start <= address))
-		goto good_area;
-	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-		bad_area(regs, error_code, address);
-		return;
-	}
-	if (unlikely(expand_stack(vma, address))) {
-		bad_area(regs, error_code, address);
+		bad_area_nosemaphore(regs, error_code, address);
 		return;
 	}
 
···
 	 * Ok, we have a good vm_area for this memory access, so
 	 * we can handle it..
 	 */
-good_area:
 	if (unlikely(access_error(error_code, vma))) {
 		bad_area_access_error(regs, error_code, address);
 		return;
+1
arch/sparc/Kconfig
···
 	select DMA_DIRECT_REMAP
 	select GENERIC_ATOMIC64
 	select HAVE_UID16
+	select LOCK_MM_AND_FIND_VMA
 	select OLD_SIGACTION
 	select ZONE_DMA
+8 -24
arch/sparc/mm/fault_32.c
···
 	if (pagefault_disabled() || !mm)
 		goto no_context;
 
+	if (!from_user && address >= PAGE_OFFSET)
+		goto no_context;
+
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 retry:
-	mmap_read_lock(mm);
-
-	if (!from_user && address >= PAGE_OFFSET)
-		goto bad_area;
-
-	vma = find_vma(mm, address);
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
+		goto bad_area_nosemaphore;
 	/*
 	 * Ok, we have a good vm_area for this memory access, so
 	 * we can handle it..
 	 */
-good_area:
 	code = SEGV_ACCERR;
 	if (write) {
 		if (!(vma->vm_flags & VM_WRITE))
···
 
 	code = SEGV_MAPERR;
 
-	mmap_read_lock(mm);
-	vma = find_vma(mm, address);
+	vma = lock_mm_and_find_vma(mm, address, NULL);
 	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
-good_area:
+		goto bad_area_nosemaphore;
 	code = SEGV_ACCERR;
 	if (write) {
 		if (!(vma->vm_flags & VM_WRITE))
···
 	return;
 bad_area:
 	mmap_read_unlock(mm);
+bad_area_nosemaphore:
 	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
 	return;
+1
arch/xtensa/Kconfig
···
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_VIRT_CPU_ACCOUNTING_GEN
 	select IRQ_DOMAIN
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select PERF_USE_VMALLOC
 	select TRACE_IRQFLAGS_SUPPORT
+3 -11
arch/xtensa/mm/fault.c
···
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 retry:
-	mmap_read_lock(mm);
-	vma = find_vma(mm, address);
-
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
+		goto bad_area_nosemaphore;
 
 	/* Ok, we have a good vm_area for this memory access, so
 	 * we can handle it..
 	 */
 
-good_area:
 	code = SEGV_ACCERR;
 
 	if (is_write) {
···
 	 */
 bad_area:
 	mmap_read_unlock(mm);
+bad_area_nosemaphore:
 	if (user_mode(regs)) {
 		force_sig_fault(SIGSEGV, code, (void *) address);
 		return;