Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: convert return type of handle_mm_fault() caller to vm_fault_t

Use new return type vm_fault_t for fault handler. For now, this is just
documenting that the function returns a VM_FAULT value rather than an
errno. Once all instances are converted, vm_fault_t will become a
distinct type.

Ref: commit 1c8f422059ae ("mm: change return type to vm_fault_t")

In this patch, all the callers of handle_mm_fault() are changed to use the
vm_fault_t return type.

Link: http://lkml.kernel.org/r/20180617084810.GA6730@jordon-HP-15-Notebook-PC
Signed-off-by: Souptick Joarder <jrdr.linux@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Michal Simek <monstr@monstr.eu>
Cc: James Hogan <jhogan@kernel.org>
Cc: Ley Foon Tan <lftan@altera.com>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: James E.J. Bottomley <jejb@parisc-linux.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Palmer Dabbelt <palmer@sifive.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: David S. Miller <davem@davemloft.net>
Cc: Richard Weinberger <richard@nod.at>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: "Levin, Alexander (Sasha Levin)" <alexander.levin@verizon.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Souptick Joarder and committed by Linus Torvalds
50a7ca3c 0882ff91

+69 -51
+2 -1
arch/alpha/mm/fault.c
··· 87 87 struct vm_area_struct * vma; 88 88 struct mm_struct *mm = current->mm; 89 89 const struct exception_table_entry *fixup; 90 - int fault, si_code = SEGV_MAPERR; 90 + int si_code = SEGV_MAPERR; 91 + vm_fault_t fault; 91 92 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 92 93 93 94 /* As of EV6, a load into $31/$f31 is a prefetch, and never faults
+3 -1
arch/arc/mm/fault.c
··· 15 15 #include <linux/uaccess.h> 16 16 #include <linux/kdebug.h> 17 17 #include <linux/perf_event.h> 18 + #include <linux/mm_types.h> 18 19 #include <asm/pgalloc.h> 19 20 #include <asm/mmu.h> 20 21 ··· 67 66 struct task_struct *tsk = current; 68 67 struct mm_struct *mm = tsk->mm; 69 68 siginfo_t info; 70 - int fault, ret; 69 + int ret; 70 + vm_fault_t fault; 71 71 int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */ 72 72 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 73 73
+4 -3
arch/arm/mm/fault.c
··· 224 224 return vma->vm_flags & mask ? false : true; 225 225 } 226 226 227 - static int __kprobes 227 + static vm_fault_t __kprobes 228 228 __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, 229 229 unsigned int flags, struct task_struct *tsk) 230 230 { 231 231 struct vm_area_struct *vma; 232 - int fault; 232 + vm_fault_t fault; 233 233 234 234 vma = find_vma(mm, addr); 235 235 fault = VM_FAULT_BADMAP; ··· 264 264 { 265 265 struct task_struct *tsk; 266 266 struct mm_struct *mm; 267 - int fault, sig, code; 267 + int sig, code; 268 + vm_fault_t fault; 268 269 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 269 270 270 271 if (notify_page_fault(regs, fsr))
+3 -3
arch/arm64/mm/fault.c
··· 379 379 #define VM_FAULT_BADMAP 0x010000 380 380 #define VM_FAULT_BADACCESS 0x020000 381 381 382 - static int __do_page_fault(struct mm_struct *mm, unsigned long addr, 382 + static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr, 383 383 unsigned int mm_flags, unsigned long vm_flags, 384 384 struct task_struct *tsk) 385 385 { 386 386 struct vm_area_struct *vma; 387 - int fault; 387 + vm_fault_t fault; 388 388 389 389 vma = find_vma(mm, addr); 390 390 fault = VM_FAULT_BADMAP; ··· 427 427 struct task_struct *tsk; 428 428 struct mm_struct *mm; 429 429 struct siginfo si; 430 - int fault, major = 0; 430 + vm_fault_t fault, major = 0; 431 431 unsigned long vm_flags = VM_READ | VM_WRITE; 432 432 unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 433 433
+1 -1
arch/hexagon/mm/vm_fault.c
··· 52 52 struct mm_struct *mm = current->mm; 53 53 int si_signo; 54 54 int si_code = SEGV_MAPERR; 55 - int fault; 55 + vm_fault_t fault; 56 56 const struct exception_table_entry *fixup; 57 57 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 58 58
+1 -1
arch/ia64/mm/fault.c
··· 86 86 struct vm_area_struct *vma, *prev_vma; 87 87 struct mm_struct *mm = current->mm; 88 88 unsigned long mask; 89 - int fault; 89 + vm_fault_t fault; 90 90 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 91 91 92 92 mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
+2 -2
arch/m68k/mm/fault.c
··· 70 70 { 71 71 struct mm_struct *mm = current->mm; 72 72 struct vm_area_struct * vma; 73 - int fault; 73 + vm_fault_t fault; 74 74 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 75 75 76 76 pr_debug("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n", ··· 136 136 */ 137 137 138 138 fault = handle_mm_fault(vma, address, flags); 139 - pr_debug("handle_mm_fault returns %d\n", fault); 139 + pr_debug("handle_mm_fault returns %x\n", fault); 140 140 141 141 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) 142 142 return 0;
+1 -1
arch/microblaze/mm/fault.c
··· 90 90 struct mm_struct *mm = current->mm; 91 91 int code = SEGV_MAPERR; 92 92 int is_write = error_code & ESR_S; 93 - int fault; 93 + vm_fault_t fault; 94 94 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 95 95 96 96 regs->ear = address;
+1 -1
arch/mips/mm/fault.c
··· 43 43 struct mm_struct *mm = tsk->mm; 44 44 const int field = sizeof(unsigned long) * 2; 45 45 int si_code; 46 - int fault; 46 + vm_fault_t fault; 47 47 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 48 48 49 49 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
+1 -1
arch/nds32/mm/fault.c
··· 73 73 struct mm_struct *mm; 74 74 struct vm_area_struct *vma; 75 75 int si_code; 76 - int fault; 76 + vm_fault_t fault; 77 77 unsigned int mask = VM_READ | VM_WRITE | VM_EXEC; 78 78 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 79 79
+1 -1
arch/nios2/mm/fault.c
··· 47 47 struct task_struct *tsk = current; 48 48 struct mm_struct *mm = tsk->mm; 49 49 int code = SEGV_MAPERR; 50 - int fault; 50 + vm_fault_t fault; 51 51 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 52 52 53 53 cause >>= 2;
+1 -1
arch/openrisc/mm/fault.c
··· 53 53 struct mm_struct *mm; 54 54 struct vm_area_struct *vma; 55 55 int si_code; 56 - int fault; 56 + vm_fault_t fault; 57 57 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 58 58 59 59 tsk = current;
+1 -1
arch/parisc/mm/fault.c
··· 262 262 struct task_struct *tsk; 263 263 struct mm_struct *mm; 264 264 unsigned long acc_type; 265 - int fault = 0; 265 + vm_fault_t fault = 0; 266 266 unsigned int flags; 267 267 268 268 if (faulthandler_disabled())
+3 -1
arch/powerpc/include/asm/copro.h
··· 10 10 #ifndef _ASM_POWERPC_COPRO_H 11 11 #define _ASM_POWERPC_COPRO_H 12 12 13 + #include <linux/mm_types.h> 14 + 13 15 struct copro_slb 14 16 { 15 17 u64 esid, vsid; 16 18 }; 17 19 18 20 int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea, 19 - unsigned long dsisr, unsigned *flt); 21 + unsigned long dsisr, vm_fault_t *flt); 20 22 21 23 int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb); 22 24
+1 -1
arch/powerpc/mm/copro_fault.c
··· 34 34 * to handle fortunately. 35 35 */ 36 36 int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea, 37 - unsigned long dsisr, unsigned *flt) 37 + unsigned long dsisr, vm_fault_t *flt) 38 38 { 39 39 struct vm_area_struct *vma; 40 40 unsigned long is_write;
+4 -3
arch/powerpc/mm/fault.c
··· 156 156 } 157 157 158 158 static int do_sigbus(struct pt_regs *regs, unsigned long address, 159 - unsigned int fault) 159 + vm_fault_t fault) 160 160 { 161 161 siginfo_t info; 162 162 unsigned int lsb = 0; ··· 187 187 return 0; 188 188 } 189 189 190 - static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault) 190 + static int mm_fault_error(struct pt_regs *regs, unsigned long addr, 191 + vm_fault_t fault) 191 192 { 192 193 /* 193 194 * Kernel page fault interrupted by SIGKILL. We have no reason to ··· 416 415 int is_exec = TRAP(regs) == 0x400; 417 416 int is_user = user_mode(regs); 418 417 int is_write = page_fault_is_write(error_code); 419 - int fault, major = 0; 418 + vm_fault_t fault, major = 0; 420 419 bool must_retry = false; 421 420 422 421 if (notify_page_fault(regs))
+1 -1
arch/powerpc/platforms/cell/spufs/fault.c
··· 111 111 { 112 112 u64 ea, dsisr, access; 113 113 unsigned long flags; 114 - unsigned flt = 0; 114 + vm_fault_t flt = 0; 115 115 int ret; 116 116 117 117 /*
+2 -1
arch/riscv/mm/fault.c
··· 41 41 struct mm_struct *mm; 42 42 unsigned long addr, cause; 43 43 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 44 - int fault, code = SEGV_MAPERR; 44 + int code = SEGV_MAPERR; 45 + vm_fault_t fault; 45 46 46 47 cause = regs->scause; 47 48 addr = regs->sbadaddr;
+8 -5
arch/s390/mm/fault.c
··· 341 341 return -EACCES; 342 342 } 343 343 344 - static noinline void do_fault_error(struct pt_regs *regs, int access, int fault) 344 + static noinline void do_fault_error(struct pt_regs *regs, int access, 345 + vm_fault_t fault) 345 346 { 346 347 int si_code; 347 348 ··· 402 401 * 11 Page translation -> Not present (nullification) 403 402 * 3b Region third trans. -> Not present (nullification) 404 403 */ 405 - static inline int do_exception(struct pt_regs *regs, int access) 404 + static inline vm_fault_t do_exception(struct pt_regs *regs, int access) 406 405 { 407 406 struct gmap *gmap; 408 407 struct task_struct *tsk; ··· 412 411 unsigned long trans_exc_code; 413 412 unsigned long address; 414 413 unsigned int flags; 415 - int fault; 414 + vm_fault_t fault; 416 415 417 416 tsk = current; 418 417 /* ··· 565 564 void do_protection_exception(struct pt_regs *regs) 566 565 { 567 566 unsigned long trans_exc_code; 568 - int access, fault; 567 + int access; 568 + vm_fault_t fault; 569 569 570 570 trans_exc_code = regs->int_parm_long; 571 571 /* ··· 601 599 602 600 void do_dat_exception(struct pt_regs *regs) 603 601 { 604 - int access, fault; 602 + int access; 603 + vm_fault_t fault; 605 604 606 605 access = VM_READ | VM_EXEC | VM_WRITE; 607 606 fault = do_exception(regs, access);
+2 -2
arch/sh/mm/fault.c
··· 313 313 314 314 static noinline int 315 315 mm_fault_error(struct pt_regs *regs, unsigned long error_code, 316 - unsigned long address, unsigned int fault) 316 + unsigned long address, vm_fault_t fault) 317 317 { 318 318 /* 319 319 * Pagefault was interrupted by SIGKILL. We have no reason to ··· 396 396 struct task_struct *tsk; 397 397 struct mm_struct *mm; 398 398 struct vm_area_struct * vma; 399 - int fault; 399 + vm_fault_t fault; 400 400 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 401 401 402 402 tsk = current;
+2 -1
arch/sparc/mm/fault_32.c
··· 166 166 unsigned int fixup; 167 167 unsigned long g2; 168 168 int from_user = !(regs->psr & PSR_PS); 169 - int fault, code; 169 + int code; 170 + vm_fault_t fault; 170 171 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 171 172 172 173 if (text_fault)
+2 -1
arch/sparc/mm/fault_64.c
··· 278 278 struct mm_struct *mm = current->mm; 279 279 struct vm_area_struct *vma; 280 280 unsigned int insn = 0; 281 - int si_code, fault_code, fault; 281 + int si_code, fault_code; 282 + vm_fault_t fault; 282 283 unsigned long address, mm_rss; 283 284 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 284 285
+1 -1
arch/um/kernel/trap.c
··· 72 72 } 73 73 74 74 do { 75 - int fault; 75 + vm_fault_t fault; 76 76 77 77 fault = handle_mm_fault(vma, address, flags); 78 78
+5 -4
arch/unicore32/mm/fault.c
··· 168 168 return vma->vm_flags & mask ? false : true; 169 169 } 170 170 171 - static int __do_pf(struct mm_struct *mm, unsigned long addr, unsigned int fsr, 172 - unsigned int flags, struct task_struct *tsk) 171 + static vm_fault_t __do_pf(struct mm_struct *mm, unsigned long addr, 172 + unsigned int fsr, unsigned int flags, struct task_struct *tsk) 173 173 { 174 174 struct vm_area_struct *vma; 175 - int fault; 175 + vm_fault_t fault; 176 176 177 177 vma = find_vma(mm, addr); 178 178 fault = VM_FAULT_BADMAP; ··· 209 209 { 210 210 struct task_struct *tsk; 211 211 struct mm_struct *mm; 212 - int fault, sig, code; 212 + int sig, code; 213 + vm_fault_t fault; 213 214 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 214 215 215 216 tsk = current;
+3 -2
arch/x86/mm/fault.c
··· 16 16 #include <linux/prefetch.h> /* prefetchw */ 17 17 #include <linux/context_tracking.h> /* exception_enter(), ... */ 18 18 #include <linux/uaccess.h> /* faulthandler_disabled() */ 19 + #include <linux/mm_types.h> 19 20 20 21 #include <asm/cpufeature.h> /* boot_cpu_has, ... */ 21 22 #include <asm/traps.h> /* dotraplinkage, ... */ ··· 1000 999 1001 1000 static noinline void 1002 1001 mm_fault_error(struct pt_regs *regs, unsigned long error_code, 1003 - unsigned long address, u32 *pkey, unsigned int fault) 1002 + unsigned long address, u32 *pkey, vm_fault_t fault) 1004 1003 { 1005 1004 if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) { 1006 1005 no_context(regs, error_code, address, 0, 0); ··· 1214 1213 struct vm_area_struct *vma; 1215 1214 struct task_struct *tsk; 1216 1215 struct mm_struct *mm; 1217 - int fault, major = 0; 1216 + vm_fault_t fault, major = 0; 1218 1217 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 1219 1218 u32 pkey; 1220 1219
+1 -1
arch/xtensa/mm/fault.c
··· 42 42 int code; 43 43 44 44 int is_write, is_exec; 45 - int fault; 45 + vm_fault_t fault; 46 46 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 47 47 48 48 code = SEGV_MAPERR;
+1 -1
drivers/iommu/amd_iommu_v2.c
··· 508 508 { 509 509 struct fault *fault = container_of(work, struct fault, work); 510 510 struct vm_area_struct *vma; 511 - int ret = VM_FAULT_ERROR; 511 + vm_fault_t ret = VM_FAULT_ERROR; 512 512 unsigned int flags = 0; 513 513 struct mm_struct *mm; 514 514 u64 address;
+3 -1
drivers/iommu/intel-svm.c
··· 24 24 #include <linux/pci-ats.h> 25 25 #include <linux/dmar.h> 26 26 #include <linux/interrupt.h> 27 + #include <linux/mm_types.h> 27 28 #include <asm/page.h> 28 29 29 30 #define PASID_ENTRY_P BIT_ULL(0) ··· 595 594 struct vm_area_struct *vma; 596 595 struct page_req_dsc *req; 597 596 struct qi_desc resp; 598 - int ret, result; 597 + int result; 598 + vm_fault_t ret; 599 599 u64 address; 600 600 601 601 handled = 1;
+1 -1
drivers/misc/cxl/fault.c
··· 134 134 135 135 int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar) 136 136 { 137 - unsigned flt = 0; 137 + vm_fault_t flt = 0; 138 138 int result; 139 139 unsigned long access, flags, inv_flags = 0; 140 140
+2 -1
drivers/misc/ocxl/link.c
··· 2 2 // Copyright 2017 IBM Corp. 3 3 #include <linux/sched/mm.h> 4 4 #include <linux/mutex.h> 5 + #include <linux/mm_types.h> 5 6 #include <linux/mmu_context.h> 6 7 #include <asm/copro.h> 7 8 #include <asm/pnv-ocxl.h> ··· 127 126 128 127 static void xsl_fault_handler_bh(struct work_struct *fault_work) 129 128 { 130 - unsigned int flt = 0; 129 + vm_fault_t flt = 0; 131 130 unsigned long access, flags, inv_flags = 0; 132 131 enum xsl_response r; 133 132 struct xsl_fault *fault = container_of(fault_work, struct xsl_fault,
+4 -4
mm/hmm.c
··· 299 299 struct hmm_vma_walk *hmm_vma_walk = walk->private; 300 300 struct hmm_range *range = hmm_vma_walk->range; 301 301 struct vm_area_struct *vma = walk->vma; 302 - int r; 302 + vm_fault_t ret; 303 303 304 304 flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY; 305 305 flags |= write_fault ? FAULT_FLAG_WRITE : 0; 306 - r = handle_mm_fault(vma, addr, flags); 307 - if (r & VM_FAULT_RETRY) 306 + ret = handle_mm_fault(vma, addr, flags); 307 + if (ret & VM_FAULT_RETRY) 308 308 return -EBUSY; 309 - if (r & VM_FAULT_ERROR) { 309 + if (ret & VM_FAULT_ERROR) { 310 310 *pfn = range->values[HMM_PFN_ERROR]; 311 311 return -EFAULT; 312 312 }
+1 -1
mm/ksm.c
··· 470 470 static int break_ksm(struct vm_area_struct *vma, unsigned long addr) 471 471 { 472 472 struct page *page; 473 - int ret = 0; 473 + vm_fault_t ret = 0; 474 474 475 475 do { 476 476 cond_resched();