Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

mm: do not pass mm_struct into handle_mm_fault

We always have vma->vm_mm around.

Link: http://lkml.kernel.org/r/1466021202-61880-8-git-send-email-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
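
The one-line rationale expands to this: every vm_area_struct carries a back-pointer to its owning address space in ->vm_mm, so any caller that already holds a vma can reach the mm without a separate argument. A minimal standalone sketch of the pattern (simplified struct and a stubbed body; handle_mm_fault_old/_new are illustrative names, not the kernel API):

/* Sketch only: simplified types and a stubbed body, not the kernel source. */
struct mm_struct;

struct vm_area_struct {
        struct mm_struct *vm_mm;        /* owning address space */
        unsigned long vm_start, vm_end;
};

/* Before this commit: the mm was passed alongside the vma. */
int handle_mm_fault_old(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, unsigned int flags);

/* After: the mm is derived from the vma inside the function. */
int handle_mm_fault_new(struct vm_area_struct *vma, unsigned long address,
                        unsigned int flags)
{
        struct mm_struct *mm = vma->vm_mm;      /* valid for any vma in an mm */

        (void)mm;       /* stand-in for the real fault-handling work */
        return 0;
}

Besides trimming one argument from every call site, deriving the mm internally arguably removes a class of mistakes where a caller could pass an mm that disagrees with vma->vm_mm, which matters for faults taken on behalf of another process (note the FAULT_FLAG_REMOTE use in the mm/ksm.c hunk below).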

Authored by Kirill A. Shutemov, committed by Linus Torvalds
dcddffd4 6fb8ddfc

36 files changed, 48 insertions(+), 51 deletions(-)
+1 -1
arch/alpha/mm/fault.c
@@ -147,7 +147,7 @@
 	/* If for any reason at all we couldn't handle the fault,
 	   make sure we exit gracefully rather than endlessly redo
 	   the fault.  */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
+1 -1
arch/arc/mm/fault.c
@@ -137,7 +137,7 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	/* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
 	if (unlikely(fatal_signal_pending(current))) {
+1 -1
arch/arm/mm/fault.c
@@ -243,7 +243,7 @@
 		goto out;
 	}
 
-	return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+	return handle_mm_fault(vma, addr & PAGE_MASK, flags);
 
 check_stack:
 	/* Don't allow expansion below FIRST_USER_ADDRESS */
+1 -1
arch/arm64/mm/fault.c
@@ -233,7 +233,7 @@
 		goto out;
 	}
 
-	return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);
+	return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags);
 
 check_stack:
 	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
+1 -1
arch/avr32/mm/fault.c
@@ -134,7 +134,7 @@
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
+1 -1
arch/cris/mm/fault.c
@@ -168,7 +168,7 @@
 	 * the fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
+1 -1
arch/frv/mm/fault.c
@@ -164,7 +164,7 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, ear0, flags);
+	fault = handle_mm_fault(vma, ear0, flags);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+1 -1
arch/hexagon/mm/vm_fault.c
@@ -101,7 +101,7 @@
 			break;
 		}
 
-		fault = handle_mm_fault(mm, vma, address, flags);
+		fault = handle_mm_fault(vma, address, flags);
 
 		if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 			return;
+1 -1
arch/ia64/mm/fault.c
@@ -159,7 +159,7 @@
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
+1 -1
arch/m32r/mm/fault.c
@@ -196,7 +196,7 @@
 	 */
 	addr = (address & PAGE_MASK);
 	set_thread_fault_code(error_code);
-	fault = handle_mm_fault(mm, vma, addr, flags);
+	fault = handle_mm_fault(vma, addr, flags);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+1 -1
arch/m68k/mm/fault.c
@@ -136,7 +136,7 @@
 	 * the fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	pr_debug("handle_mm_fault returns %d\n", fault);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+1 -1
arch/metag/mm/fault.c
@@ -133,7 +133,7 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return 0;
+1 -1
arch/microblaze/mm/fault.c
@@ -216,7 +216,7 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
+1 -1
arch/mips/mm/fault.c
@@ -153,7 +153,7 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
+1 -1
arch/mn10300/mm/fault.c
@@ -254,7 +254,7 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
+1 -1
arch/nios2/mm/fault.c
@@ -131,7 +131,7 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
+1 -1
arch/openrisc/mm/fault.c
@@ -163,7 +163,7 @@
 	 * the fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
+1 -1
arch/parisc/mm/fault.c
@@ -239,7 +239,7 @@
 	 * fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
+1 -1
arch/powerpc/mm/copro_fault.c
@@ -75,7 +75,7 @@
 	}
 
 	ret = 0;
-	*flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
+	*flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(*flt & VM_FAULT_ERROR)) {
 		if (*flt & VM_FAULT_OOM) {
 			ret = -ENOMEM;
+1 -1
arch/powerpc/mm/fault.c
@@ -429,7 +429,7 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
 		if (fault & VM_FAULT_SIGSEGV)
 			goto bad_area;
+1 -1
arch/s390/mm/fault.c
@@ -456,7 +456,7 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	/* No reason to continue if interrupted by SIGKILL. */
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
 		fault = VM_FAULT_SIGNAL;
+1 -1
arch/score/mm/fault.c
@@ -111,7 +111,7 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+1 -1
arch/sh/mm/fault.c
@@ -487,7 +487,7 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
 		if (mm_fault_error(regs, error_code, address, fault))
+2 -2
arch/sparc/mm/fault_32.c
@@ -241,7 +241,7 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
@@ -411,7 +411,7 @@
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto bad_area;
 	}
-	switch (handle_mm_fault(mm, vma, address, flags)) {
+	switch (handle_mm_fault(vma, address, flags)) {
 	case VM_FAULT_SIGBUS:
 	case VM_FAULT_OOM:
 		goto do_sigbus;
+1 -1
arch/sparc/mm/fault_64.c
@@ -436,7 +436,7 @@
 		goto bad_area;
 	}
 
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		goto exit_exception;
+1 -1
arch/tile/mm/fault.c
@@ -434,7 +434,7 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return 0;
+1 -1
arch/um/kernel/trap.c
@@ -73,7 +73,7 @@
 	do {
 		int fault;
 
-		fault = handle_mm_fault(mm, vma, address, flags);
+		fault = handle_mm_fault(vma, address, flags);
 
 		if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 			goto out_nosemaphore;
+1 -1
arch/unicore32/mm/fault.c
@@ -194,7 +194,7 @@
 	 * If for any reason at all we couldn't handle the fault, make
 	 * sure we exit gracefully rather than endlessly redo the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+	fault = handle_mm_fault(vma, addr & PAGE_MASK, flags);
 	return fault;
 
 check_stack:
+1 -1
arch/x86/mm/fault.c
@@ -1353,7 +1353,7 @@
 	 * the fault.  Since we never set FAULT_FLAG_RETRY_NOWAIT, if
 	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	major |= fault & VM_FAULT_MAJOR;
 
 	/*
+1 -1
arch/xtensa/mm/fault.c
@@ -110,7 +110,7 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
+1 -2
drivers/iommu/amd_iommu_v2.c
@@ -538,8 +538,7 @@
 	if (access_error(vma, fault))
 		goto out;
 
-	ret = handle_mm_fault(mm, vma, address, flags);
-
+	ret = handle_mm_fault(vma, address, flags);
 out:
 	up_read(&mm->mmap_sem);
 
+1 -1
drivers/iommu/intel-svm.c
@@ -583,7 +583,7 @@
 		if (access_error(vma, req))
 			goto invalid;
 
-		ret = handle_mm_fault(svm->mm, vma, address,
+		ret = handle_mm_fault(vma, address,
 				      req->wr_req ? FAULT_FLAG_WRITE : 0);
 		if (ret & VM_FAULT_ERROR)
 			goto invalid;
+4 -5
include/linux/mm.h
@@ -1215,15 +1215,14 @@
 int invalidate_inode_page(struct page *page);
 
 #ifdef CONFIG_MMU
-extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, unsigned int flags);
+extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+		unsigned int flags);
 extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 			    unsigned long address, unsigned int fault_flags,
 			    bool *unlocked);
 #else
-static inline int handle_mm_fault(struct mm_struct *mm,
-		struct vm_area_struct *vma, unsigned long address,
-		unsigned int flags)
+static inline int handle_mm_fault(struct vm_area_struct *vma,
+		unsigned long address, unsigned int flags)
 {
 	/* should never happen if there's no MMU */
 	BUG();
+2 -3
mm/gup.c
@@ -352,7 +352,6 @@
 static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 		unsigned long address, unsigned int *flags, int *nonblocking)
 {
-	struct mm_struct *mm = vma->vm_mm;
 	unsigned int fault_flags = 0;
 	int ret;
 
@@ -376,7 +377,7 @@
 		fault_flags |= FAULT_FLAG_TRIED;
 	}
 
-	ret = handle_mm_fault(mm, vma, address, fault_flags);
+	ret = handle_mm_fault(vma, address, fault_flags);
 	if (ret & VM_FAULT_ERROR) {
 		if (ret & VM_FAULT_OOM)
 			return -ENOMEM;
@@ -691,7 +692,7 @@
 	if (!vma_permits_fault(vma, fault_flags))
 		return -EFAULT;
 
-	ret = handle_mm_fault(mm, vma, address, fault_flags);
+	ret = handle_mm_fault(vma, address, fault_flags);
 	major |= ret & VM_FAULT_MAJOR;
 	if (ret & VM_FAULT_ERROR) {
 		if (ret & VM_FAULT_OOM)
+2 -3
mm/ksm.c
@@ -376,9 +376,8 @@
 		if (IS_ERR_OR_NULL(page))
 			break;
 		if (PageKsm(page))
-			ret = handle_mm_fault(vma->vm_mm, vma, addr,
-							FAULT_FLAG_WRITE |
-							FAULT_FLAG_REMOTE);
+			ret = handle_mm_fault(vma, addr,
+					FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE);
 		else
 			ret = VM_FAULT_WRITE;
 		put_page(page);
+7 -6
mm/memory.c
@@ -3420,9 +3420,10 @@
  * The mmap_sem may have been released depending on flags and our
  * return value.  See filemap_fault() and __lock_page_or_retry().
  */
-static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-			     unsigned long address, unsigned int flags)
+static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+		unsigned int flags)
 {
+	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
@@ -3510,15 +3509,15 @@
  * The mmap_sem may have been released depending on flags and our
 * return value.  See filemap_fault() and __lock_page_or_retry().
 */
-int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-		    unsigned long address, unsigned int flags)
+int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+		unsigned int flags)
 {
 	int ret;
 
 	__set_current_state(TASK_RUNNING);
 
 	count_vm_event(PGFAULT);
-	mem_cgroup_count_vm_event(mm, PGFAULT);
+	mem_cgroup_count_vm_event(vma->vm_mm, PGFAULT);
 
 	/* do counter updates before entering really critical section. */
 	check_sync_rss_stat(current);
@@ -3530,7 +3529,7 @@
 	if (flags & FAULT_FLAG_USER)
 		mem_cgroup_oom_enable();
 
-	ret = __handle_mm_fault(mm, vma, address, flags);
+	ret = __handle_mm_fault(vma, address, flags);
 
 	if (flags & FAULT_FLAG_USER) {
 		mem_cgroup_oom_disable();