Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v3.3-rc6 (874 lines, 24 kB)
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * From i386 code copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#include <asm/system.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/traps.h>
#include <asm/syscalls.h>

#include <arch/interrupts.h>

static noinline void force_sig_info_fault(const char *type, int si_signo,
					  int si_code, unsigned long address,
					  int fault_num,
					  struct task_struct *tsk,
					  struct pt_regs *regs)
{
	siginfo_t info;

	if (unlikely(tsk->pid < 2)) {
		panic("Signal %d (code %d) at %#lx sent to %s!",
		      si_signo, si_code & 0xffff, address,
		      is_idle_task(tsk) ? "the idle task" : "init");
	}

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	info.si_trapno = fault_num;
	trace_unhandled_signal(type, regs, address, si_signo);
	force_sig_info(si_signo, &info, tsk);
}

#ifndef __tilegx__
/*
 * Synthesize the fault a PL0 process would get by doing a word-load of
 * an unaligned address or a high kernel address.
 */
SYSCALL_DEFINE2(cmpxchg_badaddr, unsigned long, address,
		struct pt_regs *, regs)
{
	if (address >= PAGE_OFFSET)
		force_sig_info_fault("atomic segfault", SIGSEGV, SEGV_MAPERR,
				     address, INT_DTLB_MISS, current, regs);
	else
		force_sig_info_fault("atomic alignment fault", SIGBUS,
				     BUS_ADRALN, address,
				     INT_UNALIGN_DATA, current, regs);

	/*
	 * Adjust pc to point at the actual instruction, which is unusual
	 * for syscalls normally, but is appropriate when we are claiming
	 * that a syscall swint1 caused a page fault or bus error.
	 */
	regs->pc -= 8;

	/*
	 * Mark this as a caller-save interrupt, like a normal page fault,
	 * so that when we go through the signal handler path we will
	 * properly restore r0, r1, and r2 for the signal handler arguments.
	 */
	regs->flags |= PT_FLAGS_CALLER_SAVES;

	return 0;
}
#endif
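/*
 * Illustrative sketch, not part of the original file: the user-space
 * view of the fault synthesized above.  Assuming a tilepro-style
 * toolchain in which GCC's __sync built-ins enter the kernel's
 * cmpxchg fast path, an unaligned atomic comes back to the process
 * as SIGBUS/BUS_ADRALN via cmpxchg_badaddr() rather than succeeding:
 *
 *	#include <signal.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	static void on_sigbus(int sig)
 *	{
 *		(void)sig;
 *		write(2, "SIGBUS\n", 7);	// async-signal-safe
 *		_exit(1);
 *	}
 *
 *	int main(void)
 *	{
 *		char buf[8];
 *		uint32_t *p = (uint32_t *)(buf + 1);	// deliberately unaligned
 *		signal(SIGBUS, on_sigbus);
 *		__sync_val_compare_and_swap(p, 0, 1);	// kernel sends SIGBUS
 *		return 0;
 *	}
 */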
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;
	if (!pmd_present(*pmd)) {
		set_pmd(pmd, *pmd_k);
		arch_flush_lazy_mmu_mode();
	} else
		BUG_ON(pmd_ptfn(*pmd) != pmd_ptfn(*pmd_k));
	return pmd_k;
}

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
{
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pmd_k = vmalloc_sync_one(pgd, address);
	if (!pmd_k)
		return -1;
	if (pmd_huge(*pmd_k))
		return 0;	/* support TILE huge_vmap() API */
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
}

/* Wait until this PTE has completed migration. */
static void wait_for_migration(pte_t *pte)
{
	if (pte_migrating(*pte)) {
		/*
		 * Wait until the migrater fixes up this pte.
		 * We scale the loop count by the clock rate so we'll wait for
		 * a few seconds here.
		 */
		int retries = 0;
		int bound = get_clock_rate();
		while (pte_migrating(*pte)) {
			barrier();
			if (++retries > bound)
				panic("Hit migrating PTE (%#llx) and"
				      " page PFN %#lx still migrating",
				      pte->val, pte_pfn(*pte));
		}
	}
}

/*
 * It's not generally safe to use "current" to get the page table pointer,
 * since we might be running an oprofile interrupt in the middle of a
 * task switch.
 */
static pgd_t *get_current_pgd(void)
{
	HV_Context ctx = hv_inquire_context();
	unsigned long pgd_pfn = ctx.page_table >> PAGE_SHIFT;
	struct page *pgd_page = pfn_to_page(pgd_pfn);
	BUG_ON(PageHighMem(pgd_page));	/* oops, HIGHPTE? */
	return (pgd_t *) __va(ctx.page_table);
}
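/*
 * Illustrative sketch, not part of the original file: the generic
 * four-level walk that vmalloc_sync_one() above and
 * handle_migrating_pte() below specialize.  The helper name is
 * hypothetical; the accessors are the stock pgtable API.
 */
#if 0
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);	/* top level, per-mm */
	pud_t *pud;
	pmd_t *pmd;

	if (!pgd_present(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);	/* folded away on two-level ports */
	if (!pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, address);	/* no kmap needed */
}
#endif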
/*
 * We can receive a page fault from a migrating PTE at any time.
 * Handle it by just waiting until the fault resolves.
 *
 * It's also possible to get a migrating kernel PTE that resolves
 * itself during the downcall from hypervisor to Linux.  We just check
 * here to see if the PTE seems valid, and if so we retry it.
 *
 * NOTE! We MUST NOT take any locks for this case.  We may be in an
 * interrupt or a critical region, and must do as little as possible.
 * Similarly, we can't use atomic ops here, since we may be handling a
 * fault caused by an atomic op access.
 */
static int handle_migrating_pte(pgd_t *pgd, int fault_num,
				unsigned long address,
				int is_kernel_mode, int write)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;

	if (pgd_addr_invalid(address))
		return 0;

	pgd += pgd_index(address);
	pud = pud_offset(pgd, address);
	if (!pud || !pud_present(*pud))
		return 0;
	pmd = pmd_offset(pud, address);
	if (!pmd || !pmd_present(*pmd))
		return 0;
	pte = pmd_huge_page(*pmd) ? ((pte_t *)pmd) :
		pte_offset_kernel(pmd, address);
	pteval = *pte;
	if (pte_migrating(pteval)) {
		wait_for_migration(pte);
		return 1;
	}

	if (!is_kernel_mode || !pte_present(pteval))
		return 0;
	if (fault_num == INT_ITLB_MISS) {
		if (pte_exec(pteval))
			return 1;
	} else if (write) {
		if (pte_write(pteval))
			return 1;
	} else {
		if (pte_read(pteval))
			return 1;
	}

	return 0;
}

/*
 * This routine is responsible for faulting in user pages.
 * It passes the work off to one of the appropriate routines.
 * It returns true if the fault was successfully handled.
 */
static int handle_page_fault(struct pt_regs *regs,
			     int fault_num,
			     int is_page_fault,
			     unsigned long address,
			     int write)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long stack_offset;
	int fault;
	int si_code;
	int is_kernel_mode;
	pgd_t *pgd;

	/* on TILE, protection faults are always writes */
	if (!is_page_fault)
		write = 1;

	is_kernel_mode = (EX1_PL(regs->ex1) != USER_PL);

	tsk = validate_current();

	/*
	 * Check to see if we might be overwriting the stack, and bail
	 * out if so.  The page fault code is a relatively likely
	 * place to get trapped in an infinite regress, and once we
	 * overwrite the whole stack, it becomes very hard to recover.
	 */
	stack_offset = stack_pointer & (THREAD_SIZE-1);
	if (stack_offset < THREAD_SIZE / 8) {
		pr_alert("Potential stack overrun: sp %#lx\n",
			 stack_pointer);
		show_regs(regs);
		pr_alert("Killing current process %d/%s\n",
			 tsk->pid, tsk->comm);
		do_group_exit(SIGKILL);
	}

	/*
	 * Early on, we need to check for migrating PTE entries;
	 * see homecache.c.  If we find a migrating PTE, we wait until
	 * the backing page claims to be done migrating, then we proceed.
	 * For kernel PTEs, we rewrite the PTE and return and retry.
	 * Otherwise, we treat the fault like a normal "no PTE" fault,
	 * rather than trying to patch up the existing PTE.
	 */
	pgd = get_current_pgd();
	if (handle_migrating_pte(pgd, fault_num, address,
				 is_kernel_mode, write))
		return 1;

	si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was not a protection fault.
	 */
	if (unlikely(address >= TASK_SIZE &&
		     !is_arch_mappable_range(address, 0))) {
		if (is_kernel_mode && is_page_fault &&
		    vmalloc_fault(pgd, address) >= 0)
			return 1;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		mm = NULL;	/* happy compiler */
		vma = NULL;
		goto bad_area_nosemaphore;
	}

	/*
	 * If we're trying to touch user-space addresses, we must
	 * be either at PL0, or else with interrupts enabled in the
	 * kernel, so either way we can re-enable interrupts here.
	 */
	local_irq_enable();

	mm = tsk->mm;

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (in_atomic() || !mm) {
		vma = NULL;	/* happy compiler */
		goto bad_area_nosemaphore;
	}

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (is_kernel_mode &&
		    !search_exception_tables(regs->pc)) {
			vma = NULL;	/* happy compiler */
			goto bad_area_nosemaphore;
		}
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (regs->sp < PAGE_OFFSET) {
		/*
		 * accessing the stack below sp is always a bug.
		 */
		if (address < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	if (fault_num == INT_ITLB_MISS) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (write) {
#ifdef TEST_VERIFY_AREA
		if (!is_page_fault && regs->cs == KERNEL_CS)
			pr_err("WP fault at "REGFMT"\n", regs->eip);
#endif
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!is_page_fault || !(vma->vm_flags & VM_READ))
			goto bad_area;
	}

 survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
	/*
	 * If this was an asynchronous fault,
	 * restart the appropriate engine.
	 */
	switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
		break;
#endif
#if CHIP_HAS_SN_PROC()
	case INT_SNITLB_MISS:
	case INT_SNITLB_MISS_DWNCL:
		__insn_mtspr(SPR_SNCTL,
			     __insn_mfspr(SPR_SNCTL) &
			     ~SPR_SNCTL__FRZPROC_MASK);
		break;
#endif
	}
#endif

	up_read(&mm->mmap_sem);
	return 1;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (!is_kernel_mode) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		force_sig_info_fault("segfault", SIGSEGV, si_code, address,
				     fault_num, tsk, regs);
		return 0;
	}

no_context:
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return 0;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

	bust_spinlocks(1);

	/* FIXME: no lookup_address() yet */
#ifdef SUPPORT_LOOKUP_ADDRESS
	if (fault_num == INT_ITLB_MISS) {
		pte_t *pte = lookup_address(address);

		if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
			pr_crit("kernel tried to execute"
				" non-executable page - exploit attempt?"
				" (uid: %d)\n", current->uid);
	}
#endif
	if (address < PAGE_SIZE)
		pr_alert("Unable to handle kernel NULL pointer dereference\n");
	else
		pr_alert("Unable to handle kernel paging request\n");
	pr_alert(" at virtual address "REGFMT", pc "REGFMT"\n",
		 address, regs->pc);

	show_regs(regs);

	if (unlikely(tsk->pid < 2)) {
		panic("Kernel page fault running %s!",
		      is_idle_task(tsk) ? "the idle task" : "init");
	}

	/*
	 * More FIXME: we should probably copy the i386 here and
	 * implement a generic die() routine.  Not today.
	 */
#ifdef SUPPORT_DIE
	die("Oops", regs);
#endif
	bust_spinlocks(1);

	do_group_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	pr_alert("VM: killing process %s\n", tsk->comm);
	if (!is_kernel_mode)
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode?  Handle exceptions or die */
	if (is_kernel_mode)
		goto no_context;

	force_sig_info_fault("bus error", SIGBUS, BUS_ADRERR, address,
			     fault_num, tsk, regs);
	return 0;
}
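/*
 * Illustrative sketch, not part of the original file: the shape of the
 * exception-table entries behind fixup_exception() and
 * search_exception_tables() used above.  Each instruction that may
 * legitimately fault (e.g. in uaccess code) is paired at build time
 * with a recovery address; fixup_exception() simply redirects
 * regs->pc to that address instead of letting the kernel oops.
 */
#if 0
struct exception_table_entry {
	unsigned long insn;	/* address of the potentially faulting insn */
	unsigned long fixup;	/* where to resume after the fault */
};
#endif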
#ifndef __tilegx__

/* We must release ICS before panicking or we won't get anywhere. */
#define ics_panic(fmt, ...) do { \
	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \
	panic(fmt, __VA_ARGS__); \
} while (0)

/*
 * When we take an ITLB or DTLB fault or access violation in the
 * supervisor while the critical section bit is set, the hypervisor is
 * reluctant to write new values into the EX_CONTEXT_K_x registers,
 * since that might indicate we have not yet squirreled the SPR
 * contents away and can thus safely take a recursive interrupt.
 * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_K_2.
 *
 * Note that this routine is called before homecache_tlb_defer_enter(),
 * which means that we can properly unlock any atomics that might
 * be used there (good), but also means we must be very sensitive
 * to not touch any data structures that might be located in memory
 * that could migrate, as we could be entering the kernel on a dataplane
 * cpu that has been deferring kernel TLB updates.  This means, for
 * example, that we can't migrate init_mm or its pgd.
 */
struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
				      unsigned long address,
				      unsigned long info)
{
	unsigned long pc = info & ~1;
	int write = info & 1;
	pgd_t *pgd = get_current_pgd();

	/* Retval is 1 at first since we will handle the fault fully. */
	struct intvec_state state = {
		do_page_fault, fault_num, address, write, 1
	};

	/* Validate that we are plausibly in the right routine. */
	if ((pc & 0x7) != 0 || pc < PAGE_OFFSET ||
	    (fault_num != INT_DTLB_MISS &&
	     fault_num != INT_DTLB_ACCESS)) {
		unsigned long old_pc = regs->pc;
		regs->pc = pc;
		ics_panic("Bad ICS page fault args:"
			  " old PC %#lx, fault %d/%d at %#lx\n",
			  old_pc, fault_num, write, address);
	}

	/* We might be faulting on a vmalloc page, so check that first. */
	if (fault_num != INT_DTLB_ACCESS && vmalloc_fault(pgd, address) >= 0)
		return state;

	/*
	 * If we faulted with ICS set in sys_cmpxchg, we are providing
	 * a user syscall service that should generate a signal on
	 * fault.  We didn't set up a kernel stack on initial entry to
	 * sys_cmpxchg, but instead had one set up by the fault, which
	 * (because sys_cmpxchg never releases ICS) came to us via the
	 * SYSTEM_SAVE_K_2 mechanism, and thus EX_CONTEXT_K_[01] are
	 * still referencing the original user code.  We release the
	 * atomic lock and rewrite pt_regs so that it appears that we
	 * came from user-space directly, and after we finish the
	 * fault we'll go back to user space and re-issue the swint.
	 * This way the backtrace information is correct if we need to
	 * emit a stack dump at any point while handling this.
	 *
	 * Must match register use in sys_cmpxchg().
	 */
	if (pc >= (unsigned long) sys_cmpxchg &&
	    pc < (unsigned long) __sys_cmpxchg_end) {
#ifdef CONFIG_SMP
		/* Don't unlock before we could have locked. */
		if (pc >= (unsigned long)__sys_cmpxchg_grab_lock) {
			int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
			__atomic_fault_unlock(lock_ptr);
		}
#endif
		regs->sp = regs->regs[27];
	}

	/*
	 * We can also fault in the atomic assembly, in which
	 * case we use the exception table to do the first-level fixup.
	 * We may re-fixup again in the real fault handler if it
	 * turns out the faulting address is just bad, and not,
	 * for example, migrating.
	 */
	else if (pc >= (unsigned long) __start_atomic_asm_code &&
		   pc < (unsigned long) __end_atomic_asm_code) {
		const struct exception_table_entry *fixup;
#ifdef CONFIG_SMP
		/* Unlock the atomic lock. */
		int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
		__atomic_fault_unlock(lock_ptr);
#endif
		fixup = search_exception_tables(pc);
		if (!fixup)
			ics_panic("ICS atomic fault not in table:"
				  " PC %#lx, fault %d", pc, fault_num);
		regs->pc = fixup->fixup;
		regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
	}

	/*
	 * Now that we have released the atomic lock (if necessary),
	 * it's safe to spin if the PTE that caused the fault was migrating.
	 */
	if (fault_num == INT_DTLB_ACCESS)
		write = 1;
	if (handle_migrating_pte(pgd, fault_num, address, 1, write))
		return state;

	/* Return zero so that we continue on with normal fault handling. */
	state.retval = 0;
	return state;
}

#endif /* !__tilegx__ */

/*
 * This routine handles page faults.  It determines the address, and the
 * problem, and then passes it to handle_page_fault() for normal DTLB and
 * ITLB issues, and for DMA or SN processor faults when we are in user
 * space.  For the latter, if we're in kernel mode, we just save the
 * interrupt away appropriately and return immediately.  We can't do
 * page faults for user code while in kernel mode.
 */
void do_page_fault(struct pt_regs *regs, int fault_num,
		   unsigned long address, unsigned long write)
{
	int is_page_fault;

	/* This case should have been handled by do_page_fault_ics(). */
	BUG_ON(write & ~1);

#if CHIP_HAS_TILE_DMA()
	/*
	 * If it's a DMA fault, suspend the transfer while we're
	 * handling the miss; we'll restart after it's handled.  If we
	 * don't suspend, it's possible that this process could swap
	 * out and back in, and restart the engine since the DMA is
	 * still 'running'.
	 */
	if (fault_num == INT_DMATLB_MISS ||
	    fault_num == INT_DMATLB_ACCESS ||
	    fault_num == INT_DMATLB_MISS_DWNCL ||
	    fault_num == INT_DMATLB_ACCESS_DWNCL) {
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);
		while (__insn_mfspr(SPR_DMA_USER_STATUS) &
		       SPR_DMA_STATUS__BUSY_MASK)
			;
	}
#endif

	/* Validate fault num and decide if this is a first-time page fault. */
	switch (fault_num) {
	case INT_ITLB_MISS:
	case INT_DTLB_MISS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
#endif
#if CHIP_HAS_SN_PROC()
	case INT_SNITLB_MISS:
	case INT_SNITLB_MISS_DWNCL:
#endif
		is_page_fault = 1;
		break;

	case INT_DTLB_ACCESS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
#endif
		is_page_fault = 0;
		break;

	default:
		panic("Bad fault number %d in do_page_fault", fault_num);
	}

#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
	if (EX1_PL(regs->ex1) != USER_PL) {
		struct async_tlb *async;
		switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
		case INT_DMATLB_MISS:
		case INT_DMATLB_ACCESS:
		case INT_DMATLB_MISS_DWNCL:
		case INT_DMATLB_ACCESS_DWNCL:
			async = &current->thread.dma_async_tlb;
			break;
#endif
#if CHIP_HAS_SN_PROC()
		case INT_SNITLB_MISS:
		case INT_SNITLB_MISS_DWNCL:
			async = &current->thread.sn_async_tlb;
			break;
#endif
		default:
			async = NULL;
		}
		if (async) {

			/*
			 * No vmalloc check required, so we can allow
			 * interrupts immediately at this point.
			 */
			local_irq_enable();

			set_thread_flag(TIF_ASYNC_TLB);
			if (async->fault_num != 0) {
				panic("Second async fault %d;"
				      " old fault was %d (%#lx/%ld)",
				      fault_num, async->fault_num,
				      address, write);
			}
			BUG_ON(fault_num == 0);
			async->fault_num = fault_num;
			async->is_fault = is_page_fault;
			async->is_write = write;
			async->address = address;
			return;
		}
	}
#endif

	handle_page_fault(regs, fault_num, is_page_fault, address, write);
}


#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
/*
 * Check an async_tlb structure to see if a deferred fault is waiting,
 * and if so pass it to the page-fault code.
 */
static void handle_async_page_fault(struct pt_regs *regs,
				    struct async_tlb *async)
{
	if (async->fault_num) {
		/*
		 * Clear async->fault_num before calling the page-fault
		 * handler so that if we re-interrupt before returning
		 * from the function we have somewhere to put the
		 * information from the new interrupt.
		 */
		int fault_num = async->fault_num;
		async->fault_num = 0;
		handle_page_fault(regs, fault_num, async->is_fault,
				  async->address, async->is_write);
	}
}

/*
 * This routine effectively re-issues asynchronous page faults
 * when we are returning to user space.
 */
void do_async_page_fault(struct pt_regs *regs)
{
	/*
	 * Clear thread flag early.  If we re-interrupt while processing
	 * code here, we will reset it and recall this routine before
	 * returning to user space.
	 */
	clear_thread_flag(TIF_ASYNC_TLB);

#if CHIP_HAS_TILE_DMA()
	handle_async_page_fault(regs, &current->thread.dma_async_tlb);
#endif
#if CHIP_HAS_SN_PROC()
	handle_async_page_fault(regs, &current->thread.sn_async_tlb);
#endif
}
#endif /* CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() */


void vmalloc_sync_all(void)
{
#ifdef __tilegx__
	/* Currently all L1 kernel pmd's are static and shared. */
	BUG_ON(pgd_index(VMALLOC_END) != pgd_index(VMALLOC_START));
#else
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = PAGE_OFFSET;
	unsigned long address;

	BUILD_BUG_ON(PAGE_OFFSET & ~PGDIR_MASK);
	for (address = start; address >= PAGE_OFFSET; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			unsigned long flags;
			struct list_head *pos;

			spin_lock_irqsave(&pgd_lock, flags);
			list_for_each(pos, &pgd_list)
				if (!vmalloc_sync_one(list_to_pgd(pos),
								address)) {
					/* Must be at first entry in list. */
					BUG_ON(pos != pgd_list.next);
					break;
				}
			spin_unlock_irqrestore(&pgd_lock, flags);
			if (pos != pgd_list.next)
				set_bit(pgd_index(address), insync);
		}
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}
#endif
}