Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

arch/tile/mm/fault.c at v2.6.36-rc8 (875 lines, 24 kB)
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * From i386 code copyright (C) 1995 Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>	/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#include <asm/system.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/traps.h>
#include <asm/syscalls.h>

#include <arch/interrupts.h>

static noinline void force_sig_info_fault(int si_signo, int si_code,
	unsigned long address, int fault_num, struct task_struct *tsk)
{
	siginfo_t info;

	if (unlikely(tsk->pid < 2)) {
		panic("Signal %d (code %d) at %#lx sent to %s!",
		      si_signo, si_code & 0xffff, address,
		      tsk->pid ? "init" : "the idle task");
	}

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	info.si_trapno = fault_num;
	force_sig_info(si_signo, &info, tsk);
}

#ifndef __tilegx__
/*
 * Synthesize the fault a PL0 process would get by doing a word-load of
 * an unaligned address or a high kernel address.  Called indirectly
 * from sys_cmpxchg() in kernel/intvec.S.
 */
int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *regs)
{
	if (address >= PAGE_OFFSET)
		force_sig_info_fault(SIGSEGV, SEGV_MAPERR, address,
				     INT_DTLB_MISS, current);
	else
		force_sig_info_fault(SIGBUS, BUS_ADRALN, address,
				     INT_UNALIGN_DATA, current);

	/*
	 * Adjust pc to point at the actual instruction, which is unusual
	 * for syscalls normally, but is appropriate when we are claiming
	 * that a syscall swint1 caused a page fault or bus error.
	 */
	regs->pc -= 8;

	/*
	 * Mark this as a caller-save interrupt, like a normal page fault,
	 * so that when we go through the signal handler path we will
	 * properly restore r0, r1, and r2 for the signal handler arguments.
	 */
	regs->flags |= PT_FLAGS_CALLER_SAVES;

	return 0;
}
#endif
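/*
 * For illustration of the "regs->pc -= 8" above (the 8-byte bundle
 * size is the tile instruction encoding; the rest is inferred from the
 * comments in this file): on syscall entry the saved pc normally
 * points at the bundle *after* the swint1.  Subtracting 8 makes the
 * saved pc point back at the swint1 itself, so the signal frame looks
 * like an ordinary faulting memory access, and if the signal is
 * handled the swint1 is simply re-issued on return to user space.
 */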
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;
	if (!pmd_present(*pmd)) {
		set_pmd(pmd, *pmd_k);
		arch_flush_lazy_mmu_mode();
	} else
		BUG_ON(pmd_ptfn(*pmd) != pmd_ptfn(*pmd_k));
	return pmd_k;
}

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
{
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pmd_k = vmalloc_sync_one(pgd, address);
	if (!pmd_k)
		return -1;
	if (pmd_huge(*pmd_k))
		return 0;	/* support TILE huge_vmap() API */
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
}
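/*
 * Concrete example of the lazy sync above: vmalloc() installs new
 * kernel mappings only in the reference table, init_mm.pgd.  A task
 * whose private pgd was populated earlier then takes a DTLB miss on
 * its first touch of the new address; vmalloc_fault() copies the one
 * missing pmd entry over from init_mm.pgd and the access is simply
 * retried, so vmalloc() never has to update every pgd in the system.
 */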
/* Wait until this PTE has completed migration. */
static void wait_for_migration(pte_t *pte)
{
	if (pte_migrating(*pte)) {
		/*
		 * Wait until the migrator fixes up this pte.
		 * We scale the loop count by the clock rate so we'll wait for
		 * a few seconds here.
		 */
		int retries = 0;
		int bound = get_clock_rate();
		while (pte_migrating(*pte)) {
			barrier();
			if (++retries > bound)
				panic("Hit migrating PTE (%#llx) and"
				      " page PFN %#lx still migrating",
				      pte->val, pte_pfn(*pte));
		}
	}
}

/*
 * It's not generally safe to use "current" to get the page table pointer,
 * since we might be running an oprofile interrupt in the middle of a
 * task switch.
 */
static pgd_t *get_current_pgd(void)
{
	HV_Context ctx = hv_inquire_context();
	unsigned long pgd_pfn = ctx.page_table >> PAGE_SHIFT;
	struct page *pgd_page = pfn_to_page(pgd_pfn);
	BUG_ON(PageHighMem(pgd_page));	/* oops, HIGHPTE? */
	return (pgd_t *) __va(ctx.page_table);
}

/*
 * We can receive a page fault from a migrating PTE at any time.
 * Handle it by just waiting until the fault resolves.
 *
 * It's also possible to get a migrating kernel PTE that resolves
 * itself during the downcall from hypervisor to Linux.  We just check
 * here to see if the PTE seems valid, and if so we retry it.
 *
 * NOTE! We MUST NOT take any locks for this case.  We may be in an
 * interrupt or a critical region, and must do as little as possible.
 * Similarly, we can't use atomic ops here, since we may be handling a
 * fault caused by an atomic op access.
 */
static int handle_migrating_pte(pgd_t *pgd, int fault_num,
				unsigned long address,
				int is_kernel_mode, int write)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;

	if (pgd_addr_invalid(address))
		return 0;

	pgd += pgd_index(address);
	pud = pud_offset(pgd, address);
	if (!pud || !pud_present(*pud))
		return 0;
	pmd = pmd_offset(pud, address);
	if (!pmd || !pmd_present(*pmd))
		return 0;
	pte = pmd_huge_page(*pmd) ? ((pte_t *)pmd) :
		pte_offset_kernel(pmd, address);
	pteval = *pte;
	if (pte_migrating(pteval)) {
		wait_for_migration(pte);
		return 1;
	}

	if (!is_kernel_mode || !pte_present(pteval))
		return 0;
	if (fault_num == INT_ITLB_MISS) {
		if (pte_exec(pteval))
			return 1;
	} else if (write) {
		if (pte_write(pteval))
			return 1;
	} else {
		if (pte_read(pteval))
			return 1;
	}

	return 0;
}

/*
 * This routine is responsible for faulting in user pages.
 * It passes the work off to one of the appropriate routines.
 * It returns true if the fault was successfully handled.
 */
static int handle_page_fault(struct pt_regs *regs,
			     int fault_num,
			     int is_page_fault,
			     unsigned long address,
			     int write)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long stack_offset;
	int fault;
	int si_code;
	int is_kernel_mode;
	pgd_t *pgd;

	/* on TILE, protection faults are always writes */
	if (!is_page_fault)
		write = 1;

	is_kernel_mode = (EX1_PL(regs->ex1) != USER_PL);

	tsk = validate_current();

	/*
	 * Check to see if we might be overwriting the stack, and bail
	 * out if so.  The page fault code is a relatively likely
	 * place to get trapped in an infinite regress, and once we
	 * overwrite the whole stack, it becomes very hard to recover.
	 */
	stack_offset = stack_pointer & (THREAD_SIZE-1);
	if (stack_offset < THREAD_SIZE / 8) {
		pr_alert("Potential stack overrun: sp %#lx\n",
			 stack_pointer);
		show_regs(regs);
		pr_alert("Killing current process %d/%s\n",
			 tsk->pid, tsk->comm);
		do_group_exit(SIGKILL);
	}
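	/*
	 * Worked example of the check above, assuming for illustration
	 * THREAD_SIZE = 64 kB (0x10000; the value is assumed here, not
	 * taken from the headers): sp = 0xfc001230 gives
	 * stack_offset = 0xfc001230 & 0xffff = 0x1230, which is below
	 * THREAD_SIZE/8 = 0x2000, so the task is killed now rather than
	 * letting the fault path itself push frames over the data that
	 * typically lives at the bottom of the kernel stack.
	 */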
	/*
	 * Early on, we need to check for migrating PTE entries;
	 * see homecache.c.  If we find a migrating PTE, we wait until
	 * the backing page claims to be done migrating, then we proceed.
	 * For kernel PTEs, we rewrite the PTE and return and retry.
	 * Otherwise, we treat the fault like a normal "no PTE" fault,
	 * rather than trying to patch up the existing PTE.
	 */
	pgd = get_current_pgd();
	if (handle_migrating_pte(pgd, fault_num, address,
				 is_kernel_mode, write))
		return 1;

	si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was not a protection fault.
	 */
	if (unlikely(address >= TASK_SIZE &&
		     !is_arch_mappable_range(address, 0))) {
		if (is_kernel_mode && is_page_fault &&
		    vmalloc_fault(pgd, address) >= 0)
			return 1;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		mm = NULL;	/* happy compiler */
		vma = NULL;
		goto bad_area_nosemaphore;
	}

	/*
	 * If we're trying to touch user-space addresses, we must
	 * be either at PL0, or else with interrupts enabled in the
	 * kernel, so either way we can re-enable interrupts here.
	 */
	local_irq_enable();

	mm = tsk->mm;

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (in_atomic() || !mm) {
		vma = NULL;	/* happy compiler */
		goto bad_area_nosemaphore;
	}

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (is_kernel_mode &&
		    !search_exception_tables(regs->pc)) {
			vma = NULL;	/* happy compiler */
			goto bad_area_nosemaphore;
		}
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (regs->sp < PAGE_OFFSET) {
		/*
		 * accessing the stack below sp is always a bug.
		 */
		if (address < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;
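	/*
	 * Note on the vma checks above: find_vma() returns the lowest
	 * vma with vm_end > address, so "vma->vm_start <= address"
	 * means the address falls inside it.  Otherwise the address
	 * lies in a hole below that vma, which is acceptable only if
	 * the vma is a VM_GROWSDOWN stack that expand_stack() can grow
	 * down to cover it.
	 */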
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	if (fault_num == INT_ITLB_MISS) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (write) {
#ifdef TEST_VERIFY_AREA
		if (!is_page_fault && regs->cs == KERNEL_CS)
			pr_err("WP fault at "REGFMT"\n", regs->eip);
#endif
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!is_page_fault || !(vma->vm_flags & VM_READ))
			goto bad_area;
	}

 survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
	/*
	 * If this was an asynchronous fault,
	 * restart the appropriate engine.
	 */
	switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
		break;
#endif
#if CHIP_HAS_SN_PROC()
	case INT_SNITLB_MISS:
	case INT_SNITLB_MISS_DWNCL:
		__insn_mtspr(SPR_SNCTL,
			     __insn_mfspr(SPR_SNCTL) &
			     ~SPR_SNCTL__FRZPROC_MASK);
		break;
#endif
	}
#endif

	up_read(&mm->mmap_sem);
	return 1;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (!is_kernel_mode) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		force_sig_info_fault(SIGSEGV, si_code, address,
				     fault_num, tsk);
		return 0;
	}

no_context:
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return 0;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

	bust_spinlocks(1);

	/* FIXME: no lookup_address() yet */
#ifdef SUPPORT_LOOKUP_ADDRESS
	if (fault_num == INT_ITLB_MISS) {
		pte_t *pte = lookup_address(address);

		if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
			pr_crit("kernel tried to execute"
				" non-executable page - exploit attempt?"
				" (uid: %d)\n", current->uid);
	}
#endif
	if (address < PAGE_SIZE)
		pr_alert("Unable to handle kernel NULL pointer dereference\n");
	else
		pr_alert("Unable to handle kernel paging request\n");
	pr_alert(" at virtual address "REGFMT", pc "REGFMT"\n",
		 address, regs->pc);

	show_regs(regs);

	if (unlikely(tsk->pid < 2)) {
		panic("Kernel page fault running %s!",
		      tsk->pid ? "init" : "the idle task");
	}

	/*
	 * More FIXME: we should probably copy the i386 here and
	 * implement a generic die() routine.  Not today.
	 */
#ifdef SUPPORT_DIE
	die("Oops", regs);
#endif
	bust_spinlocks(0);

	do_group_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	pr_alert("VM: killing process %s\n", tsk->comm);
	if (!is_kernel_mode)
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode?  Handle exceptions or die */
	if (is_kernel_mode)
		goto no_context;

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, fault_num, tsk);
	return 0;
}
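/*
 * Note the recovery ladder in the kernel-mode error path above:
 * fixup_exception() is tried first (accessors such as copy_from_user()
 * register fixup entries in the exception table), then the oops dump,
 * and only then do_group_exit().  The out_of_memory path never kills
 * init: it yields and retries via the "survive" label instead.
 */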
#ifndef __tilegx__

/* We must release ICS before panicking or we won't get anywhere. */
#define ics_panic(fmt, ...) do { \
	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \
	panic(fmt, __VA_ARGS__); \
} while (0)

/*
 * When we take an ITLB or DTLB fault or access violation in the
 * supervisor while the critical section bit is set, the hypervisor is
 * reluctant to write new values into the EX_CONTEXT_1_x registers,
 * since that might indicate we have not yet squirreled the SPR
 * contents away and can thus safely take a recursive interrupt.
 * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_1_2.
 *
 * Note that this routine is called before homecache_tlb_defer_enter(),
 * which means that we can properly unlock any atomics that might
 * be used there (good), but also means we must be very sensitive
 * to not touch any data structures that might be located in memory
 * that could migrate, as we could be entering the kernel on a dataplane
 * cpu that has been deferring kernel TLB updates.  This means, for
 * example, that we can't migrate init_mm or its pgd.
 */
struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
				      unsigned long address,
				      unsigned long info)
{
	unsigned long pc = info & ~1;
	int write = info & 1;
	pgd_t *pgd = get_current_pgd();

	/* Retval is 1 at first since we will handle the fault fully. */
	struct intvec_state state = {
		do_page_fault, fault_num, address, write, 1
	};

	/* Validate that we are plausibly in the right routine. */
	if ((pc & 0x7) != 0 || pc < PAGE_OFFSET ||
	    (fault_num != INT_DTLB_MISS &&
	     fault_num != INT_DTLB_ACCESS)) {
		unsigned long old_pc = regs->pc;
		regs->pc = pc;
		ics_panic("Bad ICS page fault args:"
			  " old PC %#lx, fault %d/%d at %#lx\n",
			  old_pc, fault_num, write, address);
	}
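	/*
	 * How "info" is packed, as the unpacking above assumes: the
	 * faulting pc occupies the high bits and the write flag sits
	 * in bit 0, roughly info = pc | write.  This only works
	 * because a valid pc here is 8-byte aligned (enforced by the
	 * "(pc & 0x7) != 0" check), leaving the low bits free, so
	 * pc = info & ~1 and write = info & 1 recover both fields.
	 */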
	/* We might be faulting on a vmalloc page, so check that first. */
	if (fault_num != INT_DTLB_ACCESS && vmalloc_fault(pgd, address) >= 0)
		return state;

	/*
	 * If we faulted with ICS set in sys_cmpxchg, we are providing
	 * a user syscall service that should generate a signal on
	 * fault.  We didn't set up a kernel stack on initial entry to
	 * sys_cmpxchg, but instead had one set up by the fault, which
	 * (because sys_cmpxchg never releases ICS) came to us via the
	 * SYSTEM_SAVE_1_2 mechanism, and thus EX_CONTEXT_1_[01] are
	 * still referencing the original user code.  We release the
	 * atomic lock and rewrite pt_regs so that it appears that we
	 * came from user-space directly, and after we finish the
	 * fault we'll go back to user space and re-issue the swint.
	 * This way the backtrace information is correct if we need to
	 * emit a stack dump at any point while handling this.
	 *
	 * Must match register use in sys_cmpxchg().
	 */
	if (pc >= (unsigned long) sys_cmpxchg &&
	    pc < (unsigned long) __sys_cmpxchg_end) {
#ifdef CONFIG_SMP
		/* Don't unlock before we could have locked. */
		if (pc >= (unsigned long)__sys_cmpxchg_grab_lock) {
			int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
			__atomic_fault_unlock(lock_ptr);
		}
#endif
		regs->sp = regs->regs[27];
	}

	/*
	 * We can also fault in the atomic assembly, in which
	 * case we use the exception table to do the first-level fixup.
	 * We may re-fixup again in the real fault handler if it
	 * turns out the faulting address is just bad, and not,
	 * for example, migrating.
	 */
	else if (pc >= (unsigned long) __start_atomic_asm_code &&
		 pc < (unsigned long) __end_atomic_asm_code) {
		const struct exception_table_entry *fixup;
#ifdef CONFIG_SMP
		/* Unlock the atomic lock. */
		int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
		__atomic_fault_unlock(lock_ptr);
#endif
		fixup = search_exception_tables(pc);
		if (!fixup)
			ics_panic("ICS atomic fault not in table:"
				  " PC %#lx, fault %d", pc, fault_num);
		regs->pc = fixup->fixup;
		regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
	}

	/*
	 * NOTE: the one other type of access that might bring us here
	 * are the memory ops in __tns_atomic_acquire/__tns_atomic_release,
	 * but we don't have to check specially for them since we can
	 * always safely return to the address of the fault and retry,
	 * since no separate atomic locks are involved.
	 */

	/*
	 * Now that we have released the atomic lock (if necessary),
	 * it's safe to spin if the PTE that caused the fault was migrating.
	 */
	if (fault_num == INT_DTLB_ACCESS)
		write = 1;
	if (handle_migrating_pte(pgd, fault_num, address, 1, write))
		return state;

	/* Return zero so that we continue on with normal fault handling. */
	state.retval = 0;
	return state;
}

#endif /* !__tilegx__ */
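/*
 * To summarize the convention above: do_page_fault_ics() returns
 * state.retval == 1 when the fault has been handled in full (a vmalloc
 * sync or a migrating PTE that has now settled), and state.retval == 0
 * when intvec.S should continue into the normal do_page_fault() path
 * with the possibly-rewritten pt_regs.
 */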
/*
 * This routine handles page faults.  It determines the address, and the
 * problem, and then passes it handle_page_fault() for normal DTLB and
 * ITLB issues, and for DMA or SN processor faults when we are in user
 * space.  For the latter, if we're in kernel mode, we just save the
 * interrupt away appropriately and return immediately.  We can't do
 * page faults for user code while in kernel mode.
 */
void do_page_fault(struct pt_regs *regs, int fault_num,
		   unsigned long address, unsigned long write)
{
	int is_page_fault;

	/* This case should have been handled by do_page_fault_ics(). */
	BUG_ON(write & ~1);

#if CHIP_HAS_TILE_DMA()
	/*
	 * If it's a DMA fault, suspend the transfer while we're
	 * handling the miss; we'll restart after it's handled.  If we
	 * don't suspend, it's possible that this process could swap
	 * out and back in, and restart the engine since the DMA is
	 * still 'running'.
	 */
	if (fault_num == INT_DMATLB_MISS ||
	    fault_num == INT_DMATLB_ACCESS ||
	    fault_num == INT_DMATLB_MISS_DWNCL ||
	    fault_num == INT_DMATLB_ACCESS_DWNCL) {
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);
		while (__insn_mfspr(SPR_DMA_USER_STATUS) &
		       SPR_DMA_STATUS__BUSY_MASK)
			;
	}
#endif

	/* Validate fault num and decide if this is a first-time page fault. */
	switch (fault_num) {
	case INT_ITLB_MISS:
	case INT_DTLB_MISS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
#endif
#if CHIP_HAS_SN_PROC()
	case INT_SNITLB_MISS:
	case INT_SNITLB_MISS_DWNCL:
#endif
		is_page_fault = 1;
		break;

	case INT_DTLB_ACCESS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
#endif
		is_page_fault = 0;
		break;

	default:
		panic("Bad fault number %d in do_page_fault", fault_num);
	}

	if (EX1_PL(regs->ex1) != USER_PL) {
		struct async_tlb *async;
		switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
		case INT_DMATLB_MISS:
		case INT_DMATLB_ACCESS:
		case INT_DMATLB_MISS_DWNCL:
		case INT_DMATLB_ACCESS_DWNCL:
			async = &current->thread.dma_async_tlb;
			break;
#endif
#if CHIP_HAS_SN_PROC()
		case INT_SNITLB_MISS:
		case INT_SNITLB_MISS_DWNCL:
			async = &current->thread.sn_async_tlb;
			break;
#endif
		default:
			async = NULL;
		}
		if (async) {

			/*
			 * No vmalloc check required, so we can allow
			 * interrupts immediately at this point.
			 */
			local_irq_enable();

			set_thread_flag(TIF_ASYNC_TLB);
			if (async->fault_num != 0) {
				panic("Second async fault %d;"
				      " old fault was %d (%#lx/%ld)",
				      fault_num, async->fault_num,
				      address, write);
			}
			BUG_ON(fault_num == 0);
			async->fault_num = fault_num;
			async->is_fault = is_page_fault;
			async->is_write = write;
			async->address = address;
			return;
		}
	}

	handle_page_fault(regs, fault_num, is_page_fault, address, write);
}


#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
/*
 * Check an async_tlb structure to see if a deferred fault is waiting,
 * and if so pass it to the page-fault code.
 */
static void handle_async_page_fault(struct pt_regs *regs,
				    struct async_tlb *async)
{
	if (async->fault_num) {
		/*
		 * Clear async->fault_num before calling the page-fault
		 * handler so that if we re-interrupt before returning
		 * from the function we have somewhere to put the
		 * information from the new interrupt.
		 */
		int fault_num = async->fault_num;
		async->fault_num = 0;
		handle_page_fault(regs, fault_num, async->is_fault,
				  async->address, async->is_write);
	}
}
#endif /* CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() */


/*
 * This routine effectively re-issues asynchronous page faults
 * when we are returning to user space.
 */
void do_async_page_fault(struct pt_regs *regs)
{
	/*
	 * Clear thread flag early.  If we re-interrupt while processing
	 * code here, we will reset it and recall this routine before
	 * returning to user space.
	 */
	clear_thread_flag(TIF_ASYNC_TLB);

#if CHIP_HAS_TILE_DMA()
	handle_async_page_fault(regs, &current->thread.dma_async_tlb);
#endif
#if CHIP_HAS_SN_PROC()
	handle_async_page_fault(regs, &current->thread.sn_async_tlb);
#endif
}
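/*
 * Example flow through the async machinery above, for a DMA engine
 * fault taken while the cpu is in kernel mode: do_page_fault() stashes
 * the fault in current->thread.dma_async_tlb and sets TIF_ASYNC_TLB
 * rather than servicing it immediately; on the way back to user space,
 * do_async_page_fault() replays it through handle_page_fault(), whose
 * restart code then resumes the suspended DMA engine.
 */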
void vmalloc_sync_all(void)
{
#ifdef __tilegx__
	/* Currently all L1 kernel pmd's are static and shared. */
	BUG_ON(pgd_index(VMALLOC_END) != pgd_index(VMALLOC_START));
#else
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = PAGE_OFFSET;
	unsigned long address;

	BUILD_BUG_ON(PAGE_OFFSET & ~PGDIR_MASK);
	for (address = start; address >= PAGE_OFFSET; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			unsigned long flags;
			struct list_head *pos;

			spin_lock_irqsave(&pgd_lock, flags);
			list_for_each(pos, &pgd_list)
				if (!vmalloc_sync_one(list_to_pgd(pos),
						      address)) {
					/* Must be at first entry in list. */
					BUG_ON(pos != pgd_list.next);
					break;
				}
			spin_unlock_irqrestore(&pgd_lock, flags);
			if (pos != pgd_list.next)
				set_bit(pgd_index(address), insync);
		}
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}
#endif
}