Merge tag 'parisc-for-6.10-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc fix from Helge Deller:
"On parisc we have suffered since years from random segfaults which
seem to have been triggered due to cache inconsistencies. Those
segfaults happened more often on machines with PA8800 and PA8900 CPUs,
which have much bigger caches than the earlier machines.

Dave Anglin has worked over the last few weeks to fix this bug. His
patch has been successfully tested by various people on various
machines and with various kernels (6.6, 6.8 and 6.9), and the Debian
buildd servers haven't shown a single random segfault with this patch.

Since the cache handling has been reworked, the patch is slightly
bigger than I would like at this stage, but the greatly improved
stability IMHO justifies its inclusion now"

* tag 'parisc-for-6.10-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
parisc: Try to fix random segmentation faults in package builds
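
The crux of the change, condensed from the arch/parisc/kernel/cache.c hunks
below (a simplified restatement for orientation, not the verbatim kernel
code): a valid PTE is no longer torn down without flushing the page it maps,
so PA8800/PA8900 cores cannot keep stale cache lines for a mapping that has
already vanished.

	/* Sketch of the ordering the patch enforces; see the full diff below. */
	pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr,
			       pte_t *ptep)
	{
		struct mm_struct *mm = vma->vm_mm;
		pte_t pte = ptep_get_and_clear(mm, addr, ptep);
		unsigned long pfn = pte_pfn(pte);

		if (pfn_valid(pfn))
			/* kill the TLB entry, then flush D/I-cache via the
			 * tmpalias mapping while the pfn is still known */
			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
		else if (pte_accessible(mm, pte))
			flush_tlb_page(vma, addr);

		return pte;
	}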

Changed files (+279 -184):
arch/parisc/include/asm/cacheflush.h (+4 -11)
···
 void flush_cache_all(void);
 void flush_cache_mm(struct mm_struct *mm);
 
-void flush_kernel_dcache_page_addr(const void *addr);
-
 #define flush_kernel_dcache_range(start,size) \
 	flush_kernel_dcache_range_asm((start), (start)+(size));
 
+/* The only way to flush a vmap range is to flush whole cache */
 #define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
 void flush_kernel_vmap_range(void *vaddr, int size);
 void invalidate_kernel_vmap_range(void *vaddr, int size);
 
-#define flush_cache_vmap(start, end)		flush_cache_all()
+void flush_cache_vmap(unsigned long start, unsigned long end);
 #define flush_cache_vmap_early(start, end)	do { } while (0)
-#define flush_cache_vunmap(start, end)		flush_cache_all()
+void flush_cache_vunmap(unsigned long start, unsigned long end);
 
 void flush_dcache_folio(struct folio *folio);
 #define flush_dcache_folio flush_dcache_folio
···
 void flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end);
 
-/* defined in pacache.S exported in cache.c used by flush_anon_page */
-void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
-
 #define ARCH_HAS_FLUSH_ANON_PAGE
 void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);
 
 #define ARCH_HAS_FLUSH_ON_KUNMAP
-static inline void kunmap_flush_on_unmap(const void *addr)
-{
-	flush_kernel_dcache_page_addr(addr);
-}
+void kunmap_flush_on_unmap(const void *addr);
 
 #endif /* _PARISC_CACHEFLUSH_H */

arch/parisc/include/asm/pgtable.h (+12 -15)
···
 	return pte;
 }
 
+static inline pte_t ptep_get(pte_t *ptep)
+{
+	return READ_ONCE(*ptep);
+}
+#define ptep_get ptep_get
+
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 	pte_t pte;
 
-	if (!pte_young(*ptep))
-		return 0;
-
-	pte = *ptep;
+	pte = ptep_get(ptep);
 	if (!pte_young(pte)) {
 		return 0;
 	}
···
 	return 1;
 }
 
+int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
+pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
+
 struct mm_struct;
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	pte_t old_pte;
-
-	old_pte = *ptep;
-	set_pte(ptep, __pte(0));
-
-	return old_pte;
-}
-
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	set_pte(ptep, pte_wrprotect(*ptep));
···
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 #define __HAVE_ARCH_PTE_SAME

arch/parisc/kernel/cache.c (+263 -158)
···
 #include <linux/sched.h>
 #include <linux/sched/mm.h>
 #include <linux/syscalls.h>
+#include <linux/vmalloc.h>
 #include <asm/pdc.h>
 #include <asm/cache.h>
 #include <asm/cacheflush.h>
···
 #include <asm/mmu_context.h>
 #include <asm/cachectl.h>
 
+#define PTR_PAGE_ALIGN_DOWN(addr) PTR_ALIGN_DOWN(addr, PAGE_SIZE)
+
+/*
+ * When nonzero, use _PAGE_ACCESSED bit to try to reduce the number
+ * of page flushes done flush_cache_page_if_present. There are some
+ * pros and cons in using this option. It may increase the risk of
+ * random segmentation faults.
+ */
+#define CONFIG_FLUSH_PAGE_ACCESSED	0
+
 int split_tlb __ro_after_init;
 int dcache_stride __ro_after_init;
 int icache_stride __ro_after_init;
 EXPORT_SYMBOL(dcache_stride);
 
+/* Internal implementation in arch/parisc/kernel/pacache.S */
 void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
 EXPORT_SYMBOL(flush_dcache_page_asm);
 void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
 void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
-
-/* Internal implementation in arch/parisc/kernel/pacache.S */
 void flush_data_cache_local(void *);	/* flushes local data-cache only */
 void flush_instruction_cache_local(void); /* flushes local code-cache only */
+
+static void flush_kernel_dcache_page_addr(const void *addr);
 
 /* On some machines (i.e., ones with the Merced bus), there can be
  * only a single PxTLB broadcast at a time; this must be guaranteed
···
 {
 	if (!static_branch_likely(&parisc_has_cache))
 		return;
+
+	/*
+	 * The TLB is the engine of coherence on parisc.  The CPU is
+	 * entitled to speculate any page with a TLB mapping, so here
+	 * we kill the mapping then flush the page along a special flush
+	 * only alias mapping. This guarantees that the page is no-longer
+	 * in the cache for any process and nor may it be speculatively
+	 * read in (until the user or kernel specifically accesses it,
+	 * of course).
+	 */
+	flush_tlb_page(vma, vmaddr);
+
 	preempt_disable();
 	flush_dcache_page_asm(physaddr, vmaddr);
 	if (vma->vm_flags & VM_EXEC)
···
 	preempt_enable();
 }
 
-static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
+static void flush_kernel_dcache_page_addr(const void *addr)
 {
-	unsigned long flags, space, pgd, prot;
-#ifdef CONFIG_TLB_PTLOCK
-	unsigned long pgd_lock;
-#endif
+	unsigned long vaddr = (unsigned long)addr;
+	unsigned long flags;
 
-	vmaddr &= PAGE_MASK;
+	/* Purge TLB entry to remove translation on all CPUs */
+	purge_tlb_start(flags);
+	pdtlb(SR_KERNEL, addr);
+	purge_tlb_end(flags);
 
+	/* Use tmpalias flush to prevent data cache move-in */
 	preempt_disable();
-
-	/* Set context for flush */
-	local_irq_save(flags);
-	prot = mfctl(8);
-	space = mfsp(SR_USER);
-	pgd = mfctl(25);
-#ifdef CONFIG_TLB_PTLOCK
-	pgd_lock = mfctl(28);
-#endif
-	switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
-	local_irq_restore(flags);
-
-	flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
-	if (vma->vm_flags & VM_EXEC)
-		flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
-	flush_tlb_page(vma, vmaddr);
-
-	/* Restore previous context */
-	local_irq_save(flags);
-#ifdef CONFIG_TLB_PTLOCK
-	mtctl(pgd_lock, 28);
-#endif
-	mtctl(pgd, 25);
-	mtsp(space, SR_USER);
-	mtctl(prot, 8);
-	local_irq_restore(flags);
-
+	flush_dcache_page_asm(__pa(vaddr), vaddr);
 	preempt_enable();
 }
+
+static void flush_kernel_icache_page_addr(const void *addr)
+{
+	unsigned long vaddr = (unsigned long)addr;
+	unsigned long flags;
+
+	/* Purge TLB entry to remove translation on all CPUs */
+	purge_tlb_start(flags);
+	pdtlb(SR_KERNEL, addr);
+	purge_tlb_end(flags);
+
+	/* Use tmpalias flush to prevent instruction cache move-in */
+	preempt_disable();
+	flush_icache_page_asm(__pa(vaddr), vaddr);
+	preempt_enable();
+}
+
+void kunmap_flush_on_unmap(const void *addr)
+{
+	flush_kernel_dcache_page_addr(addr);
+}
+EXPORT_SYMBOL(kunmap_flush_on_unmap);
 
 void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
 		unsigned int nr)
···
 
 	for (;;) {
 		flush_kernel_dcache_page_addr(kaddr);
-		flush_kernel_icache_page(kaddr);
+		flush_kernel_icache_page_addr(kaddr);
 		if (--nr == 0)
 			break;
 		kaddr += PAGE_SIZE;
 	}
 }
 
+/*
+ * Walk page directory for MM to find PTEP pointer for address ADDR.
+ */
 static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
 {
 	pte_t *ptep = NULL;
···
 {
 	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
 		== (_PAGE_PRESENT | _PAGE_ACCESSED);
+}
+
+/*
+ * Return user physical address.  Returns 0 if page is not present.
+ */
+static inline unsigned long get_upa(struct mm_struct *mm, unsigned long addr)
+{
+	unsigned long flags, space, pgd, prot, pa;
+#ifdef CONFIG_TLB_PTLOCK
+	unsigned long pgd_lock;
+#endif
+
+	/* Save context */
+	local_irq_save(flags);
+	prot = mfctl(8);
+	space = mfsp(SR_USER);
+	pgd = mfctl(25);
+#ifdef CONFIG_TLB_PTLOCK
+	pgd_lock = mfctl(28);
+#endif
+
+	/* Set context for lpa_user */
+	switch_mm_irqs_off(NULL, mm, NULL);
+	pa = lpa_user(addr);
+
+	/* Restore previous context */
+#ifdef CONFIG_TLB_PTLOCK
+	mtctl(pgd_lock, 28);
+#endif
+	mtctl(pgd, 25);
+	mtsp(space, SR_USER);
+	mtctl(prot, 8);
+	local_irq_restore(flags);
+
+	return pa;
 }
 
 void flush_dcache_folio(struct folio *folio)
···
 		if (addr + nr * PAGE_SIZE > vma->vm_end)
 			nr = (vma->vm_end - addr) / PAGE_SIZE;
 
-		if (parisc_requires_coherency()) {
-			for (i = 0; i < nr; i++) {
-				pte_t *ptep = get_ptep(vma->vm_mm,
-							addr + i * PAGE_SIZE);
-				if (!ptep)
-					continue;
-				if (pte_needs_flush(*ptep))
-					flush_user_cache_page(vma,
-						addr + i * PAGE_SIZE);
-				/* Optimise accesses to the same table? */
-				pte_unmap(ptep);
-			}
-		} else {
-			/*
-			 * The TLB is the engine of coherence on parisc:
-			 * The CPU is entitled to speculate any page
-			 * with a TLB mapping, so here we kill the
-			 * mapping then flush the page along a special
-			 * flush only alias mapping. This guarantees that
-			 * the page is no-longer in the cache for any
-			 * process and nor may it be speculatively read
-			 * in (until the user or kernel specifically
-			 * accesses it, of course)
-			 */
-			for (i = 0; i < nr; i++)
-				flush_tlb_page(vma, addr + i * PAGE_SIZE);
-			if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
+		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
 					!= (addr & (SHM_COLOUR - 1))) {
-				for (i = 0; i < nr; i++)
-					__flush_cache_page(vma,
-						addr + i * PAGE_SIZE,
-						(pfn + i) * PAGE_SIZE);
-				/*
-				 * Software is allowed to have any number
-				 * of private mappings to a page.
-				 */
-				if (!(vma->vm_flags & VM_SHARED))
-					continue;
-				if (old_addr)
-					pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
-						old_addr, addr, vma->vm_file);
-				if (nr == folio_nr_pages(folio))
-					old_addr = addr;
-			}
+			for (i = 0; i < nr; i++)
+				__flush_cache_page(vma,
+					addr + i * PAGE_SIZE,
+					(pfn + i) * PAGE_SIZE);
+			/*
+			 * Software is allowed to have any number
+			 * of private mappings to a page.
+			 */
+			if (!(vma->vm_flags & VM_SHARED))
+				continue;
+			if (old_addr)
+				pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
+					old_addr, addr, vma->vm_file);
+			if (nr == folio_nr_pages(folio))
+				old_addr = addr;
 		}
 		WARN_ON(++count == 4096);
 	}
···
 extern void clear_user_page_asm(void *, unsigned long);
 extern void copy_user_page_asm(void *, void *, unsigned long);
 
-void flush_kernel_dcache_page_addr(const void *addr)
-{
-	unsigned long flags;
-
-	flush_kernel_dcache_page_asm(addr);
-	purge_tlb_start(flags);
-	pdtlb(SR_KERNEL, addr);
-	purge_tlb_end(flags);
-}
-EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
-
 static void flush_cache_page_if_present(struct vm_area_struct *vma,
-	unsigned long vmaddr, unsigned long pfn)
+	unsigned long vmaddr)
 {
+#if CONFIG_FLUSH_PAGE_ACCESSED
 	bool needs_flush = false;
-	pte_t *ptep;
+	pte_t *ptep, pte;
 
-	/*
-	 * The pte check is racy and sometimes the flush will trigger
-	 * a non-access TLB miss. Hopefully, the page has already been
-	 * flushed.
-	 */
 	ptep = get_ptep(vma->vm_mm, vmaddr);
 	if (ptep) {
-		needs_flush = pte_needs_flush(*ptep);
+		pte = ptep_get(ptep);
+		needs_flush = pte_needs_flush(pte);
 		pte_unmap(ptep);
 	}
 	if (needs_flush)
-		flush_cache_page(vma, vmaddr, pfn);
+		__flush_cache_page(vma, vmaddr, PFN_PHYS(pte_pfn(pte)));
+#else
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned long physaddr = get_upa(mm, vmaddr);
+
+	if (physaddr)
+		__flush_cache_page(vma, vmaddr, PAGE_ALIGN_DOWN(physaddr));
+#endif
 }
 
 void copy_user_highpage(struct page *to, struct page *from,
···
 
 	kfrom = kmap_local_page(from);
 	kto = kmap_local_page(to);
-	flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
+	__flush_cache_page(vma, vaddr, PFN_PHYS(page_to_pfn(from)));
 	copy_page_asm(kto, kfrom);
 	kunmap_local(kto);
 	kunmap_local(kfrom);
···
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		unsigned long user_vaddr, void *dst, void *src, int len)
 {
-	flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
+	__flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
 	memcpy(dst, src, len);
-	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
+	flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(dst));
 }
 
 void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 		unsigned long user_vaddr, void *dst, void *src, int len)
 {
-	flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
+	__flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
 	memcpy(dst, src, len);
+	flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(src));
 }
 
 /* __flush_tlb_range()
···
 
 static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-	unsigned long addr, pfn;
-	pte_t *ptep;
+	unsigned long addr;
 
-	for (addr = start; addr < end; addr += PAGE_SIZE) {
-		bool needs_flush = false;
-		/*
-		 * The vma can contain pages that aren't present. Although
-		 * the pte search is expensive, we need the pte to find the
-		 * page pfn and to check whether the page should be flushed.
-		 */
-		ptep = get_ptep(vma->vm_mm, addr);
-		if (ptep) {
-			needs_flush = pte_needs_flush(*ptep);
-			pfn = pte_pfn(*ptep);
-			pte_unmap(ptep);
-		}
-		if (needs_flush) {
-			if (parisc_requires_coherency()) {
-				flush_user_cache_page(vma, addr);
-			} else {
-				if (WARN_ON(!pfn_valid(pfn)))
-					return;
-				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
-			}
-		}
-	}
+	for (addr = start; addr < end; addr += PAGE_SIZE)
+		flush_cache_page_if_present(vma, addr);
 }
 
 static inline unsigned long mm_total_size(struct mm_struct *mm)
···
 		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
 			return;
 		flush_tlb_range(vma, start, end);
-		flush_cache_all();
+		if (vma->vm_flags & VM_EXEC)
+			flush_cache_all();
+		else
+			flush_data_cache();
 		return;
 	}
 
-	flush_cache_pages(vma, start, end);
+	flush_cache_pages(vma, start & PAGE_MASK, end);
 }
 
 void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
 {
-	if (WARN_ON(!pfn_valid(pfn)))
-		return;
-	if (parisc_requires_coherency())
-		flush_user_cache_page(vma, vmaddr);
-	else
-		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+	__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 }
 
 void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
···
 	if (!PageAnon(page))
 		return;
 
-	if (parisc_requires_coherency()) {
-		if (vma->vm_flags & VM_SHARED)
-			flush_data_cache();
-		else
-			flush_user_cache_page(vma, vmaddr);
+	__flush_cache_page(vma, vmaddr, PFN_PHYS(page_to_pfn(page)));
+}
+
+int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr,
+			   pte_t *ptep)
+{
+	pte_t pte = ptep_get(ptep);
+
+	if (!pte_young(pte))
+		return 0;
+	set_pte(ptep, pte_mkold(pte));
+#if CONFIG_FLUSH_PAGE_ACCESSED
+	__flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(pte)));
+#endif
+	return 1;
+}
+
+/*
+ * After a PTE is cleared, we have no way to flush the cache for
+ * the physical page. On PA8800 and PA8900 processors, these lines
+ * can cause random cache corruption. Thus, we must flush the cache
+ * as well as the TLB when clearing a PTE that's valid.
+ */
+pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr,
+		       pte_t *ptep)
+{
+	struct mm_struct *mm = (vma)->vm_mm;
+	pte_t pte = ptep_get_and_clear(mm, addr, ptep);
+	unsigned long pfn = pte_pfn(pte);
+
+	if (pfn_valid(pfn))
+		__flush_cache_page(vma, addr, PFN_PHYS(pfn));
+	else if (pte_accessible(mm, pte))
+		flush_tlb_page(vma, addr);
+
+	return pte;
+}
+
+/*
+ * The physical address for pages in the ioremap case can be obtained
+ * from the vm_struct struct. I wasn't able to successfully handle the
+ * vmalloc and vmap cases. We have an array of struct page pointers in
+ * the uninitialized vmalloc case but the flush failed using page_to_pfn.
+ */
+void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+	unsigned long addr, physaddr;
+	struct vm_struct *vm;
+
+	/* Prevent cache move-in */
+	flush_tlb_kernel_range(start, end);
+
+	if (end - start >= parisc_cache_flush_threshold) {
+		flush_cache_all();
 		return;
 	}
 
-	flush_tlb_page(vma, vmaddr);
-	preempt_disable();
-	flush_dcache_page_asm(page_to_phys(page), vmaddr);
-	preempt_enable();
-}
+	if (WARN_ON_ONCE(!is_vmalloc_addr((void *)start))) {
+		flush_cache_all();
+		return;
+	}
 
+	vm = find_vm_area((void *)start);
+	if (WARN_ON_ONCE(!vm)) {
+		flush_cache_all();
+		return;
+	}
+
+	/* The physical addresses of IOREMAP regions are contiguous */
+	if (vm->flags & VM_IOREMAP) {
+		physaddr = vm->phys_addr;
+		for (addr = start; addr < end; addr += PAGE_SIZE) {
+			preempt_disable();
+			flush_dcache_page_asm(physaddr, start);
+			flush_icache_page_asm(physaddr, start);
+			preempt_enable();
+			physaddr += PAGE_SIZE;
+		}
+		return;
+	}
+
+	flush_cache_all();
+}
+EXPORT_SYMBOL(flush_cache_vmap);
+
+/*
+ * The vm_struct has been retired and the page table is set up. The
+ * last page in the range is a guard page. Its physical address can't
+ * be determined using lpa, so there is no way to flush the range
+ * using flush_dcache_page_asm.
+ */
+void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+	/* Prevent cache move-in */
+	flush_tlb_kernel_range(start, end);
+	flush_data_cache();
+}
+EXPORT_SYMBOL(flush_cache_vunmap);
+
+/*
+ * On systems with PA8800/PA8900 processors, there is no way to flush
+ * a vmap range other than using the architected loop to flush the
+ * entire cache. The page directory is not set up, so we can't use
+ * fdc, etc. FDCE/FICE don't work to flush a portion of the cache.
+ * L2 is physically indexed but FDCE/FICE instructions in virtual
+ * mode output their virtual address on the core bus, not their
+ * real address. As a result, the L2 cache index formed from the
+ * virtual address will most likely not be the same as the L2 index
+ * formed from the real address.
+ */
 void flush_kernel_vmap_range(void *vaddr, int size)
 {
 	unsigned long start = (unsigned long)vaddr;
 	unsigned long end = start + size;
 
-	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
-	    (unsigned long)size >= parisc_cache_flush_threshold) {
-		flush_tlb_kernel_range(start, end);
-		flush_data_cache();
+	flush_tlb_kernel_range(start, end);
+
+	if (!static_branch_likely(&parisc_has_dcache))
+		return;
+
+	/* If interrupts are disabled, we can only do local flush */
+	if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
+		flush_data_cache_local(NULL);
 		return;
 	}
 
-	flush_kernel_dcache_range_asm(start, end);
-	flush_tlb_kernel_range(start, end);
+	flush_data_cache();
 }
 EXPORT_SYMBOL(flush_kernel_vmap_range);
···
 	/* Ensure DMA is complete */
 	asm_syncdma();
 
-	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
-	    (unsigned long)size >= parisc_cache_flush_threshold) {
-		flush_tlb_kernel_range(start, end);
-		flush_data_cache();
+	flush_tlb_kernel_range(start, end);
+
+	if (!static_branch_likely(&parisc_has_dcache))
+		return;
+
+	/* If interrupts are disabled, we can only do local flush */
+	if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
+		flush_data_cache_local(NULL);
 		return;
 	}
 
-	purge_kernel_dcache_range_asm(start, end);
-	flush_tlb_kernel_range(start, end);
+	flush_data_cache();
 }
 EXPORT_SYMBOL(invalidate_kernel_vmap_range);
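
Usage note (context only, not part of this pull): flush_kernel_vmap_range()
and invalidate_kernel_vmap_range(), whose parisc implementations are reworked
above, are the interfaces callers are expected to use around I/O to a
vmap/vmalloc area, as described in Documentation/core-api/cachetlb.rst. A
minimal, hypothetical caller-side sketch; the buffer and length names are
illustrative, not from this patch:

	void *buf = vmalloc(len);

	/* Write-out: the CPU filled buf, so write back any dirty cache
	 * lines before the device reads the memory. */
	flush_kernel_vmap_range(buf, len);
	/* ... submit write I/O covering buf here ... */

	/* Read-in: after the device has written into buf, discard any
	 * stale cache lines before the CPU looks at the data. */
	invalidate_kernel_vmap_range(buf, len);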