Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'parisc-for-6.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc updates from Helge Deller:

- The parisc kernel wrongly allows reading from read-protected
userspace memory without faulting, e.g. when userspace uses
mprotect() to read-protect a memory area and then uses a pointer to
this memory in a write(2, addr, 1) syscall.

To fix this issue, Dave Anglin developed a set of patches which use
the proberi assembler instruction to additionally check read access
permissions at runtime.

- Randy Dunlap contributed two patches to fix a minor typo and to
explain why a 32-bit compiler is needed although a 64-bit kernel is
built

* tag 'parisc-for-6.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
parisc: Revise __get_user() to probe user read access
parisc: Revise gateway LWS calls to probe user read access
parisc: Drop WARN_ON_ONCE() from flush_cache_vmap
parisc: Try to fixup kernel exception in bad_area_nosemaphore path of do_page_fault()
parisc: Define and use set_pte_at()
parisc: Rename pte_needs_flush() to pte_needs_cache_flush() in cache.c
parisc: Check region is readable by user in raw_copy_from_user()
parisc: Update comments in make_insert_tlb
parisc: Makefile: explain that 64BIT requires both 32-bit and 64-bit compilers
parisc: Makefile: fix a typo in palo.conf

+112 -26
+4 -2
arch/parisc/Makefile
··· 39 39 40 40 export LD_BFD 41 41 42 - # Set default 32 bits cross compilers for vdso 42 + # Set default 32 bits cross compilers for vdso. 43 + # This means that for 64BIT, both the 64-bit tools and the 32-bit tools 44 + # need to be in the path. 43 45 CC_ARCHES_32 = hppa hppa2.0 hppa1.1 44 46 CC_SUFFIXES = linux linux-gnu unknown-linux-gnu suse-linux 45 47 CROSS32_COMPILE := $(call cc-cross-prefix, \ ··· 141 139 fi 142 140 @if test ! -f "$(PALOCONF)"; then \ 143 141 cp $(srctree)/arch/parisc/defpalo.conf $(objtree)/palo.conf; \ 144 - echo 'A generic palo config file ($(objree)/palo.conf) has been created for you.'; \ 142 + echo 'A generic palo config file ($(objtree)/palo.conf) has been created for you.'; \ 145 143 echo 'You should check it and re-run "make palo".'; \ 146 144 echo 'WARNING: the "lifimage" file is now placed in this directory by default!'; \ 147 145 false; \
+4 -3
arch/parisc/include/asm/pgtable.h
··· 276 276 #define pte_none(x) (pte_val(x) == 0) 277 277 #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) 278 278 #define pte_user(x) (pte_val(x) & _PAGE_USER) 279 - #define pte_clear(mm, addr, xp) set_pte(xp, __pte(0)) 279 + #define pte_clear(mm, addr, xp) set_pte_at((mm), (addr), (xp), __pte(0)) 280 280 281 281 #define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK) 282 282 #define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT) ··· 392 392 } 393 393 } 394 394 #define set_ptes set_ptes 395 + #define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1) 395 396 396 397 /* Used for deferring calls to flush_dcache_page() */ 397 398 ··· 457 456 if (!pte_young(pte)) { 458 457 return 0; 459 458 } 460 - set_pte(ptep, pte_mkold(pte)); 459 + set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte)); 461 460 return 1; 462 461 } 463 462 ··· 467 466 struct mm_struct; 468 467 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 469 468 { 470 - set_pte(ptep, pte_wrprotect(*ptep)); 469 + set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep)); 471 470 } 472 471 473 472 #define pte_same(A,B) (pte_val(A) == pte_val(B))
+28
arch/parisc/include/asm/special_insns.h
··· 32 32 pa; \ 33 33 }) 34 34 35 + /** 36 + * prober_user() - Probe user read access 37 + * @sr: Space register. 38 + * @va: Virtual address. 39 + * 40 + * Return: Non-zero if address is accessible. 41 + * 42 + * Due to the way _PAGE_READ is handled in TLB entries, we need 43 + * a special check to determine whether a user address is accessible. 44 + * The ldb instruction does the initial access check. If it is 45 + * successful, the probe instruction checks user access rights. 46 + */ 47 + #define prober_user(sr, va) ({ \ 48 + unsigned long read_allowed; \ 49 + __asm__ __volatile__( \ 50 + "copy %%r0,%0\n" \ 51 + "8:\tldb 0(%%sr%1,%2),%%r0\n" \ 52 + "\tproberi (%%sr%1,%2),%3,%0\n" \ 53 + "9:\n" \ 54 + ASM_EXCEPTIONTABLE_ENTRY(8b, 9b, \ 55 + "or %%r0,%%r0,%%r0") \ 56 + : "=&r" (read_allowed) \ 57 + : "i" (sr), "r" (va), "i" (PRIV_USER) \ 58 + : "memory" \ 59 + ); \ 60 + read_allowed; \ 61 + }) 62 + 35 63 #define CR_EIEM 15 /* External Interrupt Enable Mask */ 36 64 #define CR_CR16 16 /* CR16 Interval Timer */ 37 65 #define CR_EIRR 23 /* External Interrupt Request Register */
+18 -3
arch/parisc/include/asm/uaccess.h
··· 42 42 __gu_err; \ 43 43 }) 44 44 45 - #define __get_user(val, ptr) \ 46 - ({ \ 47 - __get_user_internal(SR_USER, val, ptr); \ 45 + #define __probe_user_internal(sr, error, ptr) \ 46 + ({ \ 47 + __asm__("\tproberi (%%sr%1,%2),%3,%0\n" \ 48 + "\tcmpiclr,= 1,%0,%0\n" \ 49 + "\tldi %4,%0\n" \ 50 + : "=r"(error) \ 51 + : "i"(sr), "r"(ptr), "i"(PRIV_USER), \ 52 + "i"(-EFAULT)); \ 53 + }) 54 + 55 + #define __get_user(val, ptr) \ 56 + ({ \ 57 + register long __gu_err; \ 58 + \ 59 + __gu_err = __get_user_internal(SR_USER, val, ptr); \ 60 + if (likely(!__gu_err)) \ 61 + __probe_user_internal(SR_USER, __gu_err, ptr); \ 62 + __gu_err; \ 48 63 }) 49 64 50 65 #define __get_user_asm(sr, val, ldx, ptr) \
+3 -3
arch/parisc/kernel/cache.c
··· 429 429 return ptep; 430 430 } 431 431 432 - static inline bool pte_needs_flush(pte_t pte) 432 + static inline bool pte_needs_cache_flush(pte_t pte) 433 433 { 434 434 return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE)) 435 435 == (_PAGE_PRESENT | _PAGE_ACCESSED); ··· 630 630 ptep = get_ptep(vma->vm_mm, vmaddr); 631 631 if (ptep) { 632 632 pte = ptep_get(ptep); 633 - needs_flush = pte_needs_flush(pte); 633 + needs_flush = pte_needs_cache_flush(pte); 634 634 pte_unmap(ptep); 635 635 } 636 636 if (needs_flush) ··· 841 841 } 842 842 843 843 vm = find_vm_area((void *)start); 844 - if (WARN_ON_ONCE(!vm)) { 844 + if (!vm) { 845 845 flush_cache_all(); 846 846 return; 847 847 }
+12 -5
arch/parisc/kernel/entry.S
··· 499 499 * this happens is quite subtle, read below */ 500 500 .macro make_insert_tlb spc,pte,prot,tmp 501 501 space_to_prot \spc \prot /* create prot id from space */ 502 + 503 + #if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT 504 + /* need to drop DMB bit, as it's used as SPECIAL flag */ 505 + depi 0,_PAGE_SPECIAL_BIT,1,\pte 506 + #endif 507 + 502 508 /* The following is the real subtlety. This is depositing 503 509 * T <-> _PAGE_REFTRAP 504 510 * D <-> _PAGE_DIRTY ··· 517 511 * Finally, _PAGE_READ goes in the top bit of PL1 (so we 518 512 * trigger an access rights trap in user space if the user 519 513 * tries to read an unreadable page */ 520 - #if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT 521 - /* need to drop DMB bit, as it's used as SPECIAL flag */ 522 - depi 0,_PAGE_SPECIAL_BIT,1,\pte 523 - #endif 524 514 depd \pte,8,7,\prot 525 515 526 516 /* PAGE_USER indicates the page can be read with user privileges, 527 517 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1 528 - * contains _PAGE_READ) */ 518 + * contains _PAGE_READ). While the kernel can't directly write 519 + * user pages which have _PAGE_WRITE zero, it can read pages 520 + * which have _PAGE_READ zero (PL <= PL1). Thus, the kernel 521 + * exception fault handler doesn't trigger when reading pages 522 + * that aren't user read accessible */ 529 523 extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0 530 524 depdi 7,11,3,\prot 525 + 531 526 /* If we're a gateway page, drop PL2 back to zero for promotion 532 527 * to kernel privilege (so we can execute the page as kernel). 533 528 * Any privilege promotion page always denys read and write */
+21 -9
arch/parisc/kernel/syscall.S
··· 613 613 lws_compare_and_swap: 614 614 /* Trigger memory reference interruptions without writing to memory */ 615 615 1: ldw 0(%r26), %r28 616 + proberi (%r26), PRIV_USER, %r28 617 + comb,=,n %r28, %r0, lws_fault /* backwards, likely not taken */ 618 + nop 616 619 2: stbys,e %r0, 0(%r26) 617 620 618 621 /* Calculate 8-bit hash index from virtual address */ ··· 770 767 copy %r26, %r28 771 768 depi_safe 0, 31, 2, %r28 772 769 10: ldw 0(%r28), %r1 770 + proberi (%r28), PRIV_USER, %r1 771 + comb,=,n %r1, %r0, lws_fault /* backwards, likely not taken */ 772 + nop 773 773 11: stbys,e %r0, 0(%r28) 774 774 775 775 /* Calculate 8-bit hash index from virtual address */ ··· 957 951 958 952 /* 8-bit exchange */ 959 953 1: ldb 0(%r24), %r20 954 + proberi (%r24), PRIV_USER, %r20 955 + comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ 956 + nop 960 957 copy %r23, %r20 961 958 depi_safe 0, 31, 2, %r20 962 959 b atomic_xchg_start 963 960 2: stbys,e %r0, 0(%r20) 964 - nop 965 - nop 966 - nop 967 961 968 962 /* 16-bit exchange */ 969 963 3: ldh 0(%r24), %r20 964 + proberi (%r24), PRIV_USER, %r20 965 + comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ 966 + nop 970 967 copy %r23, %r20 971 968 depi_safe 0, 31, 2, %r20 972 969 b atomic_xchg_start 973 970 4: stbys,e %r0, 0(%r20) 974 - nop 975 - nop 976 - nop 977 971 978 972 /* 32-bit exchange */ 979 973 5: ldw 0(%r24), %r20 974 + proberi (%r24), PRIV_USER, %r20 975 + comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ 976 + nop 980 977 b atomic_xchg_start 981 978 6: stbys,e %r0, 0(%r23) 982 - nop 983 - nop 984 - nop 985 979 nop 986 980 nop 987 981 988 982 /* 64-bit exchange */ 989 983 #ifdef CONFIG_64BIT 990 984 7: ldd 0(%r24), %r20 985 + proberi (%r24), PRIV_USER, %r20 986 + comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ 987 + nop 991 988 8: stdby,e %r0, 0(%r23) 992 989 #else 993 990 7: ldw 0(%r24), %r20 994 991 8: ldw 4(%r24), %r20 992 + proberi (%r24), PRIV_USER, %r20 993 + comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */ 994 + nop 995 995 copy %r23, %r20 996 996 depi_safe 0, 31, 2, %r20 997 997 9: stbys,e %r0, 0(%r20)
+18 -1
arch/parisc/lib/memcpy.c
··· 12 12 #include <linux/module.h> 13 13 #include <linux/compiler.h> 14 14 #include <linux/uaccess.h> 15 + #include <linux/mm.h> 15 16 16 17 #define get_user_space() mfsp(SR_USER) 17 18 #define get_kernel_space() SR_KERNEL ··· 33 32 unsigned long raw_copy_from_user(void *dst, const void __user *src, 34 33 unsigned long len) 35 34 { 35 + unsigned long start = (unsigned long) src; 36 + unsigned long end = start + len; 37 + unsigned long newlen = len; 38 + 36 39 mtsp(get_user_space(), SR_TEMP1); 37 40 mtsp(get_kernel_space(), SR_TEMP2); 38 - return pa_memcpy(dst, (void __force *)src, len); 41 + 42 + /* Check region is user accessible */ 43 + if (start) 44 + while (start < end) { 45 + if (!prober_user(SR_TEMP1, start)) { 46 + newlen = (start - (unsigned long) src); 47 + break; 48 + } 49 + start += PAGE_SIZE; 50 + /* align to page boundary which may have different permission */ 51 + start = PAGE_ALIGN_DOWN(start); 52 + } 53 + return len - newlen + pa_memcpy(dst, (void __force *)src, newlen); 39 54 } 40 55 EXPORT_SYMBOL(raw_copy_from_user); 41 56
+4
arch/parisc/mm/fault.c
··· 363 363 mmap_read_unlock(mm); 364 364 365 365 bad_area_nosemaphore: 366 + if (!user_mode(regs) && fixup_exception(regs)) { 367 + return; 368 + } 369 + 366 370 if (user_mode(regs)) { 367 371 int signo, si_code; 368 372