Merge branch 'parisc-3.17-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc updates from Helge Deller:
"The most important patch is a new Light Weight Syscall (LWS) for 8,
16, 32 and 64 bit atomic CAS operations which is required in order to
be able to implement the atomic gcc builtins on our platform.

Other than that, we wire up the seccomp, getrandom and memfd_create
syscalls, fix a minor off-by-one bug and a wrong printk string"

* 'parisc-3.17-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
parisc: Implement new LWS CAS supporting 64 bit operations.
parisc: Wire up seccomp, getrandom and memfd_create syscalls
parisc: dino: fix %d confusingly prefixed with 0x in format string
parisc: sys_hpux: NUL terminator is one past the end

Changed files
+280 -8
arch
drivers
parisc
+16
arch/parisc/Kconfig
··· 321 321 322 322 source "arch/parisc/Kconfig.debug" 323 323 324 + config SECCOMP 325 + def_bool y 326 + prompt "Enable seccomp to safely compute untrusted bytecode" 327 + ---help--- 328 + This kernel feature is useful for number crunching applications 329 + that may need to compute untrusted bytecode during their 330 + execution. By using pipes or other transports made available to 331 + the process as file descriptors supporting the read/write 332 + syscalls, it's possible to isolate those applications in 333 + their own address space using seccomp. Once seccomp is 334 + enabled via prctl(PR_SET_SECCOMP), it cannot be disabled 335 + and the task is only allowed to execute a few safe syscalls 336 + defined by each seccomp mode. 337 + 338 + If unsure, say Y. Only embedded should say N here. 339 + 324 340 source "security/Kconfig" 325 341 326 342 source "crypto/Kconfig"
+1 -1
arch/parisc/hpux/sys_hpux.c
··· 456 456 } 457 457 458 458 /* String could be altered by userspace after strlen_user() */ 459 - fsname[len] = '\0'; 459 + fsname[len - 1] = '\0'; 460 460 461 461 printk(KERN_DEBUG "that is '%s' as (char *)\n", fsname); 462 462 if ( !strcmp(fsname, "hfs") ) {
+16
arch/parisc/include/asm/seccomp.h
··· 1 + #ifndef _ASM_PARISC_SECCOMP_H 2 + #define _ASM_PARISC_SECCOMP_H 3 + 4 + #include <linux/unistd.h> 5 + 6 + #define __NR_seccomp_read __NR_read 7 + #define __NR_seccomp_write __NR_write 8 + #define __NR_seccomp_exit __NR_exit 9 + #define __NR_seccomp_sigreturn __NR_rt_sigreturn 10 + 11 + #define __NR_seccomp_read_32 __NR_read 12 + #define __NR_seccomp_write_32 __NR_write 13 + #define __NR_seccomp_exit_32 __NR_exit 14 + #define __NR_seccomp_sigreturn_32 __NR_rt_sigreturn 15 + 16 + #endif /* _ASM_PARISC_SECCOMP_H */
+4 -1
arch/parisc/include/asm/thread_info.h
··· 60 60 #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ 61 61 #define TIF_SINGLESTEP 9 /* single stepping? */ 62 62 #define TIF_BLOCKSTEP 10 /* branch stepping? */ 63 + #define TIF_SECCOMP 11 /* secure computing */ 63 64 64 65 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 65 66 #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) ··· 71 70 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 72 71 #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) 73 72 #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) 73 + #define _TIF_SECCOMP (1 << TIF_SECCOMP) 74 74 75 75 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ 76 76 _TIF_NEED_RESCHED) 77 77 #define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ 78 - _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT) 78 + _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT | \ 79 + _TIF_SECCOMP) 79 80 80 81 #ifdef CONFIG_64BIT 81 82 # ifdef CONFIG_COMPAT
+4 -1
arch/parisc/include/uapi/asm/unistd.h
··· 830 830 #define __NR_sched_getattr (__NR_Linux + 335) 831 831 #define __NR_utimes (__NR_Linux + 336) 832 832 #define __NR_renameat2 (__NR_Linux + 337) 833 + #define __NR_seccomp (__NR_Linux + 338) 834 + #define __NR_getrandom (__NR_Linux + 339) 835 + #define __NR_memfd_create (__NR_Linux + 340) 833 836 834 - #define __NR_Linux_syscalls (__NR_renameat2 + 1) 837 + #define __NR_Linux_syscalls (__NR_memfd_create + 1) 835 838 836 839 837 840 #define __IGNORE_select /* newselect */
+6
arch/parisc/kernel/ptrace.c
··· 270 270 { 271 271 long ret = 0; 272 272 273 + /* Do the secure computing check first. */ 274 + if (secure_computing(regs->gr[20])) { 275 + /* seccomp failures shouldn't expose any additional code. */ 276 + return -1; 277 + } 278 + 273 279 if (test_thread_flag(TIF_SYSCALL_TRACE) && 274 280 tracehook_report_syscall_entry(regs)) 275 281 ret = -1L;
+229 -4
arch/parisc/kernel/syscall.S
··· 74 74 /* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */ 75 75 /* Light-weight-syscall entry must always be located at 0xb0 */ 76 76 /* WARNING: Keep this number updated with table size changes */ 77 - #define __NR_lws_entries (2) 77 + #define __NR_lws_entries (3) 78 78 79 79 lws_entry: 80 80 gate lws_start, %r0 /* increase privilege */ ··· 502 502 503 503 504 504 /*************************************************** 505 - Implementing CAS as an atomic operation: 505 + Implementing 32bit CAS as an atomic operation: 506 506 507 507 %r26 - Address to examine 508 508 %r25 - Old value to check (old) ··· 659 659 ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page) 660 660 661 661 662 + /*************************************************** 663 + New CAS implementation which uses pointers and variable size 664 + information. The value pointed by old and new MUST NOT change 665 + while performing CAS. The lock only protect the value at %r26. 666 + 667 + %r26 - Address to examine 668 + %r25 - Pointer to the value to check (old) 669 + %r24 - Pointer to the value to set (new) 670 + %r23 - Size of the variable (0/1/2/3 for 8/16/32/64 bit) 671 + %r28 - Return non-zero on failure 672 + %r21 - Kernel error code 673 + 674 + %r21 has the following meanings: 675 + 676 + EAGAIN - CAS is busy, ldcw failed, try again. 677 + EFAULT - Read or write failed. 
678 + 679 + Scratch: r20, r22, r28, r29, r1, fr4 (32bit for 64bit CAS only) 680 + 681 + ****************************************************/ 682 + 683 + /* ELF32 Process entry path */ 684 + lws_compare_and_swap_2: 685 + #ifdef CONFIG_64BIT 686 + /* Clip the input registers */ 687 + depdi 0, 31, 32, %r26 688 + depdi 0, 31, 32, %r25 689 + depdi 0, 31, 32, %r24 690 + depdi 0, 31, 32, %r23 691 + #endif 692 + 693 + /* Check the validity of the size pointer */ 694 + subi,>>= 4, %r23, %r0 695 + b,n lws_exit_nosys 696 + 697 + /* Jump to the functions which will load the old and new values into 698 + registers depending on the their size */ 699 + shlw %r23, 2, %r29 700 + blr %r29, %r0 701 + nop 702 + 703 + /* 8bit load */ 704 + 4: ldb 0(%sr3,%r25), %r25 705 + b cas2_lock_start 706 + 5: ldb 0(%sr3,%r24), %r24 707 + nop 708 + nop 709 + nop 710 + nop 711 + nop 712 + 713 + /* 16bit load */ 714 + 6: ldh 0(%sr3,%r25), %r25 715 + b cas2_lock_start 716 + 7: ldh 0(%sr3,%r24), %r24 717 + nop 718 + nop 719 + nop 720 + nop 721 + nop 722 + 723 + /* 32bit load */ 724 + 8: ldw 0(%sr3,%r25), %r25 725 + b cas2_lock_start 726 + 9: ldw 0(%sr3,%r24), %r24 727 + nop 728 + nop 729 + nop 730 + nop 731 + nop 732 + 733 + /* 64bit load */ 734 + #ifdef CONFIG_64BIT 735 + 10: ldd 0(%sr3,%r25), %r25 736 + 11: ldd 0(%sr3,%r24), %r24 737 + #else 738 + /* Load new value into r22/r23 - high/low */ 739 + 10: ldw 0(%sr3,%r25), %r22 740 + 11: ldw 4(%sr3,%r25), %r23 741 + /* Load new value into fr4 for atomic store later */ 742 + 12: flddx 0(%sr3,%r24), %fr4 743 + #endif 744 + 745 + cas2_lock_start: 746 + /* Load start of lock table */ 747 + ldil L%lws_lock_start, %r20 748 + ldo R%lws_lock_start(%r20), %r28 749 + 750 + /* Extract four bits from r26 and hash lock (Bits 4-7) */ 751 + extru %r26, 27, 4, %r20 752 + 753 + /* Find lock to use, the hash is either one of 0 to 754 + 15, multiplied by 16 (keep it 16-byte aligned) 755 + and add to the lock table offset. 
*/ 756 + shlw %r20, 4, %r20 757 + add %r20, %r28, %r20 758 + 759 + rsm PSW_SM_I, %r0 /* Disable interrupts */ 760 + /* COW breaks can cause contention on UP systems */ 761 + LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */ 762 + cmpb,<>,n %r0, %r28, cas2_action /* Did we get it? */ 763 + cas2_wouldblock: 764 + ldo 2(%r0), %r28 /* 2nd case */ 765 + ssm PSW_SM_I, %r0 766 + b lws_exit /* Contended... */ 767 + ldo -EAGAIN(%r0), %r21 /* Spin in userspace */ 768 + 769 + /* 770 + prev = *addr; 771 + if ( prev == old ) 772 + *addr = new; 773 + return prev; 774 + */ 775 + 776 + /* NOTES: 777 + This all works becuse intr_do_signal 778 + and schedule both check the return iasq 779 + and see that we are on the kernel page 780 + so this process is never scheduled off 781 + or is ever sent any signal of any sort, 782 + thus it is wholly atomic from usrspaces 783 + perspective 784 + */ 785 + cas2_action: 786 + /* Jump to the correct function */ 787 + blr %r29, %r0 788 + /* Set %r28 as non-zero for now */ 789 + ldo 1(%r0),%r28 790 + 791 + /* 8bit CAS */ 792 + 13: ldb,ma 0(%sr3,%r26), %r29 793 + sub,= %r29, %r25, %r0 794 + b,n cas2_end 795 + 14: stb,ma %r24, 0(%sr3,%r26) 796 + b cas2_end 797 + copy %r0, %r28 798 + nop 799 + nop 800 + 801 + /* 16bit CAS */ 802 + 15: ldh,ma 0(%sr3,%r26), %r29 803 + sub,= %r29, %r25, %r0 804 + b,n cas2_end 805 + 16: sth,ma %r24, 0(%sr3,%r26) 806 + b cas2_end 807 + copy %r0, %r28 808 + nop 809 + nop 810 + 811 + /* 32bit CAS */ 812 + 17: ldw,ma 0(%sr3,%r26), %r29 813 + sub,= %r29, %r25, %r0 814 + b,n cas2_end 815 + 18: stw,ma %r24, 0(%sr3,%r26) 816 + b cas2_end 817 + copy %r0, %r28 818 + nop 819 + nop 820 + 821 + /* 64bit CAS */ 822 + #ifdef CONFIG_64BIT 823 + 19: ldd,ma 0(%sr3,%r26), %r29 824 + sub,= %r29, %r25, %r0 825 + b,n cas2_end 826 + 20: std,ma %r24, 0(%sr3,%r26) 827 + copy %r0, %r28 828 + #else 829 + /* Compare first word */ 830 + 19: ldw,ma 0(%sr3,%r26), %r29 831 + sub,= %r29, %r22, %r0 832 + b,n cas2_end 833 + /* Compare second word */ 
834 + 20: ldw,ma 4(%sr3,%r26), %r29 835 + sub,= %r29, %r23, %r0 836 + b,n cas2_end 837 + /* Perform the store */ 838 + 21: fstdx %fr4, 0(%sr3,%r26) 839 + copy %r0, %r28 840 + #endif 841 + 842 + cas2_end: 843 + /* Free lock */ 844 + stw,ma %r20, 0(%sr2,%r20) 845 + /* Enable interrupts */ 846 + ssm PSW_SM_I, %r0 847 + /* Return to userspace, set no error */ 848 + b lws_exit 849 + copy %r0, %r21 850 + 851 + 22: 852 + /* Error occurred on load or store */ 853 + /* Free lock */ 854 + stw %r20, 0(%sr2,%r20) 855 + ssm PSW_SM_I, %r0 856 + ldo 1(%r0),%r28 857 + b lws_exit 858 + ldo -EFAULT(%r0),%r21 /* set errno */ 859 + nop 860 + nop 861 + nop 862 + 863 + /* Exception table entries, for the load and store, return EFAULT. 864 + Each of the entries must be relocated. */ 865 + ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 22b-linux_gateway_page) 866 + ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 22b-linux_gateway_page) 867 + ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 22b-linux_gateway_page) 868 + ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 22b-linux_gateway_page) 869 + ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 22b-linux_gateway_page) 870 + ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 22b-linux_gateway_page) 871 + ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 22b-linux_gateway_page) 872 + ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 22b-linux_gateway_page) 873 + ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 22b-linux_gateway_page) 874 + ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 22b-linux_gateway_page) 875 + ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 22b-linux_gateway_page) 876 + ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 22b-linux_gateway_page) 877 + ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 22b-linux_gateway_page) 878 + ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 22b-linux_gateway_page) 879 + ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 22b-linux_gateway_page) 880 + 
ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 22b-linux_gateway_page) 881 + #ifndef CONFIG_64BIT 882 + ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 22b-linux_gateway_page) 883 + ASM_EXCEPTIONTABLE_ENTRY(21b-linux_gateway_page, 22b-linux_gateway_page) 884 + #endif 885 + 662 886 /* Make sure nothing else is placed on this page */ 663 887 .align PAGE_SIZE 664 888 END(linux_gateway_page) ··· 899 675 /* Light-weight-syscall table */ 900 676 /* Start of lws table. */ 901 677 ENTRY(lws_table) 902 - LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic compare and swap */ 903 - LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic compare and swap */ 678 + LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic 32bit CAS */ 679 + LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic 32bit CAS */ 680 + LWS_ENTRY(compare_and_swap_2) /* 2 - ELF32 Atomic 64bit CAS */ 904 681 END(lws_table) 905 682 /* End of lws table */ 906 683
+3
arch/parisc/kernel/syscall_table.S
··· 433 433 ENTRY_SAME(sched_getattr) /* 335 */ 434 434 ENTRY_COMP(utimes) 435 435 ENTRY_SAME(renameat2) 436 + ENTRY_SAME(seccomp) 437 + ENTRY_SAME(getrandom) 438 + ENTRY_SAME(memfd_create) /* 340 */ 436 439 437 440 /* Nothing yet */ 438 441
+1 -1
drivers/parisc/dino.c
··· 913 913 printk("%s version %s found at 0x%lx\n", name, version, hpa); 914 914 915 915 if (!request_mem_region(hpa, PAGE_SIZE, name)) { 916 - printk(KERN_ERR "DINO: Hey! Someone took my MMIO space (0x%ld)!\n", 916 + printk(KERN_ERR "DINO: Hey! Someone took my MMIO space (0x%lx)!\n", 917 917 hpa); 918 918 return 1; 919 919 }