Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
[S390] dynamic page tables.
[S390] Add four level page tables for CONFIG_64BIT=y.
[S390] 1K/2K page table pages.
[S390] Remove a.out header file.
[S390] sclp_vt220: Fix vt220 initialization
[S390] qdio: avoid hang when establishing qdio queues
[S390] VMEM_MAX_PHYS overflow on 31 bit.
[S390] zcrypt: Do not start ap poll thread per default
[S390] Fix __ffs_word_loop/__ffz_word_loop inline assembly.
[S390] Wire up new timerfd syscalls.
[S390] Update default configuration.

+692 -345
+64 -23
arch/s390/defconfig
··· 1 # 2 # Automatically generated make config: don't edit 3 - # Linux kernel version: 2.6.23 4 - # Mon Oct 22 12:10:44 2007 5 # 6 CONFIG_MMU=y 7 CONFIG_ZONE_DMA=y 8 CONFIG_LOCKDEP_SUPPORT=y 9 CONFIG_STACKTRACE_SUPPORT=y 10 CONFIG_RWSEM_XCHGADD_ALGORITHM=y 11 # CONFIG_ARCH_HAS_ILOG2_U32 is not set 12 # CONFIG_ARCH_HAS_ILOG2_U64 is not set ··· 16 CONFIG_GENERIC_BUG=y 17 CONFIG_NO_IOMEM=y 18 CONFIG_NO_DMA=y 19 CONFIG_S390=y 20 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 21 ··· 34 CONFIG_POSIX_MQUEUE=y 35 # CONFIG_BSD_PROCESS_ACCT is not set 36 # CONFIG_TASKSTATS is not set 37 - # CONFIG_USER_NS is not set 38 CONFIG_AUDIT=y 39 # CONFIG_AUDITSYSCALL is not set 40 CONFIG_IKCONFIG=y ··· 42 CONFIG_CGROUPS=y 43 # CONFIG_CGROUP_DEBUG is not set 44 CONFIG_CGROUP_NS=y 45 - CONFIG_CGROUP_CPUACCT=y 46 # CONFIG_CPUSETS is not set 47 CONFIG_FAIR_GROUP_SCHED=y 48 CONFIG_FAIR_USER_SCHED=y 49 # CONFIG_FAIR_CGROUP_SCHED is not set 50 CONFIG_SYSFS_DEPRECATED=y 51 # CONFIG_RELAY is not set 52 CONFIG_BLK_DEV_INITRD=y 53 CONFIG_INITRAMFS_SOURCE="" 54 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set ··· 68 CONFIG_PRINTK=y 69 CONFIG_BUG=y 70 CONFIG_ELF_CORE=y 71 CONFIG_BASE_FULL=y 72 CONFIG_FUTEX=y 73 CONFIG_ANON_INODES=y 74 CONFIG_EPOLL=y 75 CONFIG_SIGNALFD=y 76 CONFIG_EVENTFD=y 77 CONFIG_SHMEM=y 78 CONFIG_VM_EVENT_COUNTERS=y 79 CONFIG_SLAB=y 80 # CONFIG_SLUB is not set 81 # CONFIG_SLOB is not set 82 CONFIG_RT_MUTEXES=y 83 # CONFIG_TINY_SHMEM is not set 84 CONFIG_BASE_SMALL=0 ··· 115 # CONFIG_DEFAULT_CFQ is not set 116 # CONFIG_DEFAULT_NOOP is not set 117 CONFIG_DEFAULT_IOSCHED="deadline" 118 119 # 120 # Base setup ··· 155 # CONFIG_PREEMPT_NONE is not set 156 # CONFIG_PREEMPT_VOLUNTARY is not set 157 CONFIG_PREEMPT=y 158 - CONFIG_PREEMPT_BKL=y 159 CONFIG_SELECT_MEMORY_MODEL=y 160 CONFIG_FLATMEM_MANUAL=y 161 # CONFIG_DISCONTIGMEM_MANUAL is not set ··· 169 CONFIG_ZONE_DMA_FLAG=1 170 CONFIG_BOUNCE=y 171 CONFIG_VIRT_TO_BUS=y 172 - CONFIG_HOLES_IN_ZONE=y 173 174 # 175 # I/O subsystem 
configuration ··· 197 # CONFIG_HZ_300 is not set 198 # CONFIG_HZ_1000 is not set 199 CONFIG_HZ=100 200 CONFIG_NO_IDLE_HZ=y 201 CONFIG_NO_IDLE_HZ_INIT=y 202 CONFIG_S390_HYPFS_FS=y ··· 219 # CONFIG_XFRM_USER is not set 220 # CONFIG_XFRM_SUB_POLICY is not set 221 # CONFIG_XFRM_MIGRATE is not set 222 CONFIG_NET_KEY=y 223 # CONFIG_NET_KEY_MIGRATE is not set 224 CONFIG_IUCV=m ··· 270 # CONFIG_NETWORK_SECMARK is not set 271 CONFIG_NETFILTER=y 272 # CONFIG_NETFILTER_DEBUG is not set 273 274 # 275 # Core Netfilter Configuration ··· 278 CONFIG_NETFILTER_NETLINK=m 279 CONFIG_NETFILTER_NETLINK_QUEUE=m 280 CONFIG_NETFILTER_NETLINK_LOG=m 281 - CONFIG_NF_CONNTRACK_ENABLED=m 282 CONFIG_NF_CONNTRACK=m 283 # CONFIG_NF_CT_ACCT is not set 284 # CONFIG_NF_CONNTRACK_MARK is not set ··· 305 # CONFIG_IP_NF_ARPTABLES is not set 306 307 # 308 - # IPv6: Netfilter Configuration (EXPERIMENTAL) 309 # 310 # CONFIG_NF_CONNTRACK_IPV6 is not set 311 # CONFIG_IP6_NF_QUEUE is not set ··· 362 CONFIG_CLS_U32_MARK=y 363 CONFIG_NET_CLS_RSVP=m 364 CONFIG_NET_CLS_RSVP6=m 365 # CONFIG_NET_EMATCH is not set 366 CONFIG_NET_CLS_ACT=y 367 CONFIG_NET_ACT_POLICE=y ··· 371 CONFIG_NET_ACT_NAT=m 372 # CONFIG_NET_ACT_PEDIT is not set 373 # CONFIG_NET_ACT_SIMP is not set 374 - CONFIG_NET_CLS_POLICE=y 375 # CONFIG_NET_CLS_IND is not set 376 CONFIG_NET_SCH_FIFO=y 377 ··· 379 # 380 # CONFIG_NET_PKTGEN is not set 381 # CONFIG_NET_TCPPROBE is not set 382 # CONFIG_AF_RXRPC is not set 383 # CONFIG_RFKILL is not set 384 # CONFIG_NET_9P is not set ··· 417 CONFIG_BLK_DEV_RAM=y 418 CONFIG_BLK_DEV_RAM_COUNT=16 419 CONFIG_BLK_DEV_RAM_SIZE=4096 420 - CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024 421 # CONFIG_CDROM_PKTCDVD is not set 422 # CONFIG_ATA_OVER_ETH is not set 423 ··· 434 CONFIG_DASD_EER=y 435 CONFIG_MISC_DEVICES=y 436 # CONFIG_EEPROM_93CX6 is not set 437 438 # 439 # SCSI device support ··· 516 # CONFIG_IBM_NEW_EMAC_TAH is not set 517 # CONFIG_IBM_NEW_EMAC_EMAC4 is not set 518 CONFIG_NETDEV_1000=y 519 CONFIG_NETDEV_10000=y 520 # 
CONFIG_TR is not set 521 # CONFIG_WAN is not set ··· 538 CONFIG_CCWGROUP=y 539 # CONFIG_PPP is not set 540 # CONFIG_SLIP is not set 541 - # CONFIG_SHAPER is not set 542 # CONFIG_NETCONSOLE is not set 543 # CONFIG_NETPOLL is not set 544 # CONFIG_NET_POLL_CONTROLLER is not set ··· 587 CONFIG_MONWRITER=m 588 CONFIG_S390_VMUR=m 589 # CONFIG_POWER_SUPPLY is not set 590 # CONFIG_WATCHDOG is not set 591 592 # ··· 614 # CONFIG_XFS_FS is not set 615 # CONFIG_GFS2_FS is not set 616 # CONFIG_OCFS2_FS is not set 617 - # CONFIG_MINIX_FS is not set 618 - # CONFIG_ROMFS_FS is not set 619 CONFIG_INOTIFY=y 620 CONFIG_INOTIFY_USER=y 621 # CONFIG_QUOTA is not set 622 - CONFIG_DNOTIFY=y 623 # CONFIG_AUTOFS_FS is not set 624 # CONFIG_AUTOFS4_FS is not set 625 # CONFIG_FUSE_FS is not set ··· 660 # CONFIG_EFS_FS is not set 661 # CONFIG_CRAMFS is not set 662 # CONFIG_VXFS_FS is not set 663 # CONFIG_HPFS_FS is not set 664 # CONFIG_QNX4FS_FS is not set 665 # CONFIG_SYSV_FS is not set 666 # CONFIG_UFS_FS is not set 667 CONFIG_NETWORK_FILESYSTEMS=y ··· 716 # CONFIG_NLS is not set 717 CONFIG_DLM=m 718 # CONFIG_DLM_DEBUG is not set 719 - CONFIG_INSTRUMENTATION=y 720 - # CONFIG_PROFILING is not set 721 - CONFIG_KPROBES=y 722 - # CONFIG_MARKERS is not set 723 724 # 725 # Kernel hacking 726 # 727 CONFIG_TRACE_IRQFLAGS_SUPPORT=y 728 # CONFIG_PRINTK_TIME is not set 729 CONFIG_ENABLE_MUST_CHECK=y 730 CONFIG_MAGIC_SYSRQ=y 731 # CONFIG_UNUSED_SYMBOLS is not set ··· 748 # CONFIG_DEBUG_INFO is not set 749 # CONFIG_DEBUG_VM is not set 750 # CONFIG_DEBUG_LIST is not set 751 # CONFIG_FRAME_POINTER is not set 752 CONFIG_FORCED_INLINING=y 753 # CONFIG_RCU_TORTURE_TEST is not set 754 # CONFIG_LKDTM is not set 755 # CONFIG_FAULT_INJECTION is not set 756 CONFIG_SAMPLES=y 757 758 # 759 # Security options ··· 771 CONFIG_CRYPTO_ALGAPI=y 772 CONFIG_CRYPTO_AEAD=m 773 CONFIG_CRYPTO_BLKCIPHER=y 774 CONFIG_CRYPTO_HASH=m 775 CONFIG_CRYPTO_MANAGER=y 776 CONFIG_CRYPTO_HMAC=m ··· 779 # CONFIG_CRYPTO_NULL is not set 780 # 
CONFIG_CRYPTO_MD4 is not set 781 CONFIG_CRYPTO_MD5=m 782 - # CONFIG_CRYPTO_SHA1 is not set 783 # CONFIG_CRYPTO_SHA256 is not set 784 # CONFIG_CRYPTO_SHA512 is not set 785 # CONFIG_CRYPTO_WP512 is not set 786 # CONFIG_CRYPTO_TGR192 is not set 787 - # CONFIG_CRYPTO_GF128MUL is not set 788 CONFIG_CRYPTO_ECB=m 789 CONFIG_CRYPTO_CBC=y 790 CONFIG_CRYPTO_PCBC=m 791 # CONFIG_CRYPTO_LRW is not set 792 # CONFIG_CRYPTO_XTS is not set 793 # CONFIG_CRYPTO_CRYPTD is not set 794 # CONFIG_CRYPTO_DES is not set 795 CONFIG_CRYPTO_FCRYPT=m ··· 807 # CONFIG_CRYPTO_KHAZAD is not set 808 # CONFIG_CRYPTO_ANUBIS is not set 809 CONFIG_CRYPTO_SEED=m 810 # CONFIG_CRYPTO_DEFLATE is not set 811 # CONFIG_CRYPTO_MICHAEL_MIC is not set 812 # CONFIG_CRYPTO_CRC32C is not set 813 CONFIG_CRYPTO_CAMELLIA=m 814 # CONFIG_CRYPTO_TEST is not set 815 CONFIG_CRYPTO_AUTHENC=m 816 CONFIG_CRYPTO_HW=y 817 # CONFIG_CRYPTO_SHA1_S390 is not set 818 # CONFIG_CRYPTO_SHA256_S390 is not set 819 # CONFIG_CRYPTO_DES_S390 is not set 820 # CONFIG_CRYPTO_AES_S390 is not set 821 CONFIG_S390_PRNG=m 822 - CONFIG_ZCRYPT=m 823 - # CONFIG_ZCRYPT_MONOLITHIC is not set 824 825 # 826 # Library routines ··· 833 # CONFIG_CRC_ITU_T is not set 834 CONFIG_CRC32=m 835 CONFIG_CRC7=m 836 - # CONFIG_LIBCRC32C is not set 837 CONFIG_PLIST=y
··· 1 # 2 # Automatically generated make config: don't edit 3 + # Linux kernel version: 2.6.24 4 + # Sat Feb 9 12:13:01 2008 5 # 6 CONFIG_MMU=y 7 CONFIG_ZONE_DMA=y 8 CONFIG_LOCKDEP_SUPPORT=y 9 CONFIG_STACKTRACE_SUPPORT=y 10 + CONFIG_HAVE_LATENCYTOP_SUPPORT=y 11 CONFIG_RWSEM_XCHGADD_ALGORITHM=y 12 # CONFIG_ARCH_HAS_ILOG2_U32 is not set 13 # CONFIG_ARCH_HAS_ILOG2_U64 is not set ··· 15 CONFIG_GENERIC_BUG=y 16 CONFIG_NO_IOMEM=y 17 CONFIG_NO_DMA=y 18 + CONFIG_GENERIC_LOCKBREAK=y 19 CONFIG_S390=y 20 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 21 ··· 32 CONFIG_POSIX_MQUEUE=y 33 # CONFIG_BSD_PROCESS_ACCT is not set 34 # CONFIG_TASKSTATS is not set 35 CONFIG_AUDIT=y 36 # CONFIG_AUDITSYSCALL is not set 37 CONFIG_IKCONFIG=y ··· 41 CONFIG_CGROUPS=y 42 # CONFIG_CGROUP_DEBUG is not set 43 CONFIG_CGROUP_NS=y 44 # CONFIG_CPUSETS is not set 45 CONFIG_FAIR_GROUP_SCHED=y 46 CONFIG_FAIR_USER_SCHED=y 47 # CONFIG_FAIR_CGROUP_SCHED is not set 48 + # CONFIG_CGROUP_CPUACCT is not set 49 + # CONFIG_RESOURCE_COUNTERS is not set 50 CONFIG_SYSFS_DEPRECATED=y 51 # CONFIG_RELAY is not set 52 + CONFIG_NAMESPACES=y 53 + CONFIG_UTS_NS=y 54 + CONFIG_IPC_NS=y 55 + # CONFIG_USER_NS is not set 56 + # CONFIG_PID_NS is not set 57 CONFIG_BLK_DEV_INITRD=y 58 CONFIG_INITRAMFS_SOURCE="" 59 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set ··· 61 CONFIG_PRINTK=y 62 CONFIG_BUG=y 63 CONFIG_ELF_CORE=y 64 + # CONFIG_COMPAT_BRK is not set 65 CONFIG_BASE_FULL=y 66 CONFIG_FUTEX=y 67 CONFIG_ANON_INODES=y 68 CONFIG_EPOLL=y 69 CONFIG_SIGNALFD=y 70 + CONFIG_TIMERFD=y 71 CONFIG_EVENTFD=y 72 CONFIG_SHMEM=y 73 CONFIG_VM_EVENT_COUNTERS=y 74 CONFIG_SLAB=y 75 # CONFIG_SLUB is not set 76 # CONFIG_SLOB is not set 77 + # CONFIG_PROFILING is not set 78 + # CONFIG_MARKERS is not set 79 + CONFIG_HAVE_OPROFILE=y 80 + CONFIG_KPROBES=y 81 + CONFIG_HAVE_KPROBES=y 82 + CONFIG_PROC_PAGE_MONITOR=y 83 + CONFIG_SLABINFO=y 84 CONFIG_RT_MUTEXES=y 85 # CONFIG_TINY_SHMEM is not set 86 CONFIG_BASE_SMALL=0 ··· 99 # CONFIG_DEFAULT_CFQ is 
not set 100 # CONFIG_DEFAULT_NOOP is not set 101 CONFIG_DEFAULT_IOSCHED="deadline" 102 + CONFIG_CLASSIC_RCU=y 103 + # CONFIG_PREEMPT_RCU is not set 104 105 # 106 # Base setup ··· 137 # CONFIG_PREEMPT_NONE is not set 138 # CONFIG_PREEMPT_VOLUNTARY is not set 139 CONFIG_PREEMPT=y 140 + # CONFIG_RCU_TRACE is not set 141 CONFIG_SELECT_MEMORY_MODEL=y 142 CONFIG_FLATMEM_MANUAL=y 143 # CONFIG_DISCONTIGMEM_MANUAL is not set ··· 151 CONFIG_ZONE_DMA_FLAG=1 152 CONFIG_BOUNCE=y 153 CONFIG_VIRT_TO_BUS=y 154 155 # 156 # I/O subsystem configuration ··· 180 # CONFIG_HZ_300 is not set 181 # CONFIG_HZ_1000 is not set 182 CONFIG_HZ=100 183 + # CONFIG_SCHED_HRTICK is not set 184 CONFIG_NO_IDLE_HZ=y 185 CONFIG_NO_IDLE_HZ_INIT=y 186 CONFIG_S390_HYPFS_FS=y ··· 201 # CONFIG_XFRM_USER is not set 202 # CONFIG_XFRM_SUB_POLICY is not set 203 # CONFIG_XFRM_MIGRATE is not set 204 + # CONFIG_XFRM_STATISTICS is not set 205 CONFIG_NET_KEY=y 206 # CONFIG_NET_KEY_MIGRATE is not set 207 CONFIG_IUCV=m ··· 251 # CONFIG_NETWORK_SECMARK is not set 252 CONFIG_NETFILTER=y 253 # CONFIG_NETFILTER_DEBUG is not set 254 + CONFIG_NETFILTER_ADVANCED=y 255 256 # 257 # Core Netfilter Configuration ··· 258 CONFIG_NETFILTER_NETLINK=m 259 CONFIG_NETFILTER_NETLINK_QUEUE=m 260 CONFIG_NETFILTER_NETLINK_LOG=m 261 CONFIG_NF_CONNTRACK=m 262 # CONFIG_NF_CT_ACCT is not set 263 # CONFIG_NF_CONNTRACK_MARK is not set ··· 286 # CONFIG_IP_NF_ARPTABLES is not set 287 288 # 289 + # IPv6: Netfilter Configuration 290 # 291 # CONFIG_NF_CONNTRACK_IPV6 is not set 292 # CONFIG_IP6_NF_QUEUE is not set ··· 343 CONFIG_CLS_U32_MARK=y 344 CONFIG_NET_CLS_RSVP=m 345 CONFIG_NET_CLS_RSVP6=m 346 + CONFIG_NET_CLS_FLOW=m 347 # CONFIG_NET_EMATCH is not set 348 CONFIG_NET_CLS_ACT=y 349 CONFIG_NET_ACT_POLICE=y ··· 351 CONFIG_NET_ACT_NAT=m 352 # CONFIG_NET_ACT_PEDIT is not set 353 # CONFIG_NET_ACT_SIMP is not set 354 # CONFIG_NET_CLS_IND is not set 355 CONFIG_NET_SCH_FIFO=y 356 ··· 360 # 361 # CONFIG_NET_PKTGEN is not set 362 # CONFIG_NET_TCPPROBE is not 
set 363 + CONFIG_CAN=m 364 + CONFIG_CAN_RAW=m 365 + CONFIG_CAN_BCM=m 366 + 367 + # 368 + # CAN Device Drivers 369 + # 370 + CONFIG_CAN_VCAN=m 371 + # CONFIG_CAN_DEBUG_DEVICES is not set 372 # CONFIG_AF_RXRPC is not set 373 # CONFIG_RFKILL is not set 374 # CONFIG_NET_9P is not set ··· 389 CONFIG_BLK_DEV_RAM=y 390 CONFIG_BLK_DEV_RAM_COUNT=16 391 CONFIG_BLK_DEV_RAM_SIZE=4096 392 + CONFIG_BLK_DEV_XIP=y 393 # CONFIG_CDROM_PKTCDVD is not set 394 # CONFIG_ATA_OVER_ETH is not set 395 ··· 406 CONFIG_DASD_EER=y 407 CONFIG_MISC_DEVICES=y 408 # CONFIG_EEPROM_93CX6 is not set 409 + # CONFIG_ENCLOSURE_SERVICES is not set 410 411 # 412 # SCSI device support ··· 487 # CONFIG_IBM_NEW_EMAC_TAH is not set 488 # CONFIG_IBM_NEW_EMAC_EMAC4 is not set 489 CONFIG_NETDEV_1000=y 490 + # CONFIG_E1000E_ENABLED is not set 491 CONFIG_NETDEV_10000=y 492 # CONFIG_TR is not set 493 # CONFIG_WAN is not set ··· 508 CONFIG_CCWGROUP=y 509 # CONFIG_PPP is not set 510 # CONFIG_SLIP is not set 511 # CONFIG_NETCONSOLE is not set 512 # CONFIG_NETPOLL is not set 513 # CONFIG_NET_POLL_CONTROLLER is not set ··· 558 CONFIG_MONWRITER=m 559 CONFIG_S390_VMUR=m 560 # CONFIG_POWER_SUPPLY is not set 561 + # CONFIG_THERMAL is not set 562 # CONFIG_WATCHDOG is not set 563 564 # ··· 584 # CONFIG_XFS_FS is not set 585 # CONFIG_GFS2_FS is not set 586 # CONFIG_OCFS2_FS is not set 587 + CONFIG_DNOTIFY=y 588 CONFIG_INOTIFY=y 589 CONFIG_INOTIFY_USER=y 590 # CONFIG_QUOTA is not set 591 # CONFIG_AUTOFS_FS is not set 592 # CONFIG_AUTOFS4_FS is not set 593 # CONFIG_FUSE_FS is not set ··· 632 # CONFIG_EFS_FS is not set 633 # CONFIG_CRAMFS is not set 634 # CONFIG_VXFS_FS is not set 635 + # CONFIG_MINIX_FS is not set 636 # CONFIG_HPFS_FS is not set 637 # CONFIG_QNX4FS_FS is not set 638 + # CONFIG_ROMFS_FS is not set 639 # CONFIG_SYSV_FS is not set 640 # CONFIG_UFS_FS is not set 641 CONFIG_NETWORK_FILESYSTEMS=y ··· 686 # CONFIG_NLS is not set 687 CONFIG_DLM=m 688 # CONFIG_DLM_DEBUG is not set 689 690 # 691 # Kernel hacking 692 # 693 
CONFIG_TRACE_IRQFLAGS_SUPPORT=y 694 # CONFIG_PRINTK_TIME is not set 695 + CONFIG_ENABLE_WARN_DEPRECATED=y 696 CONFIG_ENABLE_MUST_CHECK=y 697 CONFIG_MAGIC_SYSRQ=y 698 # CONFIG_UNUSED_SYMBOLS is not set ··· 721 # CONFIG_DEBUG_INFO is not set 722 # CONFIG_DEBUG_VM is not set 723 # CONFIG_DEBUG_LIST is not set 724 + # CONFIG_DEBUG_SG is not set 725 # CONFIG_FRAME_POINTER is not set 726 CONFIG_FORCED_INLINING=y 727 # CONFIG_RCU_TORTURE_TEST is not set 728 + # CONFIG_KPROBES_SANITY_TEST is not set 729 + # CONFIG_BACKTRACE_SELF_TEST is not set 730 # CONFIG_LKDTM is not set 731 # CONFIG_FAULT_INJECTION is not set 732 + # CONFIG_LATENCYTOP is not set 733 CONFIG_SAMPLES=y 734 + # CONFIG_SAMPLE_KOBJECT is not set 735 + # CONFIG_DEBUG_PAGEALLOC is not set 736 737 # 738 # Security options ··· 738 CONFIG_CRYPTO_ALGAPI=y 739 CONFIG_CRYPTO_AEAD=m 740 CONFIG_CRYPTO_BLKCIPHER=y 741 + CONFIG_CRYPTO_SEQIV=m 742 CONFIG_CRYPTO_HASH=m 743 CONFIG_CRYPTO_MANAGER=y 744 CONFIG_CRYPTO_HMAC=m ··· 745 # CONFIG_CRYPTO_NULL is not set 746 # CONFIG_CRYPTO_MD4 is not set 747 CONFIG_CRYPTO_MD5=m 748 + CONFIG_CRYPTO_SHA1=m 749 # CONFIG_CRYPTO_SHA256 is not set 750 # CONFIG_CRYPTO_SHA512 is not set 751 # CONFIG_CRYPTO_WP512 is not set 752 # CONFIG_CRYPTO_TGR192 is not set 753 + CONFIG_CRYPTO_GF128MUL=m 754 CONFIG_CRYPTO_ECB=m 755 CONFIG_CRYPTO_CBC=y 756 CONFIG_CRYPTO_PCBC=m 757 # CONFIG_CRYPTO_LRW is not set 758 # CONFIG_CRYPTO_XTS is not set 759 + CONFIG_CRYPTO_CTR=m 760 + CONFIG_CRYPTO_GCM=m 761 + CONFIG_CRYPTO_CCM=m 762 # CONFIG_CRYPTO_CRYPTD is not set 763 # CONFIG_CRYPTO_DES is not set 764 CONFIG_CRYPTO_FCRYPT=m ··· 770 # CONFIG_CRYPTO_KHAZAD is not set 771 # CONFIG_CRYPTO_ANUBIS is not set 772 CONFIG_CRYPTO_SEED=m 773 + CONFIG_CRYPTO_SALSA20=m 774 # CONFIG_CRYPTO_DEFLATE is not set 775 # CONFIG_CRYPTO_MICHAEL_MIC is not set 776 # CONFIG_CRYPTO_CRC32C is not set 777 CONFIG_CRYPTO_CAMELLIA=m 778 # CONFIG_CRYPTO_TEST is not set 779 CONFIG_CRYPTO_AUTHENC=m 780 + CONFIG_CRYPTO_LZO=m 781 
CONFIG_CRYPTO_HW=y 782 + CONFIG_ZCRYPT=m 783 + # CONFIG_ZCRYPT_MONOLITHIC is not set 784 # CONFIG_CRYPTO_SHA1_S390 is not set 785 # CONFIG_CRYPTO_SHA256_S390 is not set 786 # CONFIG_CRYPTO_DES_S390 is not set 787 # CONFIG_CRYPTO_AES_S390 is not set 788 CONFIG_S390_PRNG=m 789 790 # 791 # Library routines ··· 794 # CONFIG_CRC_ITU_T is not set 795 CONFIG_CRC32=m 796 CONFIG_CRC7=m 797 + CONFIG_LIBCRC32C=m 798 + CONFIG_LZO_COMPRESS=m 799 + CONFIG_LZO_DECOMPRESS=m 800 CONFIG_PLIST=y
+11
arch/s390/kernel/binfmt_elf32.c
··· 134 } 135 136 #include <asm/processor.h> 137 #include <linux/module.h> 138 #include <linux/elfcore.h> 139 #include <linux/binfmts.h> ··· 183 184 #undef start_thread 185 #define start_thread start_thread31 186 187 MODULE_DESCRIPTION("Binary format loader for compatibility with 32bit Linux for S390 binaries," 188 " Copyright 2000 IBM Corporation");
··· 134 } 135 136 #include <asm/processor.h> 137 + #include <asm/pgalloc.h> 138 #include <linux/module.h> 139 #include <linux/elfcore.h> 140 #include <linux/binfmts.h> ··· 182 183 #undef start_thread 184 #define start_thread start_thread31 185 + 186 + static inline void start_thread31(struct pt_regs *regs, unsigned long new_psw, 187 + unsigned long new_stackp) 188 + { 189 + set_fs(USER_DS); 190 + regs->psw.mask = psw_user32_bits; 191 + regs->psw.addr = new_psw; 192 + regs->gprs[15] = new_stackp; 193 + crst_table_downgrade(current->mm, 1UL << 31); 194 + } 195 196 MODULE_DESCRIPTION("Binary format loader for compatibility with 32bit Linux for S390 binaries," 197 " Copyright 2000 IBM Corporation");
+20
arch/s390/kernel/compat_wrapper.S
··· 1712 sllg %r5,%r6,32 # get high word of 64bit loff_t 1713 l %r5,164(%r15) # get low word of 64bit loff_t 1714 jg sys_fallocate
··· 1712 sllg %r5,%r6,32 # get high word of 64bit loff_t 1713 l %r5,164(%r15) # get low word of 64bit loff_t 1714 jg sys_fallocate 1715 + 1716 + .globl sys_timerfd_create_wrapper 1717 + sys_timerfd_create_wrapper: 1718 + lgfr %r2,%r2 # int 1719 + lgfr %r3,%r3 # int 1720 + jg sys_timerfd_create 1721 + 1722 + .globl compat_sys_timerfd_settime_wrapper 1723 + compat_sys_timerfd_settime_wrapper: 1724 + lgfr %r2,%r2 # int 1725 + lgfr %r3,%r3 # int 1726 + llgtr %r4,%r4 # struct compat_itimerspec * 1727 + llgtr %r5,%r5 # struct compat_itimerspec * 1728 + jg compat_sys_timerfd_settime 1729 + 1730 + .globl compat_sys_timerfd_gettime_wrapper 1731 + compat_sys_timerfd_gettime_wrapper: 1732 + lgfr %r2,%r2 # int 1733 + llgtr %r3,%r3 # struct compat_itimerspec * 1734 + jg compat_sys_timerfd_gettime
-1
arch/s390/kernel/process.c
··· 29 #include <linux/slab.h> 30 #include <linux/vmalloc.h> 31 #include <linux/user.h> 32 - #include <linux/a.out.h> 33 #include <linux/interrupt.h> 34 #include <linux/delay.h> 35 #include <linux/reboot.h>
··· 29 #include <linux/slab.h> 30 #include <linux/vmalloc.h> 31 #include <linux/user.h> 32 #include <linux/interrupt.h> 33 #include <linux/delay.h> 34 #include <linux/reboot.h>
-1
arch/s390/kernel/setup.c
··· 24 #include <linux/ptrace.h> 25 #include <linux/slab.h> 26 #include <linux/user.h> 27 - #include <linux/a.out.h> 28 #include <linux/tty.h> 29 #include <linux/ioport.h> 30 #include <linux/delay.h>
··· 24 #include <linux/ptrace.h> 25 #include <linux/slab.h> 26 #include <linux/user.h> 27 #include <linux/tty.h> 28 #include <linux/ioport.h> 29 #include <linux/delay.h>
+3
arch/s390/kernel/syscalls.S
··· 327 SYSCALL(sys_signalfd,sys_signalfd,compat_sys_signalfd_wrapper) 328 NI_SYSCALL /* 317 old sys_timer_fd */ 329 SYSCALL(sys_eventfd,sys_eventfd,sys_eventfd_wrapper)
··· 327 SYSCALL(sys_signalfd,sys_signalfd,compat_sys_signalfd_wrapper) 328 NI_SYSCALL /* 317 old sys_timer_fd */ 329 SYSCALL(sys_eventfd,sys_eventfd,sys_eventfd_wrapper) 330 + SYSCALL(sys_timerfd_create,sys_timerfd_create,sys_timerfd_create_wrapper) 331 + SYSCALL(sys_timerfd_settime,sys_timerfd_settime,compat_sys_timerfd_settime_wrapper) /* 320 */ 332 + SYSCALL(sys_timerfd_gettime,sys_timerfd_gettime,compat_sys_timerfd_gettime_wrapper)
+2 -1
arch/s390/kernel/traps.c
··· 60 extern pgm_check_handler_t do_protection_exception; 61 extern pgm_check_handler_t do_dat_exception; 62 extern pgm_check_handler_t do_monitor_call; 63 64 #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) 65 ··· 731 pgm_check_table[0x12] = &translation_exception; 732 pgm_check_table[0x13] = &special_op_exception; 733 #ifdef CONFIG_64BIT 734 - pgm_check_table[0x38] = &do_dat_exception; 735 pgm_check_table[0x39] = &do_dat_exception; 736 pgm_check_table[0x3A] = &do_dat_exception; 737 pgm_check_table[0x3B] = &do_dat_exception;
··· 60 extern pgm_check_handler_t do_protection_exception; 61 extern pgm_check_handler_t do_dat_exception; 62 extern pgm_check_handler_t do_monitor_call; 63 + extern pgm_check_handler_t do_asce_exception; 64 65 #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) 66 ··· 730 pgm_check_table[0x12] = &translation_exception; 731 pgm_check_table[0x13] = &special_op_exception; 732 #ifdef CONFIG_64BIT 733 + pgm_check_table[0x38] = &do_asce_exception; 734 pgm_check_table[0x39] = &do_dat_exception; 735 pgm_check_table[0x3A] = &do_dat_exception; 736 pgm_check_table[0x3B] = &do_dat_exception;
+40
arch/s390/mm/fault.c
··· 32 #include <asm/system.h> 33 #include <asm/pgtable.h> 34 #include <asm/s390_ext.h> 35 36 #ifndef CONFIG_64BIT 37 #define __FAIL_ADDR_MASK 0x7ffff000 ··· 444 { 445 do_exception(regs, error_code & 0xff, 0); 446 } 447 448 #ifdef CONFIG_PFAULT 449 /*
··· 32 #include <asm/system.h> 33 #include <asm/pgtable.h> 34 #include <asm/s390_ext.h> 35 + #include <asm/mmu_context.h> 36 37 #ifndef CONFIG_64BIT 38 #define __FAIL_ADDR_MASK 0x7ffff000 ··· 443 { 444 do_exception(regs, error_code & 0xff, 0); 445 } 446 + 447 + #ifdef CONFIG_64BIT 448 + void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code) 449 + { 450 + struct mm_struct *mm; 451 + struct vm_area_struct *vma; 452 + unsigned long address; 453 + int space; 454 + 455 + mm = current->mm; 456 + address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK; 457 + space = check_space(current); 458 + 459 + if (unlikely(space == 0 || in_atomic() || !mm)) 460 + goto no_context; 461 + 462 + local_irq_enable(); 463 + 464 + down_read(&mm->mmap_sem); 465 + vma = find_vma(mm, address); 466 + up_read(&mm->mmap_sem); 467 + 468 + if (vma) { 469 + update_mm(mm, current); 470 + return; 471 + } 472 + 473 + /* User mode accesses just cause a SIGSEGV */ 474 + if (regs->psw.mask & PSW_MASK_PSTATE) { 475 + current->thread.prot_addr = address; 476 + current->thread.trap_no = error_code; 477 + do_sigsegv(regs, error_code, SEGV_MAPERR, address); 478 + return; 479 + } 480 + 481 + no_context: 482 + do_no_context(regs, error_code, address); 483 + } 484 + #endif 485 486 #ifdef CONFIG_PFAULT 487 /*
+2 -1
arch/s390/mm/init.c
··· 112 init_mm.pgd = swapper_pg_dir; 113 S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK; 114 #ifdef CONFIG_64BIT 115 S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH; 116 pgd_type = _REGION3_ENTRY_EMPTY; 117 #else ··· 185 pmd = pmd_offset(pud, address); 186 pte = pte_offset_kernel(pmd, address); 187 if (!enable) { 188 - ptep_invalidate(address, pte); 189 continue; 190 } 191 *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
··· 112 init_mm.pgd = swapper_pg_dir; 113 S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK; 114 #ifdef CONFIG_64BIT 115 + /* A three level page table (4TB) is enough for the kernel space. */ 116 S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH; 117 pgd_type = _REGION3_ENTRY_EMPTY; 118 #else ··· 184 pmd = pmd_offset(pud, address); 185 pte = pte_offset_kernel(pmd, address); 186 if (!enable) { 187 + ptep_invalidate(&init_mm, address, pte); 188 continue; 189 } 190 *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
+65
arch/s390/mm/mmap.c
··· 27 #include <linux/personality.h> 28 #include <linux/mm.h> 29 #include <linux/module.h> 30 31 /* 32 * Top of mmap area (just below the process stack). ··· 63 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY; 64 } 65 66 /* 67 * This function, called very early during the creation of a new 68 * process VM image, sets up which VM layout function to use: ··· 87 } 88 EXPORT_SYMBOL_GPL(arch_pick_mmap_layout); 89
··· 27 #include <linux/personality.h> 28 #include <linux/mm.h> 29 #include <linux/module.h> 30 + #include <asm/pgalloc.h> 31 32 /* 33 * Top of mmap area (just below the process stack). ··· 62 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY; 63 } 64 65 + #ifndef CONFIG_64BIT 66 + 67 /* 68 * This function, called very early during the creation of a new 69 * process VM image, sets up which VM layout function to use: ··· 84 } 85 EXPORT_SYMBOL_GPL(arch_pick_mmap_layout); 86 87 + #else 88 + 89 + static unsigned long 90 + s390_get_unmapped_area(struct file *filp, unsigned long addr, 91 + unsigned long len, unsigned long pgoff, unsigned long flags) 92 + { 93 + struct mm_struct *mm = current->mm; 94 + int rc; 95 + 96 + addr = arch_get_unmapped_area(filp, addr, len, pgoff, flags); 97 + if (addr & ~PAGE_MASK) 98 + return addr; 99 + if (unlikely(mm->context.asce_limit < addr + len)) { 100 + rc = crst_table_upgrade(mm, addr + len); 101 + if (rc) 102 + return (unsigned long) rc; 103 + } 104 + return addr; 105 + } 106 + 107 + static unsigned long 108 + s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, 109 + const unsigned long len, const unsigned long pgoff, 110 + const unsigned long flags) 111 + { 112 + struct mm_struct *mm = current->mm; 113 + unsigned long addr = addr0; 114 + int rc; 115 + 116 + addr = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags); 117 + if (addr & ~PAGE_MASK) 118 + return addr; 119 + if (unlikely(mm->context.asce_limit < addr + len)) { 120 + rc = crst_table_upgrade(mm, addr + len); 121 + if (rc) 122 + return (unsigned long) rc; 123 + } 124 + return addr; 125 + } 126 + /* 127 + * This function, called very early during the creation of a new 128 + * process VM image, sets up which VM layout function to use: 129 + */ 130 + void arch_pick_mmap_layout(struct mm_struct *mm) 131 + { 132 + /* 133 + * Fall back to the standard layout if the personality 134 + * bit is set, or if the expected stack growth is 
unlimited: 135 + */ 136 + if (mmap_is_legacy()) { 137 + mm->mmap_base = TASK_UNMAPPED_BASE; 138 + mm->get_unmapped_area = s390_get_unmapped_area; 139 + mm->unmap_area = arch_unmap_area; 140 + } else { 141 + mm->mmap_base = mmap_base(); 142 + mm->get_unmapped_area = s390_get_unmapped_area_topdown; 143 + mm->unmap_area = arch_unmap_area_topdown; 144 + } 145 + } 146 + EXPORT_SYMBOL_GPL(arch_pick_mmap_layout); 147 + 148 + #endif
+157 -23
arch/s390/mm/pgtable.c
··· 23 #include <asm/pgalloc.h> 24 #include <asm/tlb.h> 25 #include <asm/tlbflush.h> 26 27 #ifndef CONFIG_64BIT 28 #define ALLOC_ORDER 1 29 #else 30 #define ALLOC_ORDER 2 31 #endif 32 33 unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec) ··· 52 } 53 page->index = page_to_phys(shadow); 54 } 55 return (unsigned long *) page_to_phys(page); 56 } 57 58 - void crst_table_free(unsigned long *table) 59 { 60 unsigned long *shadow = get_shadow_table(table); 61 62 if (shadow) 63 free_pages((unsigned long) shadow, ALLOC_ORDER); 64 free_pages((unsigned long) table, ALLOC_ORDER); 65 } 66 67 /* 68 * page table entry allocation/free routines. 69 */ 70 - unsigned long *page_table_alloc(int noexec) 71 { 72 - struct page *page = alloc_page(GFP_KERNEL); 73 unsigned long *table; 74 75 - if (!page) 76 - return NULL; 77 - page->index = 0; 78 - if (noexec) { 79 - struct page *shadow = alloc_page(GFP_KERNEL); 80 - if (!shadow) { 81 - __free_page(page); 82 - return NULL; 83 - } 84 - table = (unsigned long *) page_to_phys(shadow); 85 - clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); 86 - page->index = (addr_t) table; 87 } 88 - pgtable_page_ctor(page); 89 table = (unsigned long *) page_to_phys(page); 90 - clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); 91 return table; 92 } 93 94 - void page_table_free(unsigned long *table) 95 { 96 - unsigned long *shadow = get_shadow_pte(table); 97 98 - pgtable_page_dtor(virt_to_page(table)); 99 - if (shadow) 100 - free_page((unsigned long) shadow); 101 - free_page((unsigned long) table); 102 103 }
··· 23 #include <asm/pgalloc.h> 24 #include <asm/tlb.h> 25 #include <asm/tlbflush.h> 26 + #include <asm/mmu_context.h> 27 28 #ifndef CONFIG_64BIT 29 #define ALLOC_ORDER 1 30 + #define TABLES_PER_PAGE 4 31 + #define FRAG_MASK 15UL 32 + #define SECOND_HALVES 10UL 33 #else 34 #define ALLOC_ORDER 2 35 + #define TABLES_PER_PAGE 2 36 + #define FRAG_MASK 3UL 37 + #define SECOND_HALVES 2UL 38 #endif 39 40 unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec) ··· 45 } 46 page->index = page_to_phys(shadow); 47 } 48 + spin_lock(&mm->page_table_lock); 49 + list_add(&page->lru, &mm->context.crst_list); 50 + spin_unlock(&mm->page_table_lock); 51 return (unsigned long *) page_to_phys(page); 52 } 53 54 + void crst_table_free(struct mm_struct *mm, unsigned long *table) 55 { 56 unsigned long *shadow = get_shadow_table(table); 57 + struct page *page = virt_to_page(table); 58 59 + spin_lock(&mm->page_table_lock); 60 + list_del(&page->lru); 61 + spin_unlock(&mm->page_table_lock); 62 if (shadow) 63 free_pages((unsigned long) shadow, ALLOC_ORDER); 64 free_pages((unsigned long) table, ALLOC_ORDER); 65 } 66 67 + #ifdef CONFIG_64BIT 68 + int crst_table_upgrade(struct mm_struct *mm, unsigned long limit) 69 + { 70 + unsigned long *table, *pgd; 71 + unsigned long entry; 72 + 73 + BUG_ON(limit > (1UL << 53)); 74 + repeat: 75 + table = crst_table_alloc(mm, mm->context.noexec); 76 + if (!table) 77 + return -ENOMEM; 78 + spin_lock(&mm->page_table_lock); 79 + if (mm->context.asce_limit < limit) { 80 + pgd = (unsigned long *) mm->pgd; 81 + if (mm->context.asce_limit <= (1UL << 31)) { 82 + entry = _REGION3_ENTRY_EMPTY; 83 + mm->context.asce_limit = 1UL << 42; 84 + mm->context.asce_bits = _ASCE_TABLE_LENGTH | 85 + _ASCE_USER_BITS | 86 + _ASCE_TYPE_REGION3; 87 + } else { 88 + entry = _REGION2_ENTRY_EMPTY; 89 + mm->context.asce_limit = 1UL << 53; 90 + mm->context.asce_bits = _ASCE_TABLE_LENGTH | 91 + _ASCE_USER_BITS | 92 + _ASCE_TYPE_REGION2; 93 + } 94 + crst_table_init(table, entry); 95 + 
pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd); 96 + mm->pgd = (pgd_t *) table; 97 + table = NULL; 98 + } 99 + spin_unlock(&mm->page_table_lock); 100 + if (table) 101 + crst_table_free(mm, table); 102 + if (mm->context.asce_limit < limit) 103 + goto repeat; 104 + update_mm(mm, current); 105 + return 0; 106 + } 107 + 108 + void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) 109 + { 110 + pgd_t *pgd; 111 + 112 + if (mm->context.asce_limit <= limit) 113 + return; 114 + __tlb_flush_mm(mm); 115 + while (mm->context.asce_limit > limit) { 116 + pgd = mm->pgd; 117 + switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) { 118 + case _REGION_ENTRY_TYPE_R2: 119 + mm->context.asce_limit = 1UL << 42; 120 + mm->context.asce_bits = _ASCE_TABLE_LENGTH | 121 + _ASCE_USER_BITS | 122 + _ASCE_TYPE_REGION3; 123 + break; 124 + case _REGION_ENTRY_TYPE_R3: 125 + mm->context.asce_limit = 1UL << 31; 126 + mm->context.asce_bits = _ASCE_TABLE_LENGTH | 127 + _ASCE_USER_BITS | 128 + _ASCE_TYPE_SEGMENT; 129 + break; 130 + default: 131 + BUG(); 132 + } 133 + mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN); 134 + crst_table_free(mm, (unsigned long *) pgd); 135 + } 136 + update_mm(mm, current); 137 + } 138 + #endif 139 + 140 /* 141 * page table entry allocation/free routines. 142 */ 143 + unsigned long *page_table_alloc(struct mm_struct *mm) 144 { 145 + struct page *page; 146 unsigned long *table; 147 + unsigned long bits; 148 149 + bits = mm->context.noexec ? 
3UL : 1UL; 150 + spin_lock(&mm->page_table_lock); 151 + page = NULL; 152 + if (!list_empty(&mm->context.pgtable_list)) { 153 + page = list_first_entry(&mm->context.pgtable_list, 154 + struct page, lru); 155 + if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1)) 156 + page = NULL; 157 } 158 + if (!page) { 159 + spin_unlock(&mm->page_table_lock); 160 + page = alloc_page(GFP_KERNEL|__GFP_REPEAT); 161 + if (!page) 162 + return NULL; 163 + pgtable_page_ctor(page); 164 + page->flags &= ~FRAG_MASK; 165 + table = (unsigned long *) page_to_phys(page); 166 + clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); 167 + spin_lock(&mm->page_table_lock); 168 + list_add(&page->lru, &mm->context.pgtable_list); 169 + } 170 table = (unsigned long *) page_to_phys(page); 171 + while (page->flags & bits) { 172 + table += 256; 173 + bits <<= 1; 174 + } 175 + page->flags |= bits; 176 + if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1)) 177 + list_move_tail(&page->lru, &mm->context.pgtable_list); 178 + spin_unlock(&mm->page_table_lock); 179 return table; 180 } 181 182 + void page_table_free(struct mm_struct *mm, unsigned long *table) 183 { 184 + struct page *page; 185 + unsigned long bits; 186 187 + bits = mm->context.noexec ? 3UL : 1UL; 188 + bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); 189 + page = pfn_to_page(__pa(table) >> PAGE_SHIFT); 190 + spin_lock(&mm->page_table_lock); 191 + page->flags ^= bits; 192 + if (page->flags & FRAG_MASK) { 193 + /* Page now has some free pgtable fragments. */ 194 + list_move(&page->lru, &mm->context.pgtable_list); 195 + page = NULL; 196 + } else 197 + /* All fragments of the 4K page have been freed. 
*/ 198 + list_del(&page->lru); 199 + spin_unlock(&mm->page_table_lock); 200 + if (page) { 201 + pgtable_page_dtor(page); 202 + __free_page(page); 203 + } 204 + } 205 206 + void disable_noexec(struct mm_struct *mm, struct task_struct *tsk) 207 + { 208 + struct page *page; 209 + 210 + spin_lock(&mm->page_table_lock); 211 + /* Free shadow region and segment tables. */ 212 + list_for_each_entry(page, &mm->context.crst_list, lru) 213 + if (page->index) { 214 + free_pages((unsigned long) page->index, ALLOC_ORDER); 215 + page->index = 0; 216 + } 217 + /* "Free" second halves of page tables. */ 218 + list_for_each_entry(page, &mm->context.pgtable_list, lru) 219 + page->flags &= ~SECOND_HALVES; 220 + spin_unlock(&mm->page_table_lock); 221 + mm->context.noexec = 0; 222 + update_mm(mm, tsk); 223 }
+24 -4
arch/s390/mm/vmem.c
··· 69 return alloc_bootmem_pages((1 << order) * PAGE_SIZE); 70 } 71 72 - #define vmem_pud_alloc() ({ BUG(); ((pud_t *) NULL); }) 73 74 static inline pmd_t *vmem_pmd_alloc(void) 75 { ··· 96 return pmd; 97 } 98 99 - static inline pte_t *vmem_pte_alloc(void) 100 { 101 - pte_t *pte = vmem_alloc_pages(0); 102 103 if (!pte) 104 return NULL; 105 - clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, PAGE_SIZE); 106 return pte; 107 } 108 ··· 377 { 378 int i; 379 380 NODE_DATA(0)->node_mem_map = VMEM_MAP; 381 for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) 382 vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
··· 69 return alloc_bootmem_pages((1 << order) * PAGE_SIZE); 70 } 71 72 + static inline pud_t *vmem_pud_alloc(void) 73 + { 74 + pud_t *pud = NULL; 75 + 76 + #ifdef CONFIG_64BIT 77 + pud = vmem_alloc_pages(2); 78 + if (!pud) 79 + return NULL; 80 + pud_val(*pud) = _REGION3_ENTRY_EMPTY; 81 + memcpy(pud + 1, pud, (PTRS_PER_PUD - 1)*sizeof(pud_t)); 82 + #endif 83 + return pud; 84 + } 85 86 static inline pmd_t *vmem_pmd_alloc(void) 87 { ··· 84 return pmd; 85 } 86 87 + static pte_t __init_refok *vmem_pte_alloc(void) 88 { 89 + pte_t *pte; 90 91 + if (slab_is_available()) 92 + pte = (pte_t *) page_table_alloc(&init_mm); 93 + else 94 + pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t)); 95 if (!pte) 96 return NULL; 97 + clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, 98 + PTRS_PER_PTE * sizeof(pte_t)); 99 return pte; 100 } 101 ··· 360 { 361 int i; 362 363 + INIT_LIST_HEAD(&init_mm.context.crst_list); 364 + INIT_LIST_HEAD(&init_mm.context.pgtable_list); 365 + init_mm.context.noexec = 0; 366 NODE_DATA(0)->node_mem_map = VMEM_MAP; 367 for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) 368 vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
+18 -13
drivers/s390/char/sclp_vt220.c
··· 3 * SCLP VT220 terminal driver. 4 * 5 * S390 version 6 - * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation 7 * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com> 8 */ 9 ··· 632 else 633 free_bootmem((unsigned long) page, PAGE_SIZE); 634 } 635 } 636 637 static int __init __sclp_vt220_init(void) ··· 642 void *page; 643 int i; 644 int num_pages; 645 646 if (sclp_vt220_initialized) 647 return 0; ··· 671 } 672 list_add_tail((struct list_head *) page, &sclp_vt220_empty); 673 } 674 - return 0; 675 } 676 677 static const struct tty_operations sclp_vt220_ops = { ··· 699 { 700 struct tty_driver *driver; 701 int rc; 702 703 /* Note: we're not testing for CONSOLE_IS_SCLP here to preserve 704 * symmetry between VM and LPAR systems regarding ttyS1. */ 705 driver = alloc_tty_driver(1); 706 if (!driver) 707 return -ENOMEM; 708 rc = __sclp_vt220_init(); 709 if (rc) 710 goto out_driver; 711 - rc = sclp_register(&sclp_vt220_register); 712 - if (rc) { 713 - printk(KERN_ERR SCLP_VT220_PRINT_HEADER 714 - "could not register tty - " 715 - "sclp_register returned %d\n", rc); 716 - goto out_init; 717 - } 718 719 driver->owner = THIS_MODULE; 720 driver->driver_name = SCLP_VT220_DRIVER_NAME; ··· 727 printk(KERN_ERR SCLP_VT220_PRINT_HEADER 728 "could not register tty - " 729 "tty_register_driver returned %d\n", rc); 730 - goto out_sclp; 731 } 732 sclp_vt220_driver = driver; 733 return 0; 734 735 - out_sclp: 736 - sclp_unregister(&sclp_vt220_register); 737 out_init: 738 - __sclp_vt220_cleanup(); 739 out_driver: 740 put_tty_driver(driver); 741 return rc;
··· 3 * SCLP VT220 terminal driver. 4 * 5 * S390 version 6 + * Copyright IBM Corp. 2003,2008 7 * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com> 8 */ 9 ··· 632 else 633 free_bootmem((unsigned long) page, PAGE_SIZE); 634 } 635 + if (!list_empty(&sclp_vt220_register.list)) 636 + sclp_unregister(&sclp_vt220_register); 637 + sclp_vt220_initialized = 0; 638 } 639 640 static int __init __sclp_vt220_init(void) ··· 639 void *page; 640 int i; 641 int num_pages; 642 + int rc; 643 644 if (sclp_vt220_initialized) 645 return 0; ··· 667 } 668 list_add_tail((struct list_head *) page, &sclp_vt220_empty); 669 } 670 + rc = sclp_register(&sclp_vt220_register); 671 + if (rc) { 672 + printk(KERN_ERR SCLP_VT220_PRINT_HEADER 673 + "could not register vt220 - " 674 + "sclp_register returned %d\n", rc); 675 + __sclp_vt220_cleanup(); 676 + } 677 + return rc; 678 } 679 680 static const struct tty_operations sclp_vt220_ops = { ··· 688 { 689 struct tty_driver *driver; 690 int rc; 691 + int cleanup; 692 693 /* Note: we're not testing for CONSOLE_IS_SCLP here to preserve 694 * symmetry between VM and LPAR systems regarding ttyS1. */ 695 driver = alloc_tty_driver(1); 696 if (!driver) 697 return -ENOMEM; 698 + cleanup = !sclp_vt220_initialized; 699 rc = __sclp_vt220_init(); 700 if (rc) 701 goto out_driver; 702 703 driver->owner = THIS_MODULE; 704 driver->driver_name = SCLP_VT220_DRIVER_NAME; ··· 721 printk(KERN_ERR SCLP_VT220_PRINT_HEADER 722 "could not register tty - " 723 "tty_register_driver returned %d\n", rc); 724 + goto out_init; 725 } 726 sclp_vt220_driver = driver; 727 return 0; 728 729 out_init: 730 + if (cleanup) 731 + __sclp_vt220_cleanup(); 732 out_driver: 733 put_tty_driver(driver); 734 return rc;
+8 -10
drivers/s390/cio/qdio.c
··· 3189 spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags); 3190 3191 ccw_device_set_options_mask(cdev, 0); 3192 - result=ccw_device_start_timeout(cdev,&irq_ptr->ccw, 3193 - QDIO_DOING_ESTABLISH,0, 0, 3194 - QDIO_ESTABLISH_TIMEOUT); 3195 if (result) { 3196 - result2=ccw_device_start_timeout(cdev,&irq_ptr->ccw, 3197 - QDIO_DOING_ESTABLISH,0,0, 3198 - QDIO_ESTABLISH_TIMEOUT); 3199 sprintf(dbf_text,"eq:io%4x",result); 3200 QDIO_DBF_TEXT2(1,setup,dbf_text); 3201 if (result2) { ··· 3217 return result; 3218 } 3219 3220 - /* Timeout is cared for already by using ccw_device_start_timeout(). */ 3221 - wait_event_interruptible(cdev->private->wait_q, 3222 - irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED || 3223 - irq_ptr->state == QDIO_IRQ_STATE_ERR); 3224 3225 if (irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED) 3226 result = 0;
··· 3189 spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags); 3190 3191 ccw_device_set_options_mask(cdev, 0); 3192 + result = ccw_device_start(cdev, &irq_ptr->ccw, 3193 + QDIO_DOING_ESTABLISH, 0, 0); 3194 if (result) { 3195 + result2 = ccw_device_start(cdev, &irq_ptr->ccw, 3196 + QDIO_DOING_ESTABLISH, 0, 0); 3197 sprintf(dbf_text,"eq:io%4x",result); 3198 QDIO_DBF_TEXT2(1,setup,dbf_text); 3199 if (result2) { ··· 3219 return result; 3220 } 3221 3222 + wait_event_interruptible_timeout(cdev->private->wait_q, 3223 + irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED || 3224 + irq_ptr->state == QDIO_IRQ_STATE_ERR, 3225 + QDIO_ESTABLISH_TIMEOUT); 3226 3227 if (irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED) 3228 result = 0;
+2 -2
drivers/s390/crypto/ap_bus.c
··· 61 MODULE_PARM_DESC(domain, "domain index for ap devices"); 62 EXPORT_SYMBOL(ap_domain_index); 63 64 - static int ap_thread_flag = 1; 65 module_param_named(poll_thread, ap_thread_flag, int, 0000); 66 - MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 1 (on)."); 67 68 static struct device *ap_root_device = NULL; 69 static DEFINE_SPINLOCK(ap_device_lock);
··· 61 MODULE_PARM_DESC(domain, "domain index for ap devices"); 62 EXPORT_SYMBOL(ap_domain_index); 63 64 + static int ap_thread_flag = 0; 65 module_param_named(poll_thread, ap_thread_flag, int, 0000); 66 + MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off)."); 67 68 static struct device *ap_root_device = NULL; 69 static DEFINE_SPINLOCK(ap_device_lock);
-32
include/asm-s390/a.out.h
··· 1 - /* 2 - * include/asm-s390/a.out.h 3 - * 4 - * S390 version 5 - * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 - * 7 - * Derived from "include/asm-i386/a.out.h" 8 - * Copyright (C) 1992, Linus Torvalds 9 - * 10 - * I don't think we'll ever need a.out ... 11 - */ 12 - 13 - #ifndef __S390_A_OUT_H__ 14 - #define __S390_A_OUT_H__ 15 - 16 - struct exec 17 - { 18 - unsigned long a_info; /* Use macros N_MAGIC, etc for access */ 19 - unsigned a_text; /* length of text, in bytes */ 20 - unsigned a_data; /* length of data, in bytes */ 21 - unsigned a_bss; /* length of uninitialized data area for file, in bytes */ 22 - unsigned a_syms; /* length of symbol table data in file, in bytes */ 23 - unsigned a_entry; /* start address */ 24 - unsigned a_trsize; /* length of relocation info for text, in bytes */ 25 - unsigned a_drsize; /* length of relocation info for data, in bytes */ 26 - }; 27 - 28 - #define N_TRSIZE(a) ((a).a_trsize) 29 - #define N_DRSIZE(a) ((a).a_drsize) 30 - #define N_SYMSIZE(a) ((a).a_syms) 31 - 32 - #endif /* __A_OUT_GNU_H__ */
···
+2 -2
include/asm-s390/bitops.h
··· 472 " brct %1,0b\n" 473 "1:\n" 474 #endif 475 - : "+a" (bytes), "+d" (size) 476 : "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr) 477 : "cc" ); 478 return bytes; ··· 507 " brct %1,0b\n" 508 "1:\n" 509 #endif 510 - : "+a" (bytes), "+a" (size) 511 : "d" (0UL), "a" (addr), "m" (*(addrtype *) addr) 512 : "cc" ); 513 return bytes;
··· 472 " brct %1,0b\n" 473 "1:\n" 474 #endif 475 + : "+&a" (bytes), "+&d" (size) 476 : "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr) 477 : "cc" ); 478 return bytes; ··· 507 " brct %1,0b\n" 508 "1:\n" 509 #endif 510 + : "+&a" (bytes), "+&a" (size) 511 : "d" (0UL), "a" (addr), "m" (*(addrtype *) addr) 512 : "cc" ); 513 return bytes;
+14 -8
include/asm-s390/elf.h
··· 115 116 #include <linux/sched.h> /* for task_struct */ 117 #include <asm/system.h> /* for save_access_regs */ 118 119 /* 120 * This is used to ensure we don't load something for the wrong architecture. ··· 138 use of this is to invoke "./ld.so someprog" to test out a new version of 139 the loader. We need to make sure that it is out of the way of the program 140 that it will "exec", and that there is sufficient room for the brk. */ 141 - 142 - #ifndef __s390x__ 143 - #define ELF_ET_DYN_BASE ((TASK_SIZE & 0x80000000) \ 144 - ? TASK_SIZE / 3 * 2 \ 145 - : 2 * TASK_SIZE / 3) 146 - #else /* __s390x__ */ 147 - #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) 148 - #endif /* __s390x__ */ 149 150 /* Wow, the "main" arch needs arch dependent functions too.. :) */ 151 ··· 207 clear_thread_flag(TIF_31BIT); \ 208 } while (0) 209 #endif /* __s390x__ */ 210 211 #endif
··· 115 116 #include <linux/sched.h> /* for task_struct */ 117 #include <asm/system.h> /* for save_access_regs */ 118 + #include <asm/mmu_context.h> 119 120 /* 121 * This is used to ensure we don't load something for the wrong architecture. ··· 137 use of this is to invoke "./ld.so someprog" to test out a new version of 138 the loader. We need to make sure that it is out of the way of the program 139 that it will "exec", and that there is sufficient room for the brk. */ 140 + #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2) 141 142 /* Wow, the "main" arch needs arch dependent functions too.. :) */ 143 ··· 213 clear_thread_flag(TIF_31BIT); \ 214 } while (0) 215 #endif /* __s390x__ */ 216 + 217 + /* 218 + * An executable for which elf_read_implies_exec() returns TRUE will 219 + * have the READ_IMPLIES_EXEC personality flag set automatically. 220 + */ 221 + #define elf_read_implies_exec(ex, executable_stack) \ 222 + ({ \ 223 + if (current->mm->context.noexec && \ 224 + executable_stack != EXSTACK_DISABLE_X) \ 225 + disable_noexec(current->mm, current); \ 226 + current->mm->context.noexec == 0; \ 227 + }) 228 229 #endif
+7 -2
include/asm-s390/mmu.h
··· 1 #ifndef __MMU_H 2 #define __MMU_H 3 4 - /* Default "unsigned long" context */ 5 - typedef unsigned long mm_context_t; 6 7 #endif
··· 1 #ifndef __MMU_H 2 #define __MMU_H 3 4 + typedef struct { 5 + struct list_head crst_list; 6 + struct list_head pgtable_list; 7 + unsigned long asce_bits; 8 + unsigned long asce_limit; 9 + int noexec; 10 + } mm_context_t; 11 12 #endif
+12 -8
include/asm-s390/mmu_context.h
··· 10 #define __S390_MMU_CONTEXT_H 11 12 #include <asm/pgalloc.h> 13 #include <asm-generic/mm_hooks.h> 14 15 static inline int init_new_context(struct task_struct *tsk, 16 struct mm_struct *mm) 17 { 18 - mm->context = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS; 19 #ifdef CONFIG_64BIT 20 - mm->context |= _ASCE_TYPE_REGION3; 21 #endif 22 return 0; 23 } 24 ··· 36 37 static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk) 38 { 39 - S390_lowcore.user_asce = mm->context | __pa(mm->pgd); 40 if (switch_amode) { 41 /* Load primary space page table origin. */ 42 - pgd_t *shadow_pgd = get_shadow_table(mm->pgd) ? : mm->pgd; 43 - S390_lowcore.user_exec_asce = mm->context | __pa(shadow_pgd); 44 asm volatile(LCTL_OPCODE" 1,1,%0\n" 45 : : "m" (S390_lowcore.user_exec_asce) ); 46 } else 47 /* Load home space page table origin. */ 48 asm volatile(LCTL_OPCODE" 13,13,%0" 49 : : "m" (S390_lowcore.user_asce) ); 50 } 51 52 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 53 struct task_struct *tsk) 54 { 55 - if (unlikely(prev == next)) 56 - return; 57 cpu_set(smp_processor_id(), next->cpu_vm_mask); 58 update_mm(next, tsk); 59 } ··· 66 struct mm_struct *next) 67 { 68 switch_mm(prev, next, current); 69 - set_fs(current->thread.mm_segment); 70 } 71 72 #endif /* __S390_MMU_CONTEXT_H */
··· 10 #define __S390_MMU_CONTEXT_H 11 12 #include <asm/pgalloc.h> 13 + #include <asm/uaccess.h> 14 #include <asm-generic/mm_hooks.h> 15 16 static inline int init_new_context(struct task_struct *tsk, 17 struct mm_struct *mm) 18 { 19 + mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS; 20 #ifdef CONFIG_64BIT 21 + mm->context.asce_bits |= _ASCE_TYPE_REGION3; 22 #endif 23 + mm->context.noexec = s390_noexec; 24 + mm->context.asce_limit = STACK_TOP_MAX; 25 + crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); 26 return 0; 27 } 28 ··· 32 33 static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk) 34 { 35 + pgd_t *pgd = mm->pgd; 36 + 37 + S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd); 38 if (switch_amode) { 39 /* Load primary space page table origin. */ 40 + pgd = mm->context.noexec ? get_shadow_table(pgd) : pgd; 41 + S390_lowcore.user_exec_asce = mm->context.asce_bits | __pa(pgd); 42 asm volatile(LCTL_OPCODE" 1,1,%0\n" 43 : : "m" (S390_lowcore.user_exec_asce) ); 44 } else 45 /* Load home space page table origin. */ 46 asm volatile(LCTL_OPCODE" 13,13,%0" 47 : : "m" (S390_lowcore.user_asce) ); 48 + set_fs(current->thread.mm_segment); 49 } 50 51 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 52 struct task_struct *tsk) 53 { 54 cpu_set(smp_processor_id(), next->cpu_vm_mask); 55 update_mm(next, tsk); 56 } ··· 61 struct mm_struct *next) 62 { 63 switch_mm(prev, next, current); 64 } 65 66 #endif /* __S390_MMU_CONTEXT_H */
+5 -31
include/asm-s390/page.h
··· 74 75 typedef struct { unsigned long pgprot; } pgprot_t; 76 typedef struct { unsigned long pte; } pte_t; 77 - 78 - #define pte_val(x) ((x).pte) 79 - #define pgprot_val(x) ((x).pgprot) 80 - 81 - #ifndef __s390x__ 82 - 83 typedef struct { unsigned long pmd; } pmd_t; 84 typedef struct { unsigned long pud; } pud_t; 85 - typedef struct { 86 - unsigned long pgd0; 87 - unsigned long pgd1; 88 - unsigned long pgd2; 89 - unsigned long pgd3; 90 - } pgd_t; 91 - 92 - #define pmd_val(x) ((x).pmd) 93 - #define pud_val(x) ((x).pud) 94 - #define pgd_val(x) ((x).pgd0) 95 - 96 - #else /* __s390x__ */ 97 - 98 - typedef struct { 99 - unsigned long pmd0; 100 - unsigned long pmd1; 101 - } pmd_t; 102 - typedef struct { unsigned long pud; } pud_t; 103 typedef struct { unsigned long pgd; } pgd_t; 104 105 - #define pmd_val(x) ((x).pmd0) 106 - #define pmd_val1(x) ((x).pmd1) 107 #define pud_val(x) ((x).pud) 108 #define pgd_val(x) ((x).pgd) 109 - 110 - #endif /* __s390x__ */ 111 - 112 - typedef struct page *pgtable_t; 113 114 #define __pte(x) ((pte_t) { (x) } ) 115 #define __pmd(x) ((pmd_t) { (x) } ) ··· 141 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) 142 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 143 144 - #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ 145 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 146 147 #include <asm-generic/memory_model.h>
··· 74 75 typedef struct { unsigned long pgprot; } pgprot_t; 76 typedef struct { unsigned long pte; } pte_t; 77 typedef struct { unsigned long pmd; } pmd_t; 78 typedef struct { unsigned long pud; } pud_t; 79 typedef struct { unsigned long pgd; } pgd_t; 80 + typedef pte_t *pgtable_t; 81 82 + #define pgprot_val(x) ((x).pgprot) 83 + #define pte_val(x) ((x).pte) 84 + #define pmd_val(x) ((x).pmd) 85 #define pud_val(x) ((x).pud) 86 #define pgd_val(x) ((x).pgd) 87 88 #define __pte(x) ((pte_t) { (x) } ) 89 #define __pmd(x) ((pmd_t) { (x) } ) ··· 167 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) 168 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 169 170 + #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \ 171 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 172 173 #include <asm-generic/memory_model.h>
+64 -50
include/asm-s390/pgalloc.h
··· 20 #define check_pgt_cache() do {} while (0) 21 22 unsigned long *crst_table_alloc(struct mm_struct *, int); 23 - void crst_table_free(unsigned long *); 24 25 - unsigned long *page_table_alloc(int); 26 - void page_table_free(unsigned long *); 27 28 static inline void clear_table(unsigned long *s, unsigned long val, size_t n) 29 { ··· 73 74 static inline unsigned long pgd_entry_type(struct mm_struct *mm) 75 { 76 - return _REGION3_ENTRY_EMPTY; 77 } 78 79 - #define pud_alloc_one(mm,address) ({ BUG(); ((pud_t *)2); }) 80 - #define pud_free(mm, x) do { } while (0) 81 82 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) 83 { 84 - unsigned long *crst = crst_table_alloc(mm, s390_noexec); 85 - if (crst) 86 - crst_table_init(crst, _SEGMENT_ENTRY_EMPTY); 87 - return (pmd_t *) crst; 88 } 89 - #define pmd_free(mm, pmd) crst_table_free((unsigned long *)pmd) 90 91 - #define pgd_populate(mm, pgd, pud) BUG() 92 - #define pgd_populate_kernel(mm, pgd, pud) BUG() 93 94 static inline void pud_populate_kernel(struct mm_struct *mm, 95 pud_t *pud, pmd_t *pmd) ··· 125 126 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) 127 { 128 - pud_t *shadow_pud = get_shadow_table(pud); 129 - pmd_t *shadow_pmd = get_shadow_table(pmd); 130 - 131 - if (shadow_pud && shadow_pmd) 132 - pud_populate_kernel(mm, shadow_pud, shadow_pmd); 133 pud_populate_kernel(mm, pud, pmd); 134 } 135 136 #endif /* __s390x__ */ 137 138 static inline pgd_t *pgd_alloc(struct mm_struct *mm) 139 { 140 - unsigned long *crst = crst_table_alloc(mm, s390_noexec); 141 - if (crst) 142 - crst_table_init(crst, pgd_entry_type(mm)); 143 - return (pgd_t *) crst; 144 } 145 - #define pgd_free(mm, pgd) crst_table_free((unsigned long *) pgd) 146 147 - static inline void 148 - pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) 149 { 150 - #ifndef __s390x__ 151 - pmd_val(pmd[0]) = _SEGMENT_ENTRY + __pa(pte); 152 - pmd_val(pmd[1]) = _SEGMENT_ENTRY + __pa(pte+256); 153 - 
pmd_val(pmd[2]) = _SEGMENT_ENTRY + __pa(pte+512); 154 - pmd_val(pmd[3]) = _SEGMENT_ENTRY + __pa(pte+768); 155 - #else /* __s390x__ */ 156 pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte); 157 - pmd_val1(*pmd) = _SEGMENT_ENTRY + __pa(pte+256); 158 - #endif /* __s390x__ */ 159 } 160 161 - static inline void 162 - pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page) 163 { 164 - pte_t *pte = (pte_t *)page_to_phys(page); 165 - pmd_t *shadow_pmd = get_shadow_table(pmd); 166 - pte_t *shadow_pte = get_shadow_pte(pte); 167 - 168 pmd_populate_kernel(mm, pmd, pte); 169 - if (shadow_pmd && shadow_pte) 170 - pmd_populate_kernel(mm, shadow_pmd, shadow_pte); 171 } 172 - #define pmd_pgtable(pmd) pmd_page(pmd) 173 174 /* 175 * page table entry allocation/free routines. 176 */ 177 - #define pte_alloc_one_kernel(mm, vmaddr) \ 178 - ((pte_t *) page_table_alloc(s390_noexec)) 179 - #define pte_alloc_one(mm, vmaddr) \ 180 - virt_to_page(page_table_alloc(s390_noexec)) 181 182 - #define pte_free_kernel(mm, pte) \ 183 - page_table_free((unsigned long *) pte) 184 - #define pte_free(mm, pte) \ 185 - page_table_free((unsigned long *) page_to_phys((struct page *) pte)) 186 187 #endif /* _S390_PGALLOC_H */
··· 20 #define check_pgt_cache() do {} while (0) 21 22 unsigned long *crst_table_alloc(struct mm_struct *, int); 23 + void crst_table_free(struct mm_struct *, unsigned long *); 24 25 + unsigned long *page_table_alloc(struct mm_struct *); 26 + void page_table_free(struct mm_struct *, unsigned long *); 27 + void disable_noexec(struct mm_struct *, struct task_struct *); 28 29 static inline void clear_table(unsigned long *s, unsigned long val, size_t n) 30 { ··· 72 73 static inline unsigned long pgd_entry_type(struct mm_struct *mm) 74 { 75 + if (mm->context.asce_limit <= (1UL << 31)) 76 + return _SEGMENT_ENTRY_EMPTY; 77 + if (mm->context.asce_limit <= (1UL << 42)) 78 + return _REGION3_ENTRY_EMPTY; 79 + return _REGION2_ENTRY_EMPTY; 80 } 81 82 + int crst_table_upgrade(struct mm_struct *, unsigned long limit); 83 + void crst_table_downgrade(struct mm_struct *, unsigned long limit); 84 + 85 + static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) 86 + { 87 + unsigned long *table = crst_table_alloc(mm, mm->context.noexec); 88 + if (table) 89 + crst_table_init(table, _REGION3_ENTRY_EMPTY); 90 + return (pud_t *) table; 91 + } 92 + #define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud) 93 94 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) 95 { 96 + unsigned long *table = crst_table_alloc(mm, mm->context.noexec); 97 + if (table) 98 + crst_table_init(table, _SEGMENT_ENTRY_EMPTY); 99 + return (pmd_t *) table; 100 } 101 + #define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd) 102 103 + static inline void pgd_populate_kernel(struct mm_struct *mm, 104 + pgd_t *pgd, pud_t *pud) 105 + { 106 + pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud); 107 + } 108 + 109 + static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) 110 + { 111 + pgd_populate_kernel(mm, pgd, pud); 112 + if (mm->context.noexec) { 113 + pgd = get_shadow_table(pgd); 114 + pud = get_shadow_table(pud); 115 + 
pgd_populate_kernel(mm, pgd, pud); 116 + } 117 + } 118 119 static inline void pud_populate_kernel(struct mm_struct *mm, 120 pud_t *pud, pmd_t *pmd) ··· 98 99 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) 100 { 101 pud_populate_kernel(mm, pud, pmd); 102 + if (mm->context.noexec) { 103 + pud = get_shadow_table(pud); 104 + pmd = get_shadow_table(pmd); 105 + pud_populate_kernel(mm, pud, pmd); 106 + } 107 } 108 109 #endif /* __s390x__ */ 110 111 static inline pgd_t *pgd_alloc(struct mm_struct *mm) 112 { 113 + INIT_LIST_HEAD(&mm->context.crst_list); 114 + INIT_LIST_HEAD(&mm->context.pgtable_list); 115 + return (pgd_t *) crst_table_alloc(mm, s390_noexec); 116 } 117 + #define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd) 118 119 + static inline void pmd_populate_kernel(struct mm_struct *mm, 120 + pmd_t *pmd, pte_t *pte) 121 { 122 pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte); 123 } 124 125 + static inline void pmd_populate(struct mm_struct *mm, 126 + pmd_t *pmd, pgtable_t pte) 127 { 128 pmd_populate_kernel(mm, pmd, pte); 129 + if (mm->context.noexec) { 130 + pmd = get_shadow_table(pmd); 131 + pmd_populate_kernel(mm, pmd, pte + PTRS_PER_PTE); 132 + } 133 } 134 + 135 + #define pmd_pgtable(pmd) \ 136 + (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE) 137 138 /* 139 * page table entry allocation/free routines. 140 */ 141 + #define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm)) 142 + #define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm)) 143 144 + #define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte) 145 + #define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte) 146 147 #endif /* _S390_PGALLOC_H */
+110 -85
include/asm-s390/pgtable.h
··· 57 * PGDIR_SHIFT determines what a third-level page table entry can map 58 */ 59 #ifndef __s390x__ 60 - # define PMD_SHIFT 22 61 - # define PUD_SHIFT 22 62 - # define PGDIR_SHIFT 22 63 #else /* __s390x__ */ 64 - # define PMD_SHIFT 21 65 # define PUD_SHIFT 31 66 - # define PGDIR_SHIFT 31 67 #endif /* __s390x__ */ 68 69 #define PMD_SIZE (1UL << PMD_SHIFT) 70 #define PMD_MASK (~(PMD_SIZE-1)) 71 #define PUD_SIZE (1UL << PUD_SHIFT) 72 #define PUD_MASK (~(PUD_SIZE-1)) 73 - #define PGDIR_SIZE (1UL << PGDIR_SHIFT) 74 - #define PGDIR_MASK (~(PGDIR_SIZE-1)) 75 76 /* 77 * entries per page directory level: the S390 is two-level, so ··· 79 * for S390 segment-table entries are combined to one PGD 80 * that leads to 1024 pte per pgd 81 */ 82 #ifndef __s390x__ 83 - # define PTRS_PER_PTE 1024 84 - # define PTRS_PER_PMD 1 85 - # define PTRS_PER_PUD 1 86 - # define PTRS_PER_PGD 512 87 #else /* __s390x__ */ 88 - # define PTRS_PER_PTE 512 89 - # define PTRS_PER_PMD 1024 90 - # define PTRS_PER_PUD 1 91 - # define PTRS_PER_PGD 2048 92 #endif /* __s390x__ */ 93 94 #define FIRST_USER_ADDRESS 0 95 ··· 125 * mapping. This needs to be calculated at compile time since the size of the 126 * VMEM_MAP is static but the size of struct page can change. 
127 */ 128 - #define VMEM_MAX_PHYS min(VMALLOC_START, ((VMEM_MAP_END - VMALLOC_END) / \ 129 - sizeof(struct page) * PAGE_SIZE) & ~((16 << 20) - 1)) 130 #define VMEM_MAP ((struct page *) VMALLOC_END) 131 132 /* ··· 374 # define PxD_SHADOW_SHIFT 2 375 #endif /* __s390x__ */ 376 377 - static inline struct page *get_shadow_page(struct page *page) 378 - { 379 - if (s390_noexec && page->index) 380 - return virt_to_page((void *)(addr_t) page->index); 381 - return NULL; 382 - } 383 - 384 - static inline void *get_shadow_pte(void *table) 385 - { 386 - unsigned long addr, offset; 387 - struct page *page; 388 - 389 - addr = (unsigned long) table; 390 - offset = addr & (PAGE_SIZE - 1); 391 - page = virt_to_page((void *)(addr ^ offset)); 392 - return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL); 393 - } 394 - 395 static inline void *get_shadow_table(void *table) 396 { 397 unsigned long addr, offset; ··· 391 * hook is made available. 392 */ 393 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, 394 - pte_t *pteptr, pte_t pteval) 395 { 396 - pte_t *shadow_pte = get_shadow_pte(pteptr); 397 - 398 - *pteptr = pteval; 399 - if (shadow_pte) { 400 - if (!(pte_val(pteval) & _PAGE_INVALID) && 401 - (pte_val(pteval) & _PAGE_SWX)) 402 - pte_val(*shadow_pte) = pte_val(pteval) | _PAGE_RO; 403 else 404 - pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY; 405 } 406 } 407 ··· 419 420 #else /* __s390x__ */ 421 422 - static inline int pgd_present(pgd_t pgd) { return 1; } 423 - static inline int pgd_none(pgd_t pgd) { return 0; } 424 - static inline int pgd_bad(pgd_t pgd) { return 0; } 425 426 static inline int pud_present(pud_t pud) 427 { 428 return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL; 429 } 430 431 static inline int pud_none(pud_t pud) 432 { 433 return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL; 434 } 435 436 static inline int pud_bad(pud_t pud) 437 { 438 - unsigned long mask = ~_REGION_ENTRY_ORIGIN & ~_REGION_ENTRY_INV; 439 - return (pud_val(pud) & mask) != 
_REGION3_ENTRY; 440 } 441 442 #endif /* __s390x__ */ ··· 549 #define pgd_clear(pgd) do { } while (0) 550 #define pud_clear(pud) do { } while (0) 551 552 - static inline void pmd_clear_kernel(pmd_t * pmdp) 553 - { 554 - pmd_val(pmdp[0]) = _SEGMENT_ENTRY_EMPTY; 555 - pmd_val(pmdp[1]) = _SEGMENT_ENTRY_EMPTY; 556 - pmd_val(pmdp[2]) = _SEGMENT_ENTRY_EMPTY; 557 - pmd_val(pmdp[3]) = _SEGMENT_ENTRY_EMPTY; 558 - } 559 - 560 #else /* __s390x__ */ 561 562 - #define pgd_clear(pgd) do { } while (0) 563 564 static inline void pud_clear_kernel(pud_t *pud) 565 { 566 - pud_val(*pud) = _REGION3_ENTRY_EMPTY; 567 } 568 569 - static inline void pud_clear(pud_t * pud) 570 { 571 pud_t *shadow = get_shadow_table(pud); 572 ··· 581 pud_clear_kernel(shadow); 582 } 583 584 static inline void pmd_clear_kernel(pmd_t * pmdp) 585 { 586 pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY; 587 - pmd_val1(*pmdp) = _SEGMENT_ENTRY_EMPTY; 588 } 589 590 - #endif /* __s390x__ */ 591 - 592 - static inline void pmd_clear(pmd_t * pmdp) 593 { 594 - pmd_t *shadow_pmd = get_shadow_table(pmdp); 595 596 - pmd_clear_kernel(pmdp); 597 - if (shadow_pmd) 598 - pmd_clear_kernel(shadow_pmd); 599 } 600 601 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 602 { 603 - pte_t *shadow_pte = get_shadow_pte(ptep); 604 - 605 pte_val(*ptep) = _PAGE_TYPE_EMPTY; 606 - if (shadow_pte) 607 - pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY; 608 } 609 610 /* ··· 682 { 683 if (!(pte_val(*ptep) & _PAGE_INVALID)) { 684 #ifndef __s390x__ 685 - /* S390 has 1mb segments, we are emulating 4MB segments */ 686 pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00); 687 #else 688 /* ipte in zarch mode can do the math */ ··· 696 pte_val(*ptep) = _PAGE_TYPE_EMPTY; 697 } 698 699 - static inline void ptep_invalidate(unsigned long address, pte_t *ptep) 700 { 701 __ptep_ipte(address, ptep); 702 - ptep = get_shadow_pte(ptep); 703 - if (ptep) 704 - __ptep_ipte(address, ptep); 705 } 706 707 /* ··· 723 pte_t __pte = *(__ptep); \ 724 
if (atomic_read(&(__mm)->mm_users) > 1 || \ 725 (__mm) != current->active_mm) \ 726 - ptep_invalidate(__address, __ptep); \ 727 else \ 728 pte_clear((__mm), (__address), (__ptep)); \ 729 __pte; \ ··· 734 unsigned long address, pte_t *ptep) 735 { 736 pte_t pte = *ptep; 737 - ptep_invalidate(address, ptep); 738 return pte; 739 } 740 ··· 755 if (full) 756 pte_clear(mm, addr, ptep); 757 else 758 - ptep_invalidate(addr, ptep); 759 return pte; 760 } 761 ··· 766 if (pte_write(__pte)) { \ 767 if (atomic_read(&(__mm)->mm_users) > 1 || \ 768 (__mm) != current->active_mm) \ 769 - ptep_invalidate(__addr, __ptep); \ 770 set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \ 771 } \ 772 }) ··· 776 ({ \ 777 int __changed = !pte_same(*(__ptep), __entry); \ 778 if (__changed) { \ 779 - ptep_invalidate(__addr, __ptep); \ 780 set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \ 781 } \ 782 __changed; \ ··· 857 858 #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) 859 #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN) 860 - #define pgd_deref(pgd) ({ BUG(); 0UL; }) 861 862 - #define pud_offset(pgd, address) ((pud_t *) pgd) 863 864 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) 865 { 866 - pmd_t *pmd = (pmd_t *) pud_deref(*pud); 867 return pmd + pmd_index(address); 868 } 869
··· 57 * PGDIR_SHIFT determines what a third-level page table entry can map 58 */ 59 #ifndef __s390x__ 60 + # define PMD_SHIFT 20 61 + # define PUD_SHIFT 20 62 + # define PGDIR_SHIFT 20 63 #else /* __s390x__ */ 64 + # define PMD_SHIFT 20 65 # define PUD_SHIFT 31 66 + # define PGDIR_SHIFT 42 67 #endif /* __s390x__ */ 68 69 #define PMD_SIZE (1UL << PMD_SHIFT) 70 #define PMD_MASK (~(PMD_SIZE-1)) 71 #define PUD_SIZE (1UL << PUD_SHIFT) 72 #define PUD_MASK (~(PUD_SIZE-1)) 73 + #define PGDIR_SIZE (1UL << PGDIR_SHIFT) 74 + #define PGDIR_MASK (~(PGDIR_SIZE-1)) 75 76 /* 77 * entries per page directory level: the S390 is two-level, so ··· 79 * for S390 segment-table entries are combined to one PGD 80 * that leads to 1024 pte per pgd 81 */ 82 + #define PTRS_PER_PTE 256 83 #ifndef __s390x__ 84 + #define PTRS_PER_PMD 1 85 + #define PTRS_PER_PUD 1 86 #else /* __s390x__ */ 87 + #define PTRS_PER_PMD 2048 88 + #define PTRS_PER_PUD 2048 89 #endif /* __s390x__ */ 90 + #define PTRS_PER_PGD 2048 91 92 #define FIRST_USER_ADDRESS 0 93 ··· 127 * mapping. This needs to be calculated at compile time since the size of the 128 * VMEM_MAP is static but the size of struct page can change. 129 */ 130 + #define VMEM_MAX_PAGES ((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page)) 131 + #define VMEM_MAX_PFN min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES) 132 + #define VMEM_MAX_PHYS ((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1)) 133 #define VMEM_MAP ((struct page *) VMALLOC_END) 134 135 /* ··· 375 # define PxD_SHADOW_SHIFT 2 376 #endif /* __s390x__ */ 377 378 static inline void *get_shadow_table(void *table) 379 { 380 unsigned long addr, offset; ··· 410 * hook is made available. 
411 */ 412 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, 413 + pte_t *ptep, pte_t entry) 414 { 415 + *ptep = entry; 416 + if (mm->context.noexec) { 417 + if (!(pte_val(entry) & _PAGE_INVALID) && 418 + (pte_val(entry) & _PAGE_SWX)) 419 + pte_val(entry) |= _PAGE_RO; 420 else 421 + pte_val(entry) = _PAGE_TYPE_EMPTY; 422 + ptep[PTRS_PER_PTE] = entry; 423 } 424 } 425 ··· 439 440 #else /* __s390x__ */ 441 442 + static inline int pgd_present(pgd_t pgd) 443 + { 444 + if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2) 445 + return 1; 446 + return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL; 447 + } 448 + 449 + static inline int pgd_none(pgd_t pgd) 450 + { 451 + if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2) 452 + return 0; 453 + return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL; 454 + } 455 + 456 + static inline int pgd_bad(pgd_t pgd) 457 + { 458 + /* 459 + * With dynamic page table levels the pgd can be a region table 460 + * entry or a segment table entry. Check for the bit that are 461 + * invalid for either table entry. 462 + */ 463 + unsigned long mask = 464 + ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV & 465 + ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH; 466 + return (pgd_val(pgd) & mask) != 0; 467 + } 468 469 static inline int pud_present(pud_t pud) 470 { 471 + if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3) 472 + return 1; 473 return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL; 474 } 475 476 static inline int pud_none(pud_t pud) 477 { 478 + if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3) 479 + return 0; 480 return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL; 481 } 482 483 static inline int pud_bad(pud_t pud) 484 { 485 + /* 486 + * With dynamic page table levels the pud can be a region table 487 + * entry or a segment table entry. Check for the bit that are 488 + * invalid for either table entry. 
489 + */ 490 + unsigned long mask = 491 + ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV & 492 + ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH; 493 + return (pud_val(pud) & mask) != 0; 494 } 495 496 #endif /* __s390x__ */ ··· 535 #define pgd_clear(pgd) do { } while (0) 536 #define pud_clear(pud) do { } while (0) 537 538 #else /* __s390x__ */ 539 540 + static inline void pgd_clear_kernel(pgd_t * pgd) 541 + { 542 + if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) 543 + pgd_val(*pgd) = _REGION2_ENTRY_EMPTY; 544 + } 545 + 546 + static inline void pgd_clear(pgd_t * pgd) 547 + { 548 + pgd_t *shadow = get_shadow_table(pgd); 549 + 550 + pgd_clear_kernel(pgd); 551 + if (shadow) 552 + pgd_clear_kernel(shadow); 553 + } 554 555 static inline void pud_clear_kernel(pud_t *pud) 556 { 557 + if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) 558 + pud_val(*pud) = _REGION3_ENTRY_EMPTY; 559 } 560 561 + static inline void pud_clear(pud_t *pud) 562 { 563 pud_t *shadow = get_shadow_table(pud); 564 ··· 561 pud_clear_kernel(shadow); 562 } 563 564 + #endif /* __s390x__ */ 565 + 566 static inline void pmd_clear_kernel(pmd_t * pmdp) 567 { 568 pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY; 569 } 570 571 + static inline void pmd_clear(pmd_t *pmd) 572 { 573 + pmd_t *shadow = get_shadow_table(pmd); 574 575 + pmd_clear_kernel(pmd); 576 + if (shadow) 577 + pmd_clear_kernel(shadow); 578 } 579 580 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 581 { 582 pte_val(*ptep) = _PAGE_TYPE_EMPTY; 583 + if (mm->context.noexec) 584 + pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY; 585 } 586 587 /* ··· 665 { 666 if (!(pte_val(*ptep) & _PAGE_INVALID)) { 667 #ifndef __s390x__ 668 + /* pto must point to the start of the segment table */ 669 pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00); 670 #else 671 /* ipte in zarch mode can do the math */ ··· 679 pte_val(*ptep) = _PAGE_TYPE_EMPTY; 680 } 681 682 + static inline void 
ptep_invalidate(struct mm_struct *mm, 683 + unsigned long address, pte_t *ptep) 684 { 685 __ptep_ipte(address, ptep); 686 + if (mm->context.noexec) 687 + __ptep_ipte(address, ptep + PTRS_PER_PTE); 688 } 689 690 /* ··· 706 pte_t __pte = *(__ptep); \ 707 if (atomic_read(&(__mm)->mm_users) > 1 || \ 708 (__mm) != current->active_mm) \ 709 + ptep_invalidate(__mm, __address, __ptep); \ 710 else \ 711 pte_clear((__mm), (__address), (__ptep)); \ 712 __pte; \ ··· 717 unsigned long address, pte_t *ptep) 718 { 719 pte_t pte = *ptep; 720 + ptep_invalidate(vma->vm_mm, address, ptep); 721 return pte; 722 } 723 ··· 738 if (full) 739 pte_clear(mm, addr, ptep); 740 else 741 + ptep_invalidate(mm, addr, ptep); 742 return pte; 743 } 744 ··· 749 if (pte_write(__pte)) { \ 750 if (atomic_read(&(__mm)->mm_users) > 1 || \ 751 (__mm) != current->active_mm) \ 752 + ptep_invalidate(__mm, __addr, __ptep); \ 753 set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \ 754 } \ 755 }) ··· 759 ({ \ 760 int __changed = !pte_same(*(__ptep), __entry); \ 761 if (__changed) { \ 762 + ptep_invalidate((__vma)->vm_mm, __addr, __ptep); \ 763 set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \ 764 } \ 765 __changed; \ ··· 840 841 #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) 842 #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN) 843 + #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) 844 845 + static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) 846 + { 847 + pud_t *pud = (pud_t *) pgd; 848 + if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) 849 + pud = (pud_t *) pgd_deref(*pgd); 850 + return pud + pud_index(address); 851 + } 852 853 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) 854 { 855 + pmd_t *pmd = (pmd_t *) pud; 856 + if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) 857 + pmd = (pmd_t *) pud_deref(*pud); 858 return pmd + pmd_index(address); 859 } 860
+15 -30
include/asm-s390/processor.h
··· 64 */ 65 #ifndef __s390x__ 66 67 - # define TASK_SIZE (0x80000000UL) 68 - # define TASK_UNMAPPED_BASE (TASK_SIZE / 2) 69 - # define DEFAULT_TASK_SIZE (0x80000000UL) 70 71 #else /* __s390x__ */ 72 73 - # define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_31BIT) ? \ 74 - (0x80000000UL) : (0x40000000000UL)) 75 - # define TASK_SIZE TASK_SIZE_OF(current) 76 - # define TASK_UNMAPPED_BASE (TASK_SIZE / 2) 77 - # define DEFAULT_TASK_SIZE (0x40000000000UL) 78 79 #endif /* __s390x__ */ 80 81 #ifdef __KERNEL__ 82 83 - #define STACK_TOP TASK_SIZE 84 - #define STACK_TOP_MAX DEFAULT_TASK_SIZE 85 86 #endif 87 ··· 143 /* 144 * Do necessary setup to start up a new thread. 145 */ 146 - #ifndef __s390x__ 147 - 148 #define start_thread(regs, new_psw, new_stackp) do { \ 149 set_fs(USER_DS); \ 150 regs->psw.mask = psw_user_bits; \ 151 regs->psw.addr = new_psw | PSW_ADDR_AMODE; \ 152 regs->gprs[15] = new_stackp ; \ 153 } while (0) 154 - 155 - #else /* __s390x__ */ 156 - 157 - #define start_thread(regs, new_psw, new_stackp) do { \ 158 - set_fs(USER_DS); \ 159 - regs->psw.mask = psw_user_bits; \ 160 - regs->psw.addr = new_psw; \ 161 - regs->gprs[15] = new_stackp; \ 162 - } while (0) 163 - 164 - #define start_thread31(regs, new_psw, new_stackp) do { \ 165 - set_fs(USER_DS); \ 166 - regs->psw.mask = psw_user32_bits; \ 167 - regs->psw.addr = new_psw; \ 168 - regs->gprs[15] = new_stackp; \ 169 - } while (0) 170 - 171 - #endif /* __s390x__ */ 172 173 /* Forward declaration, a strange C thing */ 174 struct task_struct;
··· 64 */ 65 #ifndef __s390x__ 66 67 + #define TASK_SIZE (1UL << 31) 68 + #define TASK_UNMAPPED_BASE (1UL << 30) 69 70 #else /* __s390x__ */ 71 72 + #define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk,TIF_31BIT) ? \ 73 + (1UL << 31) : (1UL << 53)) 74 + #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \ 75 + (1UL << 30) : (1UL << 41)) 76 + #define TASK_SIZE TASK_SIZE_OF(current) 77 78 #endif /* __s390x__ */ 79 80 #ifdef __KERNEL__ 81 82 + #ifndef __s390x__ 83 + #define STACK_TOP (1UL << 31) 84 + #define STACK_TOP_MAX (1UL << 31) 85 + #else /* __s390x__ */ 86 + #define STACK_TOP (1UL << (test_thread_flag(TIF_31BIT) ? 31:42)) 87 + #define STACK_TOP_MAX (1UL << 42) 88 + #endif /* __s390x__ */ 89 + 90 91 #endif 92 ··· 138 /* 139 * Do necessary setup to start up a new thread. 140 */ 141 #define start_thread(regs, new_psw, new_stackp) do { \ 142 set_fs(USER_DS); \ 143 regs->psw.mask = psw_user_bits; \ 144 regs->psw.addr = new_psw | PSW_ADDR_AMODE; \ 145 regs->gprs[15] = new_stackp ; \ 146 } while (0) 147 148 /* Forward declaration, a strange C thing */ 149 struct task_struct;
+37 -12
include/asm-s390/tlb.h
··· 38 struct mm_struct *mm; 39 unsigned int fullmm; 40 unsigned int nr_ptes; 41 - unsigned int nr_pmds; 42 void *array[TLB_NR_PTRS]; 43 }; 44 ··· 53 tlb->fullmm = full_mm_flush || (num_online_cpus() == 1) || 54 (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm); 55 tlb->nr_ptes = 0; 56 - tlb->nr_pmds = TLB_NR_PTRS; 57 if (tlb->fullmm) 58 __tlb_flush_mm(mm); 59 return tlb; ··· 62 static inline void tlb_flush_mmu(struct mmu_gather *tlb, 63 unsigned long start, unsigned long end) 64 { 65 - if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pmds < TLB_NR_PTRS)) 66 __tlb_flush_mm(tlb->mm); 67 while (tlb->nr_ptes > 0) 68 pte_free(tlb->mm, tlb->array[--tlb->nr_ptes]); 69 - while (tlb->nr_pmds < TLB_NR_PTRS) 70 - pmd_free(tlb->mm, (pmd_t *) tlb->array[tlb->nr_pmds++]); 71 } 72 73 static inline void tlb_finish_mmu(struct mmu_gather *tlb, ··· 96 * pte_free_tlb frees a pte table and clears the CRSTE for the 97 * page table from the tlb. 98 */ 99 - static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t page) 100 { 101 if (!tlb->fullmm) { 102 - tlb->array[tlb->nr_ptes++] = page; 103 - if (tlb->nr_ptes >= tlb->nr_pmds) 104 tlb_flush_mmu(tlb, 0, 0); 105 } else 106 - pte_free(tlb->mm, page); 107 } 108 109 /* 110 * pmd_free_tlb frees a pmd table and clears the CRSTE for the 111 * segment table entry from the tlb. 112 */ 113 static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) 114 { 115 #ifdef __s390x__ 116 if (!tlb->fullmm) { 117 - tlb->array[--tlb->nr_pmds] = (struct page *) pmd; 118 - if (tlb->nr_ptes >= tlb->nr_pmds) 119 tlb_flush_mmu(tlb, 0, 0); 120 } else 121 pmd_free(tlb->mm, pmd); 122 #endif 123 } 124 125 - #define pud_free_tlb(tlb, pud) do { } while (0) 126 127 #define tlb_start_vma(tlb, vma) do { } while (0) 128 #define tlb_end_vma(tlb, vma) do { } while (0)
··· 38 struct mm_struct *mm; 39 unsigned int fullmm; 40 unsigned int nr_ptes; 41 + unsigned int nr_pxds; 42 void *array[TLB_NR_PTRS]; 43 }; 44 ··· 53 tlb->fullmm = full_mm_flush || (num_online_cpus() == 1) || 54 (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm); 55 tlb->nr_ptes = 0; 56 + tlb->nr_pxds = TLB_NR_PTRS; 57 if (tlb->fullmm) 58 __tlb_flush_mm(mm); 59 return tlb; ··· 62 static inline void tlb_flush_mmu(struct mmu_gather *tlb, 63 unsigned long start, unsigned long end) 64 { 65 + if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < TLB_NR_PTRS)) 66 __tlb_flush_mm(tlb->mm); 67 while (tlb->nr_ptes > 0) 68 pte_free(tlb->mm, tlb->array[--tlb->nr_ptes]); 69 + while (tlb->nr_pxds < TLB_NR_PTRS) 70 + /* pgd_free frees the pointer as region or segment table */ 71 + pgd_free(tlb->mm, tlb->array[tlb->nr_pxds++]); 72 } 73 74 static inline void tlb_finish_mmu(struct mmu_gather *tlb, ··· 95 * pte_free_tlb frees a pte table and clears the CRSTE for the 96 * page table from the tlb. 97 */ 98 + static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte) 99 { 100 if (!tlb->fullmm) { 101 + tlb->array[tlb->nr_ptes++] = pte; 102 + if (tlb->nr_ptes >= tlb->nr_pxds) 103 tlb_flush_mmu(tlb, 0, 0); 104 } else 105 + pte_free(tlb->mm, pte); 106 } 107 108 /* 109 * pmd_free_tlb frees a pmd table and clears the CRSTE for the 110 * segment table entry from the tlb. 111 + * If the mm uses a two level page table the single pmd is freed 112 + * as the pgd. pmd_free_tlb checks the asce_limit against 2GB 113 + * to avoid the double free of the pmd in this case. 
114 */ 115 static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) 116 { 117 #ifdef __s390x__ 118 + if (tlb->mm->context.asce_limit <= (1UL << 31)) 119 + return; 120 if (!tlb->fullmm) { 121 + tlb->array[--tlb->nr_pxds] = pmd; 122 + if (tlb->nr_ptes >= tlb->nr_pxds) 123 tlb_flush_mmu(tlb, 0, 0); 124 } else 125 pmd_free(tlb->mm, pmd); 126 #endif 127 } 128 129 + /* 130 + * pud_free_tlb frees a pud table and clears the CRSTE for the 131 + * region third table entry from the tlb. 132 + * If the mm uses a three level page table the single pud is freed 133 + * as the pgd. pud_free_tlb checks the asce_limit against 4TB 134 + * to avoid the double free of the pud in this case. 135 + */ 136 + static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud) 137 + { 138 + #ifdef __s390x__ 139 + if (tlb->mm->context.asce_limit <= (1UL << 42)) 140 + return; 141 + if (!tlb->fullmm) { 142 + tlb->array[--tlb->nr_pxds] = pud; 143 + if (tlb->nr_ptes >= tlb->nr_pxds) 144 + tlb_flush_mmu(tlb, 0, 0); 145 + } else 146 + pud_free(tlb->mm, pud); 147 + #endif 148 + } 149 150 #define tlb_start_vma(tlb, vma) do { } while (0) 151 #define tlb_end_vma(tlb, vma) do { } while (0)
+6 -5
include/asm-s390/tlbflush.h
··· 61 * only ran on the local cpu. 62 */ 63 if (MACHINE_HAS_IDTE) { 64 - pgd_t *shadow = get_shadow_table(mm->pgd); 65 - 66 - if (shadow) 67 - __tlb_flush_idte((unsigned long) shadow | mm->context); 68 - __tlb_flush_idte((unsigned long) mm->pgd | mm->context); 69 return; 70 } 71 preempt_disable();
··· 61 * only ran on the local cpu. 62 */ 63 if (MACHINE_HAS_IDTE) { 64 + if (mm->context.noexec) 65 + __tlb_flush_idte((unsigned long) 66 + get_shadow_table(mm->pgd) | 67 + mm->context.asce_bits); 68 + __tlb_flush_idte((unsigned long) mm->pgd | 69 + mm->context.asce_bits); 70 return; 71 } 72 preempt_disable();
+4 -1
include/asm-s390/unistd.h
··· 256 #define __NR_signalfd 316 257 #define __NR_timerfd 317 258 #define __NR_eventfd 318 259 - #define NR_syscalls 319 260 261 /* 262 * There are some system calls that are not present on 64 bit, some
··· 256 #define __NR_signalfd 316 257 #define __NR_timerfd 317 258 #define __NR_eventfd 318 259 + #define __NR_timerfd_create 319 260 + #define __NR_timerfd_settime 320 261 + #define __NR_timerfd_gettime 321 262 + #define NR_syscalls 322 263 264 /* 265 * There are some system calls that are not present on 64 bit, some