Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390: remove 31 bit support

Remove the 31 bit support in order to reduce maintenance cost and
effectively remove dead code. For a couple of years now there has been
no distribution left that ships a 31 bit kernel.

The 31 bit kernel had also been broken for more than a year before
anybody noticed. In addition, I added a removal warning to the kernel,
shown at ipl for 5 minutes: a960062e5826 ("s390: add 31 bit warning
message"), which let everybody know about the plan to remove the 31 bit
code. We didn't get any response.

Given that the last 31 bit only machine was introduced in 1999 let's
remove the code.
Anybody with 31 bit user space code can still use the compat mode.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

authored by

Heiko Carstens and committed by
Martin Schwidefsky
5a79859a 1833c9f6

+170 -6016
-1
arch/s390/Kbuild
··· 4 4 obj-$(CONFIG_CRYPTO_HW) += crypto/ 5 5 obj-$(CONFIG_S390_HYPFS_FS) += hypfs/ 6 6 obj-$(CONFIG_APPLDATA_BASE) += appldata/ 7 - obj-$(CONFIG_MATHEMU) += math-emu/ 8 7 obj-y += net/ 9 8 obj-$(CONFIG_PCI) += pci/
+21 -58
arch/s390/Kconfig
··· 35 35 def_bool y 36 36 37 37 config ARCH_DMA_ADDR_T_64BIT 38 - def_bool 64BIT 38 + def_bool y 39 39 40 40 config GENERIC_LOCKBREAK 41 41 def_bool y if SMP && PREEMPT ··· 59 59 def_bool n 60 60 61 61 config ARCH_SUPPORTS_UPROBES 62 - def_bool 64BIT 62 + def_bool y 63 63 64 64 config S390 65 65 def_bool y ··· 110 110 select GENERIC_TIME_VSYSCALL 111 111 select HAVE_ALIGNED_STRUCT_PAGE if SLUB 112 112 select HAVE_ARCH_AUDITSYSCALL 113 - select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 113 + select HAVE_ARCH_JUMP_LABEL 114 114 select HAVE_ARCH_SECCOMP_FILTER 115 115 select HAVE_ARCH_TRACEHOOK 116 - select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT 117 - select HAVE_BPF_JIT if 64BIT && PACK_STACK 116 + select HAVE_ARCH_TRANSPARENT_HUGEPAGE 117 + select HAVE_BPF_JIT if PACK_STACK 118 118 select HAVE_CMPXCHG_DOUBLE 119 119 select HAVE_CMPXCHG_LOCAL 120 120 select HAVE_DEBUG_KMEMLEAK 121 - select HAVE_DYNAMIC_FTRACE if 64BIT 122 - select HAVE_DYNAMIC_FTRACE_WITH_REGS if 64BIT 121 + select HAVE_DYNAMIC_FTRACE 122 + select HAVE_DYNAMIC_FTRACE_WITH_REGS 123 123 select HAVE_FTRACE_MCOUNT_RECORD 124 - select HAVE_FUNCTION_GRAPH_TRACER if 64BIT 125 - select HAVE_FUNCTION_TRACER if 64BIT 124 + select HAVE_FUNCTION_GRAPH_TRACER 125 + select HAVE_FUNCTION_TRACER 126 126 select HAVE_FUTEX_CMPXCHG if FUTEX 127 127 select HAVE_KERNEL_BZIP2 128 128 select HAVE_KERNEL_GZIP ··· 132 132 select HAVE_KERNEL_XZ 133 133 select HAVE_KPROBES 134 134 select HAVE_KRETPROBES 135 - select HAVE_KVM if 64BIT 135 + select HAVE_KVM 136 136 select HAVE_MEMBLOCK 137 137 select HAVE_MEMBLOCK_NODE_MAP 138 138 select HAVE_MEMBLOCK_PHYS_MAP ··· 141 141 select HAVE_PERF_EVENTS 142 142 select HAVE_REGS_AND_STACK_ACCESS_API 143 143 select HAVE_SYSCALL_TRACEPOINTS 144 - select HAVE_UID16 if 32BIT 145 144 select HAVE_VIRT_CPU_ACCOUNTING 146 145 select MODULES_USE_ELF_RELA 147 146 select NO_BOOTMEM ··· 189 190 190 191 choice 191 192 prompt "Processor type" 192 - default MARCH_G5 193 - 194 - config MARCH_G5 195 - bool 
"System/390 model G5 and G6" 196 - depends on !64BIT 197 - help 198 - Select this to build a 31 bit kernel that works 199 - on all ESA/390 and z/Architecture machines. 193 + default MARCH_Z900 200 194 201 195 config MARCH_Z900 202 196 bool "IBM zSeries model z800 and z900" 203 - select HAVE_MARCH_Z900_FEATURES if 64BIT 197 + select HAVE_MARCH_Z900_FEATURES 204 198 help 205 199 Select this to enable optimizations for model z800/z900 (2064 and 206 200 2066 series). This will enable some optimizations that are not ··· 201 209 202 210 config MARCH_Z990 203 211 bool "IBM zSeries model z890 and z990" 204 - select HAVE_MARCH_Z990_FEATURES if 64BIT 212 + select HAVE_MARCH_Z990_FEATURES 205 213 help 206 214 Select this to enable optimizations for model z890/z990 (2084 and 207 215 2086 series). The kernel will be slightly faster but will not work ··· 209 217 210 218 config MARCH_Z9_109 211 219 bool "IBM System z9" 212 - select HAVE_MARCH_Z9_109_FEATURES if 64BIT 220 + select HAVE_MARCH_Z9_109_FEATURES 213 221 help 214 222 Select this to enable optimizations for IBM System z9 (2094 and 215 223 2096 series). The kernel will be slightly faster but will not work ··· 217 225 218 226 config MARCH_Z10 219 227 bool "IBM System z10" 220 - select HAVE_MARCH_Z10_FEATURES if 64BIT 228 + select HAVE_MARCH_Z10_FEATURES 221 229 help 222 230 Select this to enable optimizations for IBM System z10 (2097 and 223 231 2098 series). The kernel will be slightly faster but will not work ··· 225 233 226 234 config MARCH_Z196 227 235 bool "IBM zEnterprise 114 and 196" 228 - select HAVE_MARCH_Z196_FEATURES if 64BIT 236 + select HAVE_MARCH_Z196_FEATURES 229 237 help 230 238 Select this to enable optimizations for IBM zEnterprise 114 and 196 231 239 (2818 and 2817 series). 
The kernel will be slightly faster but will ··· 233 241 234 242 config MARCH_ZEC12 235 243 bool "IBM zBC12 and zEC12" 236 - select HAVE_MARCH_ZEC12_FEATURES if 64BIT 244 + select HAVE_MARCH_ZEC12_FEATURES 237 245 help 238 246 Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and 239 247 2827 series). The kernel will be slightly faster but will not work on ··· 241 249 242 250 config MARCH_Z13 243 251 bool "IBM z13" 244 - select HAVE_MARCH_Z13_FEATURES if 64BIT 252 + select HAVE_MARCH_Z13_FEATURES 245 253 help 246 254 Select this to enable optimizations for IBM z13 (2964 series). 247 255 The kernel will be slightly faster but will not work on older 248 256 machines. 249 257 250 258 endchoice 251 - 252 - config MARCH_G5_TUNE 253 - def_bool TUNE_G5 || MARCH_G5 && TUNE_DEFAULT 254 259 255 260 config MARCH_Z900_TUNE 256 261 def_bool TUNE_Z900 || MARCH_Z900 && TUNE_DEFAULT ··· 287 298 Tune the generated code for the target processor for which the kernel 288 299 will be compiled. 289 300 290 - config TUNE_G5 291 - bool "System/390 model G5 and G6" 292 - 293 301 config TUNE_Z900 294 302 bool "IBM zSeries model z800 and z900" 295 303 ··· 312 326 313 327 config 64BIT 314 328 def_bool y 315 - prompt "64 bit kernel" 316 - help 317 - Select this option if you have an IBM z/Architecture machine 318 - and want to use the 64 bit addressing mode. 319 - 320 - config 32BIT 321 - def_bool y if !64BIT 322 329 323 330 config COMPAT 324 331 def_bool y 325 332 prompt "Kernel support for 31 bit emulation" 326 - depends on 64BIT 327 333 select COMPAT_BINFMT_ELF if BINFMT_ELF 328 334 select ARCH_WANT_OLD_COMPAT_IPC 329 335 select COMPAT_OLD_SIGACTION ··· 354 376 int "Maximum number of CPUs (2-512)" 355 377 range 2 512 356 378 depends on SMP 357 - default "32" if !64BIT 358 - default "64" if 64BIT 379 + default "64" 359 380 help 360 381 This allows you to specify the maximum number of CPUs which this 361 382 kernel will support. 
The maximum supported value is 512 and the ··· 395 418 396 419 source kernel/Kconfig.preempt 397 420 398 - config MATHEMU 399 - def_bool y 400 - prompt "IEEE FPU emulation" 401 - depends on MARCH_G5 402 - help 403 - This option is required for IEEE compliant floating point arithmetic 404 - on older ESA/390 machines. Say Y unless you know your machine doesn't 405 - need this. 406 - 407 421 source kernel/Kconfig.hz 408 422 409 423 endmenu ··· 405 437 def_bool y 406 438 select SPARSEMEM_VMEMMAP_ENABLE 407 439 select SPARSEMEM_VMEMMAP 408 - select SPARSEMEM_STATIC if !64BIT 409 440 410 441 config ARCH_SPARSEMEM_DEFAULT 411 442 def_bool y ··· 420 453 421 454 config ARCH_ENABLE_SPLIT_PMD_PTLOCK 422 455 def_bool y 423 - depends on 64BIT 424 456 425 457 config FORCE_MAX_ZONEORDER 426 458 int ··· 494 528 495 529 menuconfig PCI 496 530 bool "PCI support" 497 - depends on 64BIT 498 531 select HAVE_DMA_ATTRS 499 532 select PCI_MSI 500 533 help ··· 563 598 564 599 config SCM_BUS 565 600 def_bool y 566 - depends on 64BIT 567 601 prompt "SCM bus driver" 568 602 help 569 603 Bus driver for Storage Class Memory. ··· 584 620 585 621 config CRASH_DUMP 586 622 bool "kernel crash dumps" 587 - depends on 64BIT && SMP 623 + depends on SMP 588 624 select KEXEC 589 625 help 590 626 Generate crash dump after being started by kexec. ··· 623 659 menu "Power Management" 624 660 625 661 config ARCH_HIBERNATION_POSSIBLE 626 - def_bool y if 64BIT 662 + def_bool y 627 663 628 664 source "kernel/power/Kconfig" 629 665 ··· 774 810 config S390_GUEST 775 811 def_bool y 776 812 prompt "s390 support for virtio devices" 777 - depends on 64BIT 778 813 select TTY 779 814 select VIRTUALIZATION 780 815 select VIRTIO
+1 -15
arch/s390/Makefile
··· 13 13 # Copyright (C) 1994 by Linus Torvalds 14 14 # 15 15 16 - ifndef CONFIG_64BIT 17 - LD_BFD := elf32-s390 18 - LDFLAGS := -m elf_s390 19 - KBUILD_CFLAGS += -m31 20 - KBUILD_AFLAGS += -m31 21 - UTS_MACHINE := s390 22 - STACK_SIZE := 8192 23 - CHECKFLAGS += -D__s390__ -msize-long 24 - else 25 16 LD_BFD := elf64-s390 26 17 LDFLAGS := -m elf64_s390 27 18 KBUILD_AFLAGS_MODULE += -fPIC ··· 22 31 UTS_MACHINE := s390x 23 32 STACK_SIZE := 16384 24 33 CHECKFLAGS += -D__s390__ -D__s390x__ 25 - endif 26 34 27 35 export LD_BFD 28 36 29 - mflags-$(CONFIG_MARCH_G5) := -march=g5 30 37 mflags-$(CONFIG_MARCH_Z900) := -march=z900 31 38 mflags-$(CONFIG_MARCH_Z990) := -march=z990 32 39 mflags-$(CONFIG_MARCH_Z9_109) := -march=z9-109 ··· 36 47 aflags-y += $(mflags-y) 37 48 cflags-y += $(mflags-y) 38 49 39 - cflags-$(CONFIG_MARCH_G5_TUNE) += -mtune=g5 40 50 cflags-$(CONFIG_MARCH_Z900_TUNE) += -mtune=z900 41 51 cflags-$(CONFIG_MARCH_Z990_TUNE) += -mtune=z990 42 52 cflags-$(CONFIG_MARCH_Z9_109_TUNE) += -mtune=z9-109 ··· 92 104 OBJCOPYFLAGS := -O binary 93 105 94 106 head-y := arch/s390/kernel/head.o 95 - head-y += arch/s390/kernel/$(if $(CONFIG_64BIT),head64.o,head31.o) 107 + head-y += arch/s390/kernel/head64.o 96 108 97 109 # See arch/s390/Kbuild for content of core part of the kernel 98 110 core-y += arch/s390/ ··· 117 129 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 118 130 119 131 vdso_install: 120 - ifeq ($(CONFIG_64BIT),y) 121 132 $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@ 122 - endif 123 133 $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso32 $@ 124 134 125 135 archclean:
+5 -7
arch/s390/boot/compressed/Makefile
··· 4 4 # create a compressed vmlinux image from the original vmlinux 5 5 # 6 6 7 - BITS := $(if $(CONFIG_64BIT),64,31) 8 - 9 7 targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 10 8 targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4 11 - targets += misc.o piggy.o sizes.h head$(BITS).o 9 + targets += misc.o piggy.o sizes.h head64.o 12 10 13 - KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 11 + KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2 14 12 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING 15 13 KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks 16 14 KBUILD_CFLAGS += $(call cc-option,-mpacked-stack) ··· 17 19 GCOV_PROFILE := n 18 20 19 21 OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o sclp.o ebcdic.o) 20 - OBJECTS += $(obj)/head$(BITS).o $(obj)/misc.o $(obj)/piggy.o 22 + OBJECTS += $(obj)/head64.o $(obj)/misc.o $(obj)/piggy.o 21 23 22 24 LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T 23 25 $(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS) ··· 32 34 $(obj)/sizes.h: vmlinux 33 35 $(call if_changed,sizes) 34 36 35 - AFLAGS_head$(BITS).o += -I$(obj) 36 - $(obj)/head$(BITS).o: $(obj)/sizes.h 37 + AFLAGS_head64.o += -I$(obj) 38 + $(obj)/head64.o: $(obj)/sizes.h 37 39 38 40 CFLAGS_misc.o += -I$(obj) 39 41 $(obj)/misc.o: $(obj)/sizes.h
-51
arch/s390/boot/compressed/head31.S
··· 1 - /* 2 - * Startup glue code to uncompress the kernel 3 - * 4 - * Copyright IBM Corp. 2010 5 - * 6 - * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 7 - */ 8 - 9 - #include <linux/init.h> 10 - #include <linux/linkage.h> 11 - #include <asm/asm-offsets.h> 12 - #include <asm/thread_info.h> 13 - #include <asm/page.h> 14 - #include "sizes.h" 15 - 16 - __HEAD 17 - ENTRY(startup_continue) 18 - basr %r13,0 # get base 19 - .LPG1: 20 - # setup stack 21 - l %r15,.Lstack-.LPG1(%r13) 22 - ahi %r15,-96 23 - l %r1,.Ldecompress-.LPG1(%r13) 24 - basr %r14,%r1 25 - # setup registers for memory mover & branch to target 26 - lr %r4,%r2 27 - l %r2,.Loffset-.LPG1(%r13) 28 - la %r4,0(%r2,%r4) 29 - l %r3,.Lmvsize-.LPG1(%r13) 30 - lr %r5,%r3 31 - # move the memory mover someplace safe 32 - la %r1,0x200 33 - mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13) 34 - # decompress image is started at 0x11000 35 - lr %r6,%r2 36 - br %r1 37 - mover: 38 - mvcle %r2,%r4,0 39 - jo mover 40 - br %r6 41 - mover_end: 42 - 43 - .align 8 44 - .Lstack: 45 - .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER)) 46 - .Ldecompress: 47 - .long decompress_kernel 48 - .Loffset: 49 - .long 0x11000 50 - .Lmvsize: 51 - .long SZ__bss_start
-5
arch/s390/boot/compressed/vmlinux.lds.S
··· 1 1 #include <asm-generic/vmlinux.lds.h> 2 2 3 - #ifdef CONFIG_64BIT 4 3 OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") 5 4 OUTPUT_ARCH(s390:64-bit) 6 - #else 7 - OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") 8 - OUTPUT_ARCH(s390:31-bit) 9 - #endif 10 5 11 6 ENTRY(startup) 12 7
-4
arch/s390/hypfs/hypfs_diag0c.c
··· 19 19 static void diag0c(struct hypfs_diag0c_entry *entry) 20 20 { 21 21 asm volatile ( 22 - #ifdef CONFIG_64BIT 23 22 " sam31\n" 24 23 " diag %0,%0,0x0c\n" 25 24 " sam64\n" 26 - #else 27 - " diag %0,%0,0x0c\n" 28 - #endif 29 25 : /* no output register */ 30 26 : "a" (entry) 31 27 : "memory");
-24
arch/s390/include/asm/appldata.h
··· 9 9 10 10 #include <asm/io.h> 11 11 12 - #ifndef CONFIG_64BIT 13 - 14 - #define APPLDATA_START_INTERVAL_REC 0x00 /* Function codes for */ 15 - #define APPLDATA_STOP_REC 0x01 /* DIAG 0xDC */ 16 - #define APPLDATA_GEN_EVENT_REC 0x02 17 - #define APPLDATA_START_CONFIG_REC 0x03 18 - 19 - /* 20 - * Parameter list for DIAGNOSE X'DC' 21 - */ 22 - struct appldata_parameter_list { 23 - u16 diag; /* The DIAGNOSE code X'00DC' */ 24 - u8 function; /* The function code for the DIAGNOSE */ 25 - u8 parlist_length; /* Length of the parameter list */ 26 - u32 product_id_addr; /* Address of the 16-byte product ID */ 27 - u16 reserved; 28 - u16 buffer_length; /* Length of the application data buffer */ 29 - u32 buffer_addr; /* Address of the application data buffer */ 30 - } __attribute__ ((packed)); 31 - 32 - #else /* CONFIG_64BIT */ 33 - 34 12 #define APPLDATA_START_INTERVAL_REC 0x80 35 13 #define APPLDATA_STOP_REC 0x81 36 14 #define APPLDATA_GEN_EVENT_REC 0x82 ··· 28 50 u64 product_id_addr; 29 51 u64 buffer_addr; 30 52 } __attribute__ ((packed)); 31 - 32 - #endif /* CONFIG_64BIT */ 33 53 34 54 struct appldata_product_id { 35 55 char prod_nr[7]; /* product number */
-95
arch/s390/include/asm/atomic.h
··· 160 160 161 161 #define ATOMIC64_INIT(i) { (i) } 162 162 163 - #ifdef CONFIG_64BIT 164 - 165 163 #define __ATOMIC64_NO_BARRIER "\n" 166 164 167 165 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES ··· 271 273 } 272 274 273 275 #undef __ATOMIC64_LOOP 274 - 275 - #else /* CONFIG_64BIT */ 276 - 277 - typedef struct { 278 - long long counter; 279 - } atomic64_t; 280 - 281 - static inline long long atomic64_read(const atomic64_t *v) 282 - { 283 - register_pair rp; 284 - 285 - asm volatile( 286 - " lm %0,%N0,%1" 287 - : "=&d" (rp) : "Q" (v->counter) ); 288 - return rp.pair; 289 - } 290 - 291 - static inline void atomic64_set(atomic64_t *v, long long i) 292 - { 293 - register_pair rp = {.pair = i}; 294 - 295 - asm volatile( 296 - " stm %1,%N1,%0" 297 - : "=Q" (v->counter) : "d" (rp) ); 298 - } 299 - 300 - static inline long long atomic64_xchg(atomic64_t *v, long long new) 301 - { 302 - register_pair rp_new = {.pair = new}; 303 - register_pair rp_old; 304 - 305 - asm volatile( 306 - " lm %0,%N0,%1\n" 307 - "0: cds %0,%2,%1\n" 308 - " jl 0b\n" 309 - : "=&d" (rp_old), "+Q" (v->counter) 310 - : "d" (rp_new) 311 - : "cc"); 312 - return rp_old.pair; 313 - } 314 - 315 - static inline long long atomic64_cmpxchg(atomic64_t *v, 316 - long long old, long long new) 317 - { 318 - register_pair rp_old = {.pair = old}; 319 - register_pair rp_new = {.pair = new}; 320 - 321 - asm volatile( 322 - " cds %0,%2,%1" 323 - : "+&d" (rp_old), "+Q" (v->counter) 324 - : "d" (rp_new) 325 - : "cc"); 326 - return rp_old.pair; 327 - } 328 - 329 - 330 - static inline long long atomic64_add_return(long long i, atomic64_t *v) 331 - { 332 - long long old, new; 333 - 334 - do { 335 - old = atomic64_read(v); 336 - new = old + i; 337 - } while (atomic64_cmpxchg(v, old, new) != old); 338 - return new; 339 - } 340 - 341 - static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v) 342 - { 343 - long long old, new; 344 - 345 - do { 346 - old = atomic64_read(v); 347 - new = old | mask; 348 - } while 
(atomic64_cmpxchg(v, old, new) != old); 349 - } 350 - 351 - static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v) 352 - { 353 - long long old, new; 354 - 355 - do { 356 - old = atomic64_read(v); 357 - new = old & mask; 358 - } while (atomic64_cmpxchg(v, old, new) != old); 359 - } 360 - 361 - static inline void atomic64_add(long long i, atomic64_t *v) 362 - { 363 - atomic64_add_return(i, v); 364 - } 365 - 366 - #endif /* CONFIG_64BIT */ 367 276 368 277 static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u) 369 278 {
-28
arch/s390/include/asm/bitops.h
··· 51 51 52 52 #define __BITOPS_NO_BARRIER "\n" 53 53 54 - #ifndef CONFIG_64BIT 55 - 56 - #define __BITOPS_OR "or" 57 - #define __BITOPS_AND "nr" 58 - #define __BITOPS_XOR "xr" 59 - #define __BITOPS_BARRIER "\n" 60 - 61 - #define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \ 62 - ({ \ 63 - unsigned long __old, __new; \ 64 - \ 65 - typecheck(unsigned long *, (__addr)); \ 66 - asm volatile( \ 67 - " l %0,%2\n" \ 68 - "0: lr %1,%0\n" \ 69 - __op_string " %1,%3\n" \ 70 - " cs %0,%1,%2\n" \ 71 - " jl 0b" \ 72 - : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\ 73 - : "d" (__val) \ 74 - : "cc", "memory"); \ 75 - __old; \ 76 - }) 77 - 78 - #else /* CONFIG_64BIT */ 79 - 80 54 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES 81 55 82 56 #define __BITOPS_OR "laog" ··· 98 124 }) 99 125 100 126 #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ 101 - 102 - #endif /* CONFIG_64BIT */ 103 127 104 128 #define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG) 105 129
-26
arch/s390/include/asm/cputime.h
··· 22 22 23 23 static inline unsigned long __div(unsigned long long n, unsigned long base) 24 24 { 25 - #ifndef CONFIG_64BIT 26 - register_pair rp; 27 - 28 - rp.pair = n >> 1; 29 - asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1)); 30 - return rp.subreg.odd; 31 - #else /* CONFIG_64BIT */ 32 25 return n / base; 33 - #endif /* CONFIG_64BIT */ 34 26 } 35 27 36 28 #define cputime_one_jiffy jiffies_to_cputime(1) ··· 93 101 struct timespec *value) 94 102 { 95 103 unsigned long long __cputime = (__force unsigned long long) cputime; 96 - #ifndef CONFIG_64BIT 97 - register_pair rp; 98 - 99 - rp.pair = __cputime >> 1; 100 - asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_SEC / 2)); 101 - value->tv_nsec = rp.subreg.even * NSEC_PER_USEC / CPUTIME_PER_USEC; 102 - value->tv_sec = rp.subreg.odd; 103 - #else 104 104 value->tv_nsec = (__cputime % CPUTIME_PER_SEC) * NSEC_PER_USEC / CPUTIME_PER_USEC; 105 105 value->tv_sec = __cputime / CPUTIME_PER_SEC; 106 - #endif 107 106 } 108 107 109 108 /* ··· 112 129 struct timeval *value) 113 130 { 114 131 unsigned long long __cputime = (__force unsigned long long) cputime; 115 - #ifndef CONFIG_64BIT 116 - register_pair rp; 117 - 118 - rp.pair = __cputime >> 1; 119 - asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_USEC / 2)); 120 - value->tv_usec = rp.subreg.even / CPUTIME_PER_USEC; 121 - value->tv_sec = rp.subreg.odd; 122 - #else 123 132 value->tv_usec = (__cputime % CPUTIME_PER_SEC) / CPUTIME_PER_USEC; 124 133 value->tv_sec = __cputime / CPUTIME_PER_SEC; 125 - #endif 126 134 } 127 135 128 136 /*
+2 -12
arch/s390/include/asm/ctl_reg.h
··· 9 9 10 10 #include <linux/bug.h> 11 11 12 - #ifdef CONFIG_64BIT 13 - # define __CTL_LOAD "lctlg" 14 - # define __CTL_STORE "stctg" 15 - #else 16 - # define __CTL_LOAD "lctl" 17 - # define __CTL_STORE "stctl" 18 - #endif 19 - 20 12 #define __ctl_load(array, low, high) { \ 21 13 typedef struct { char _[sizeof(array)]; } addrtype; \ 22 14 \ 23 15 BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\ 24 16 asm volatile( \ 25 - __CTL_LOAD " %1,%2,%0\n" \ 17 + " lctlg %1,%2,%0\n" \ 26 18 : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\ 27 19 } 28 20 ··· 23 31 \ 24 32 BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\ 25 33 asm volatile( \ 26 - __CTL_STORE " %1,%2,%0\n" \ 34 + " stctg %1,%2,%0\n" \ 27 35 : "=Q" (*(addrtype *)(&array)) \ 28 36 : "i" (low), "i" (high)); \ 29 37 } ··· 52 60 union ctlreg0 { 53 61 unsigned long val; 54 62 struct { 55 - #ifdef CONFIG_64BIT 56 63 unsigned long : 32; 57 - #endif 58 64 unsigned long : 3; 59 65 unsigned long lap : 1; /* Low-address-protection control */ 60 66 unsigned long : 4;
-4
arch/s390/include/asm/elf.h
··· 107 107 /* 108 108 * These are used to set parameters in the core dumps. 109 109 */ 110 - #ifndef CONFIG_64BIT 111 - #define ELF_CLASS ELFCLASS32 112 - #else /* CONFIG_64BIT */ 113 110 #define ELF_CLASS ELFCLASS64 114 - #endif /* CONFIG_64BIT */ 115 111 #define ELF_DATA ELFDATA2MSB 116 112 #define ELF_ARCH EM_S390 117 113
-16
arch/s390/include/asm/idals.h
··· 19 19 #include <asm/cio.h> 20 20 #include <asm/uaccess.h> 21 21 22 - #ifdef CONFIG_64BIT 23 22 #define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */ 24 - #else 25 - #define IDA_SIZE_LOG 11 /* 11 for 2k , 12 for 4k */ 26 - #endif 27 23 #define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG) 28 24 29 25 /* ··· 28 32 static inline int 29 33 idal_is_needed(void *vaddr, unsigned int length) 30 34 { 31 - #ifdef CONFIG_64BIT 32 35 return ((__pa(vaddr) + length - 1) >> 31) != 0; 33 - #else 34 - return 0; 35 - #endif 36 36 } 37 37 38 38 ··· 69 77 static inline int 70 78 set_normalized_cda(struct ccw1 * ccw, void *vaddr) 71 79 { 72 - #ifdef CONFIG_64BIT 73 80 unsigned int nridaws; 74 81 unsigned long *idal; 75 82 ··· 84 93 ccw->flags |= CCW_FLAG_IDA; 85 94 vaddr = idal; 86 95 } 87 - #endif 88 96 ccw->cda = (__u32)(unsigned long) vaddr; 89 97 return 0; 90 98 } ··· 94 104 static inline void 95 105 clear_normalized_cda(struct ccw1 * ccw) 96 106 { 97 - #ifdef CONFIG_64BIT 98 107 if (ccw->flags & CCW_FLAG_IDA) { 99 108 kfree((void *)(unsigned long) ccw->cda); 100 109 ccw->flags &= ~CCW_FLAG_IDA; 101 110 } 102 - #endif 103 111 ccw->cda = 0; 104 112 } 105 113 ··· 169 181 static inline int 170 182 __idal_buffer_is_needed(struct idal_buffer *ib) 171 183 { 172 - #ifdef CONFIG_64BIT 173 184 return ib->size > (4096ul << ib->page_order) || 174 185 idal_is_needed(ib->data[0], ib->size); 175 - #else 176 - return ib->size > (4096ul << ib->page_order); 177 - #endif 178 186 } 179 187 180 188 /*
+2 -10
arch/s390/include/asm/jump_label.h
··· 6 6 #define JUMP_LABEL_NOP_SIZE 6 7 7 #define JUMP_LABEL_NOP_OFFSET 2 8 8 9 - #ifdef CONFIG_64BIT 10 - #define ASM_PTR ".quad" 11 - #define ASM_ALIGN ".balign 8" 12 - #else 13 - #define ASM_PTR ".long" 14 - #define ASM_ALIGN ".balign 4" 15 - #endif 16 - 17 9 /* 18 10 * We use a brcl 0,2 instruction for jump labels at compile time so it 19 11 * can be easily distinguished from a hotpatch generated instruction. ··· 14 22 { 15 23 asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n" 16 24 ".pushsection __jump_table, \"aw\"\n" 17 - ASM_ALIGN "\n" 18 - ASM_PTR " 0b, %l[label], %0\n" 25 + ".balign 8\n" 26 + ".quad 0b, %l[label], %0\n" 19 27 ".popsection\n" 20 28 : : "X" (key) : : label); 21 29 return false;
-159
arch/s390/include/asm/lowcore.h
··· 13 13 #include <asm/cpu.h> 14 14 #include <asm/types.h> 15 15 16 - #ifdef CONFIG_32BIT 17 - 18 - #define LC_ORDER 0 19 - #define LC_PAGES 1 20 - 21 - struct save_area { 22 - u32 ext_save; 23 - u64 timer; 24 - u64 clk_cmp; 25 - u8 pad1[24]; 26 - u8 psw[8]; 27 - u32 pref_reg; 28 - u8 pad2[20]; 29 - u32 acc_regs[16]; 30 - u64 fp_regs[4]; 31 - u32 gp_regs[16]; 32 - u32 ctrl_regs[16]; 33 - } __packed; 34 - 35 - struct save_area_ext { 36 - struct save_area sa; 37 - __vector128 vx_regs[32]; 38 - }; 39 - 40 - struct _lowcore { 41 - psw_t restart_psw; /* 0x0000 */ 42 - psw_t restart_old_psw; /* 0x0008 */ 43 - __u8 pad_0x0010[0x0014-0x0010]; /* 0x0010 */ 44 - __u32 ipl_parmblock_ptr; /* 0x0014 */ 45 - psw_t external_old_psw; /* 0x0018 */ 46 - psw_t svc_old_psw; /* 0x0020 */ 47 - psw_t program_old_psw; /* 0x0028 */ 48 - psw_t mcck_old_psw; /* 0x0030 */ 49 - psw_t io_old_psw; /* 0x0038 */ 50 - __u8 pad_0x0040[0x0058-0x0040]; /* 0x0040 */ 51 - psw_t external_new_psw; /* 0x0058 */ 52 - psw_t svc_new_psw; /* 0x0060 */ 53 - psw_t program_new_psw; /* 0x0068 */ 54 - psw_t mcck_new_psw; /* 0x0070 */ 55 - psw_t io_new_psw; /* 0x0078 */ 56 - __u32 ext_params; /* 0x0080 */ 57 - __u16 ext_cpu_addr; /* 0x0084 */ 58 - __u16 ext_int_code; /* 0x0086 */ 59 - __u16 svc_ilc; /* 0x0088 */ 60 - __u16 svc_code; /* 0x008a */ 61 - __u16 pgm_ilc; /* 0x008c */ 62 - __u16 pgm_code; /* 0x008e */ 63 - __u32 trans_exc_code; /* 0x0090 */ 64 - __u16 mon_class_num; /* 0x0094 */ 65 - __u8 per_code; /* 0x0096 */ 66 - __u8 per_atmid; /* 0x0097 */ 67 - __u32 per_address; /* 0x0098 */ 68 - __u32 monitor_code; /* 0x009c */ 69 - __u8 exc_access_id; /* 0x00a0 */ 70 - __u8 per_access_id; /* 0x00a1 */ 71 - __u8 op_access_id; /* 0x00a2 */ 72 - __u8 ar_mode_id; /* 0x00a3 */ 73 - __u8 pad_0x00a4[0x00b8-0x00a4]; /* 0x00a4 */ 74 - __u16 subchannel_id; /* 0x00b8 */ 75 - __u16 subchannel_nr; /* 0x00ba */ 76 - __u32 io_int_parm; /* 0x00bc */ 77 - __u32 io_int_word; /* 0x00c0 */ 78 - __u8 pad_0x00c4[0x00c8-0x00c4]; /* 
0x00c4 */ 79 - __u32 stfl_fac_list; /* 0x00c8 */ 80 - __u8 pad_0x00cc[0x00d4-0x00cc]; /* 0x00cc */ 81 - __u32 extended_save_area_addr; /* 0x00d4 */ 82 - __u32 cpu_timer_save_area[2]; /* 0x00d8 */ 83 - __u32 clock_comp_save_area[2]; /* 0x00e0 */ 84 - __u32 mcck_interruption_code[2]; /* 0x00e8 */ 85 - __u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */ 86 - __u32 external_damage_code; /* 0x00f4 */ 87 - __u32 failing_storage_address; /* 0x00f8 */ 88 - __u8 pad_0x00fc[0x0100-0x00fc]; /* 0x00fc */ 89 - psw_t psw_save_area; /* 0x0100 */ 90 - __u32 prefixreg_save_area; /* 0x0108 */ 91 - __u8 pad_0x010c[0x0120-0x010c]; /* 0x010c */ 92 - 93 - /* CPU register save area: defined by architecture */ 94 - __u32 access_regs_save_area[16]; /* 0x0120 */ 95 - __u32 floating_pt_save_area[8]; /* 0x0160 */ 96 - __u32 gpregs_save_area[16]; /* 0x0180 */ 97 - __u32 cregs_save_area[16]; /* 0x01c0 */ 98 - 99 - /* Save areas. */ 100 - __u32 save_area_sync[8]; /* 0x0200 */ 101 - __u32 save_area_async[8]; /* 0x0220 */ 102 - __u32 save_area_restart[1]; /* 0x0240 */ 103 - 104 - /* CPU flags. */ 105 - __u32 cpu_flags; /* 0x0244 */ 106 - 107 - /* Return psws. */ 108 - psw_t return_psw; /* 0x0248 */ 109 - psw_t return_mcck_psw; /* 0x0250 */ 110 - 111 - /* CPU time accounting values */ 112 - __u64 sync_enter_timer; /* 0x0258 */ 113 - __u64 async_enter_timer; /* 0x0260 */ 114 - __u64 mcck_enter_timer; /* 0x0268 */ 115 - __u64 exit_timer; /* 0x0270 */ 116 - __u64 user_timer; /* 0x0278 */ 117 - __u64 system_timer; /* 0x0280 */ 118 - __u64 steal_timer; /* 0x0288 */ 119 - __u64 last_update_timer; /* 0x0290 */ 120 - __u64 last_update_clock; /* 0x0298 */ 121 - __u64 int_clock; /* 0x02a0 */ 122 - __u64 mcck_clock; /* 0x02a8 */ 123 - __u64 clock_comparator; /* 0x02b0 */ 124 - 125 - /* Current process. */ 126 - __u32 current_task; /* 0x02b8 */ 127 - __u32 thread_info; /* 0x02bc */ 128 - __u32 kernel_stack; /* 0x02c0 */ 129 - 130 - /* Interrupt, panic and restart stack. 
*/ 131 - __u32 async_stack; /* 0x02c4 */ 132 - __u32 panic_stack; /* 0x02c8 */ 133 - __u32 restart_stack; /* 0x02cc */ 134 - 135 - /* Restart function and parameter. */ 136 - __u32 restart_fn; /* 0x02d0 */ 137 - __u32 restart_data; /* 0x02d4 */ 138 - __u32 restart_source; /* 0x02d8 */ 139 - 140 - /* Address space pointer. */ 141 - __u32 kernel_asce; /* 0x02dc */ 142 - __u32 user_asce; /* 0x02e0 */ 143 - __u32 current_pid; /* 0x02e4 */ 144 - 145 - /* SMP info area */ 146 - __u32 cpu_nr; /* 0x02e8 */ 147 - __u32 softirq_pending; /* 0x02ec */ 148 - __u32 percpu_offset; /* 0x02f0 */ 149 - __u32 machine_flags; /* 0x02f4 */ 150 - __u8 pad_0x02f8[0x02fc-0x02f8]; /* 0x02f8 */ 151 - __u32 spinlock_lockval; /* 0x02fc */ 152 - 153 - __u8 pad_0x0300[0x0e00-0x0300]; /* 0x0300 */ 154 - 155 - /* 156 - * 0xe00 contains the address of the IPL Parameter Information 157 - * block. Dump tools need IPIB for IPL after dump. 158 - * Note: do not change the position of any fields in 0x0e00-0x0f00 159 - */ 160 - __u32 ipib; /* 0x0e00 */ 161 - __u32 ipib_checksum; /* 0x0e04 */ 162 - __u32 vmcore_info; /* 0x0e08 */ 163 - __u8 pad_0x0e0c[0x0e18-0x0e0c]; /* 0x0e0c */ 164 - __u32 os_info; /* 0x0e18 */ 165 - __u8 pad_0x0e1c[0x0f00-0x0e1c]; /* 0x0e1c */ 166 - 167 - /* Extended facility list */ 168 - __u64 stfle_fac_list[32]; /* 0x0f00 */ 169 - } __packed; 170 - 171 - #else /* CONFIG_32BIT */ 172 - 173 16 #define LC_ORDER 1 174 17 #define LC_PAGES 2 175 18 ··· 196 353 /* Software defined save area for vector registers */ 197 354 __u8 vector_save_area[1024]; /* 0x1c00 */ 198 355 } __packed; 199 - 200 - #endif /* CONFIG_32BIT */ 201 356 202 357 #define S390_lowcore (*((struct _lowcore *) 0)) 203 358
+1 -1
arch/s390/include/asm/mman.h
··· 8 8 9 9 #include <uapi/asm/mman.h> 10 10 11 - #if !defined(__ASSEMBLY__) && defined(CONFIG_64BIT) 11 + #ifndef __ASSEMBLY__ 12 12 int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags); 13 13 #define arch_mmap_check(addr, len, flags) s390_mmap_check(addr, len, flags) 14 14 #endif
-4
arch/s390/include/asm/mmu_context.h
··· 19 19 atomic_set(&mm->context.attach_count, 0); 20 20 mm->context.flush_mm = 0; 21 21 mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS; 22 - #ifdef CONFIG_64BIT 23 22 mm->context.asce_bits |= _ASCE_TYPE_REGION3; 24 - #endif 25 23 mm->context.has_pgste = 0; 26 24 mm->context.use_skey = 0; 27 25 mm->context.asce_limit = STACK_TOP_MAX; ··· 108 110 static inline void arch_dup_mmap(struct mm_struct *oldmm, 109 111 struct mm_struct *mm) 110 112 { 111 - #ifdef CONFIG_64BIT 112 113 if (oldmm->context.asce_limit < mm->context.asce_limit) 113 114 crst_table_downgrade(mm, oldmm->context.asce_limit); 114 - #endif 115 115 } 116 116 117 117 static inline void arch_exit_mmap(struct mm_struct *mm)
-4
arch/s390/include/asm/percpu.h
··· 10 10 */ 11 11 #define __my_cpu_offset S390_lowcore.percpu_offset 12 12 13 - #ifdef CONFIG_64BIT 14 - 15 13 /* 16 14 * For 64 bit module code, the module may be more than 4G above the 17 15 * per cpu area, use weak definitions to force the compiler to ··· 180 182 181 183 #define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double 182 184 #define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double 183 - 184 - #endif /* CONFIG_64BIT */ 185 185 186 186 #include <asm-generic/percpu.h> 187 187
-3
arch/s390/include/asm/perf_event.h
··· 9 9 #ifndef _ASM_S390_PERF_EVENT_H 10 10 #define _ASM_S390_PERF_EVENT_H 11 11 12 - #ifdef CONFIG_64BIT 13 - 14 12 #include <linux/perf_event.h> 15 13 #include <linux/device.h> 16 14 #include <asm/cpu_mf.h> ··· 90 92 int perf_reserve_sampling(void); 91 93 void perf_release_sampling(void); 92 94 93 - #endif /* CONFIG_64BIT */ 94 95 #endif /* _ASM_S390_PERF_EVENT_H */
-24
arch/s390/include/asm/pgalloc.h
··· 33 33 *s = val; 34 34 n = (n / 256) - 1; 35 35 asm volatile( 36 - #ifdef CONFIG_64BIT 37 36 " mvc 8(248,%0),0(%0)\n" 38 - #else 39 - " mvc 4(252,%0),0(%0)\n" 40 - #endif 41 37 "0: mvc 256(256,%0),0(%0)\n" 42 38 " la %0,256(%0)\n" 43 39 " brct %1,0b\n" ··· 45 49 { 46 50 clear_table(crst, entry, sizeof(unsigned long)*2048); 47 51 } 48 - 49 - #ifndef CONFIG_64BIT 50 - 51 - static inline unsigned long pgd_entry_type(struct mm_struct *mm) 52 - { 53 - return _SEGMENT_ENTRY_EMPTY; 54 - } 55 - 56 - #define pud_alloc_one(mm,address) ({ BUG(); ((pud_t *)2); }) 57 - #define pud_free(mm, x) do { } while (0) 58 - 59 - #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) 60 - #define pmd_free(mm, x) do { } while (0) 61 - 62 - #define pgd_populate(mm, pgd, pud) BUG() 63 - #define pud_populate(mm, pud, pmd) BUG() 64 - 65 - #else /* CONFIG_64BIT */ 66 52 67 53 static inline unsigned long pgd_entry_type(struct mm_struct *mm) 68 54 { ··· 96 118 { 97 119 pud_val(*pud) = _REGION3_ENTRY | __pa(pmd); 98 120 } 99 - 100 - #endif /* CONFIG_64BIT */ 101 121 102 122 static inline pgd_t *pgd_alloc(struct mm_struct *mm) 103 123 {
+5 -120
arch/s390/include/asm/pgtable.h
··· 66 66 * table can map 67 67 * PGDIR_SHIFT determines what a third-level page table entry can map 68 68 */ 69 - #ifndef CONFIG_64BIT 70 - # define PMD_SHIFT 20 71 - # define PUD_SHIFT 20 72 - # define PGDIR_SHIFT 20 73 - #else /* CONFIG_64BIT */ 74 - # define PMD_SHIFT 20 75 - # define PUD_SHIFT 31 76 - # define PGDIR_SHIFT 42 77 - #endif /* CONFIG_64BIT */ 69 + #define PMD_SHIFT 20 70 + #define PUD_SHIFT 31 71 + #define PGDIR_SHIFT 42 78 72 79 73 #define PMD_SIZE (1UL << PMD_SHIFT) 80 74 #define PMD_MASK (~(PMD_SIZE-1)) ··· 84 90 * that leads to 1024 pte per pgd 85 91 */ 86 92 #define PTRS_PER_PTE 256 87 - #ifndef CONFIG_64BIT 88 - #define __PAGETABLE_PUD_FOLDED 89 - #define PTRS_PER_PMD 1 90 - #define __PAGETABLE_PMD_FOLDED 91 - #define PTRS_PER_PUD 1 92 - #else /* CONFIG_64BIT */ 93 93 #define PTRS_PER_PMD 2048 94 94 #define PTRS_PER_PUD 2048 95 - #endif /* CONFIG_64BIT */ 96 95 #define PTRS_PER_PGD 2048 97 96 98 97 #define FIRST_USER_ADDRESS 0UL ··· 114 127 115 128 #define VMEM_MAX_PHYS ((unsigned long) vmemmap) 116 129 117 - #ifdef CONFIG_64BIT 118 130 extern unsigned long MODULES_VADDR; 119 131 extern unsigned long MODULES_END; 120 132 #define MODULES_VADDR MODULES_VADDR 121 133 #define MODULES_END MODULES_END 122 134 #define MODULES_LEN (1UL << 31) 123 - #endif 124 135 125 136 static inline int is_module_addr(void *addr) 126 137 { 127 - #ifdef CONFIG_64BIT 128 138 BUILD_BUG_ON(MODULES_LEN > (1UL << 31)); 129 139 if (addr < (void *)MODULES_VADDR) 130 140 return 0; 131 141 if (addr > (void *)MODULES_END) 132 142 return 0; 133 - #endif 134 143 return 1; 135 144 } 136 145 ··· 267 284 * pte_swap is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402 268 285 */ 269 286 270 - #ifndef CONFIG_64BIT 271 - 272 - /* Bits in the segment table address-space-control-element */ 273 - #define _ASCE_SPACE_SWITCH 0x80000000UL /* space switch event */ 274 - #define _ASCE_ORIGIN_MASK 0x7ffff000UL /* segment table origin */ 275 - #define _ASCE_PRIVATE_SPACE 0x100 /* 
private space control */ 276 - #define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */ 277 - #define _ASCE_TABLE_LENGTH 0x7f /* 128 x 64 entries = 8k */ 278 - 279 - /* Bits in the segment table entry */ 280 - #define _SEGMENT_ENTRY_BITS 0x7fffffffUL /* Valid segment table bits */ 281 - #define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */ 282 - #define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */ 283 - #define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */ 284 - #define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */ 285 - #define _SEGMENT_ENTRY_PTL 0x0f /* page table length */ 286 - 287 - #define _SEGMENT_ENTRY_DIRTY 0 /* No sw dirty bit for 31-bit */ 288 - #define _SEGMENT_ENTRY_YOUNG 0 /* No sw young bit for 31-bit */ 289 - #define _SEGMENT_ENTRY_READ 0 /* No sw read bit for 31-bit */ 290 - #define _SEGMENT_ENTRY_WRITE 0 /* No sw write bit for 31-bit */ 291 - #define _SEGMENT_ENTRY_LARGE 0 /* No large pages for 31-bit */ 292 - #define _SEGMENT_ENTRY_BITS_LARGE 0 293 - #define _SEGMENT_ENTRY_ORIGIN_LARGE 0 294 - 295 - #define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL) 296 - #define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID) 297 - 298 - /* 299 - * Segment table entry encoding (I = invalid, R = read-only bit): 300 - * ..R...I..... 301 - * prot-none ..1...1..... 302 - * read-only ..1...0..... 303 - * read-write ..0...0..... 304 - * empty ..0...1..... 
305 - */ 306 - 307 - /* Page status table bits for virtualization */ 308 - #define PGSTE_ACC_BITS 0xf0000000UL 309 - #define PGSTE_FP_BIT 0x08000000UL 310 - #define PGSTE_PCL_BIT 0x00800000UL 311 - #define PGSTE_HR_BIT 0x00400000UL 312 - #define PGSTE_HC_BIT 0x00200000UL 313 - #define PGSTE_GR_BIT 0x00040000UL 314 - #define PGSTE_GC_BIT 0x00020000UL 315 - #define PGSTE_UC_BIT 0x00008000UL /* user dirty (migration) */ 316 - #define PGSTE_IN_BIT 0x00004000UL /* IPTE notify bit */ 317 - 318 - #else /* CONFIG_64BIT */ 319 - 320 287 /* Bits in the segment/region table address-space-control-element */ 321 288 #define _ASCE_ORIGIN ~0xfffUL/* segment table origin */ 322 289 #define _ASCE_PRIVATE_SPACE 0x100 /* private space control */ ··· 349 416 #define PGSTE_GC_BIT 0x0002000000000000UL 350 417 #define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */ 351 418 #define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */ 352 - 353 - #endif /* CONFIG_64BIT */ 354 419 355 420 /* Guest Page State used for virtualization */ 356 421 #define _PGSTE_GPS_ZERO 0x0000000080000000UL ··· 440 509 /* 441 510 * pgd/pmd/pte query functions 442 511 */ 443 - #ifndef CONFIG_64BIT 444 - 445 - static inline int pgd_present(pgd_t pgd) { return 1; } 446 - static inline int pgd_none(pgd_t pgd) { return 0; } 447 - static inline int pgd_bad(pgd_t pgd) { return 0; } 448 - 449 - static inline int pud_present(pud_t pud) { return 1; } 450 - static inline int pud_none(pud_t pud) { return 0; } 451 - static inline int pud_large(pud_t pud) { return 0; } 452 - static inline int pud_bad(pud_t pud) { return 0; } 453 - 454 - #else /* CONFIG_64BIT */ 455 - 456 512 static inline int pgd_present(pgd_t pgd) 457 513 { 458 514 if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2) ··· 500 582 ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH; 501 583 return (pud_val(pud) & mask) != 0; 502 584 } 503 - 504 - #endif /* CONFIG_64BIT */ 505 585 506 586 static inline int pmd_present(pmd_t pmd) 
507 587 { ··· 832 916 833 917 static inline void pgd_clear(pgd_t *pgd) 834 918 { 835 - #ifdef CONFIG_64BIT 836 919 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) 837 920 pgd_val(*pgd) = _REGION2_ENTRY_EMPTY; 838 - #endif 839 921 } 840 922 841 923 static inline void pud_clear(pud_t *pud) 842 924 { 843 - #ifdef CONFIG_64BIT 844 925 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) 845 926 pud_val(*pud) = _REGION3_ENTRY_EMPTY; 846 - #endif 847 927 } 848 928 849 929 static inline void pmd_clear(pmd_t *pmdp) ··· 938 1026 { 939 1027 unsigned long pto = (unsigned long) ptep; 940 1028 941 - #ifndef CONFIG_64BIT 942 - /* pto in ESA mode must point to the start of the segment table */ 943 - pto &= 0x7ffffc00; 944 - #endif 945 1029 /* Invalidation + global TLB flush for the pte */ 946 1030 asm volatile( 947 1031 " ipte %2,%3" ··· 948 1040 { 949 1041 unsigned long pto = (unsigned long) ptep; 950 1042 951 - #ifndef CONFIG_64BIT 952 - /* pto in ESA mode must point to the start of the segment table */ 953 - pto &= 0x7ffffc00; 954 - #endif 955 1043 /* Invalidation + local TLB flush for the pte */ 956 1044 asm volatile( 957 1045 " .insn rrf,0xb2210000,%2,%3,0,1" ··· 958 1054 { 959 1055 unsigned long pto = (unsigned long) ptep; 960 1056 961 - #ifndef CONFIG_64BIT 962 - /* pto in ESA mode must point to the start of the segment table */ 963 - pto &= 0x7ffffc00; 964 - #endif 965 1057 /* Invalidate a range of ptes + global TLB flush of the ptes */ 966 1058 do { 967 1059 asm volatile( ··· 1276 1376 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) 1277 1377 #define pgd_offset_k(address) pgd_offset(&init_mm, address) 1278 1378 1279 - #ifndef CONFIG_64BIT 1280 - 1281 - #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) 1282 - #define pud_deref(pmd) ({ BUG(); 0UL; }) 1283 - #define pgd_deref(pmd) ({ BUG(); 0UL; }) 1284 - 1285 - #define pud_offset(pgd, address) ((pud_t *) pgd) 1286 - #define pmd_offset(pud, address) 
((pmd_t *) pud + pmd_index(address)) 1287 - 1288 - #else /* CONFIG_64BIT */ 1289 - 1290 1379 #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) 1291 1380 #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN) 1292 1381 #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) ··· 1295 1406 pmd = (pmd_t *) pud_deref(*pud); 1296 1407 return pmd + pmd_index(address); 1297 1408 } 1298 - 1299 - #endif /* CONFIG_64BIT */ 1300 1409 1301 1410 #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot)) 1302 1411 #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) ··· 1616 1729 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66 1617 1730 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23 1618 1731 */ 1619 - #ifndef CONFIG_64BIT 1620 - #define __SWP_OFFSET_MASK (~0UL >> 12) 1621 - #else 1732 + 1622 1733 #define __SWP_OFFSET_MASK (~0UL >> 11) 1623 - #endif 1734 + 1624 1735 static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) 1625 1736 { 1626 1737 pte_t pte;
+1 -65
arch/s390/include/asm/processor.h
··· 19 19 #define _CIF_ASCE (1<<CIF_ASCE) 20 20 #define _CIF_NOHZ_DELAY (1<<CIF_NOHZ_DELAY) 21 21 22 - 23 22 #ifndef __ASSEMBLY__ 24 23 25 24 #include <linux/linkage.h> ··· 65 66 /* 66 67 * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit. 67 68 */ 68 - #ifndef CONFIG_64BIT 69 - 70 - #define TASK_SIZE (1UL << 31) 71 - #define TASK_MAX_SIZE (1UL << 31) 72 - #define TASK_UNMAPPED_BASE (1UL << 30) 73 - 74 - #else /* CONFIG_64BIT */ 75 69 76 70 #define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit) 77 71 #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \ ··· 72 80 #define TASK_SIZE TASK_SIZE_OF(current) 73 81 #define TASK_MAX_SIZE (1UL << 53) 74 82 75 - #endif /* CONFIG_64BIT */ 76 - 77 - #ifndef CONFIG_64BIT 78 - #define STACK_TOP (1UL << 31) 79 - #define STACK_TOP_MAX (1UL << 31) 80 - #else /* CONFIG_64BIT */ 81 83 #define STACK_TOP (1UL << (test_thread_flag(TIF_31BIT) ? 31:42)) 82 84 #define STACK_TOP_MAX (1UL << 42) 83 - #endif /* CONFIG_64BIT */ 84 85 85 86 #define HAVE_ARCH_PICK_MMAP_LAYOUT 86 87 ··· 100 115 /* cpu runtime instrumentation */ 101 116 struct runtime_instr_cb *ri_cb; 102 117 int ri_signum; 103 - #ifdef CONFIG_64BIT 104 118 unsigned char trap_tdb[256]; /* Transaction abort diagnose block */ 105 119 __vector128 *vxrs; /* Vector register save area */ 106 - #endif 107 120 }; 108 121 109 122 /* Flag to disable transactions. */ ··· 164 181 struct mm_struct; 165 182 struct seq_file; 166 183 167 - #ifdef CONFIG_64BIT 168 - extern void show_cacheinfo(struct seq_file *m); 169 - #else 170 - static inline void show_cacheinfo(struct seq_file *m) { } 171 - #endif 184 + void show_cacheinfo(struct seq_file *m); 172 185 173 186 /* Free all resources held by a thread. 
*/ 174 187 extern void release_thread(struct task_struct *); ··· 208 229 */ 209 230 static inline void __load_psw(psw_t psw) 210 231 { 211 - #ifndef CONFIG_64BIT 212 - asm volatile("lpsw %0" : : "Q" (psw) : "cc"); 213 - #else 214 232 asm volatile("lpswe %0" : : "Q" (psw) : "cc"); 215 - #endif 216 233 } 217 234 218 235 /* ··· 222 247 223 248 psw.mask = mask; 224 249 225 - #ifndef CONFIG_64BIT 226 - asm volatile( 227 - " basr %0,0\n" 228 - "0: ahi %0,1f-0b\n" 229 - " st %0,%O1+4(%R1)\n" 230 - " lpsw %1\n" 231 - "1:" 232 - : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc"); 233 - #else /* CONFIG_64BIT */ 234 250 asm volatile( 235 251 " larl %0,1f\n" 236 252 " stg %0,%O1+8(%R1)\n" 237 253 " lpswe %1\n" 238 254 "1:" 239 255 : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc"); 240 - #endif /* CONFIG_64BIT */ 241 256 } 242 257 243 258 /* ··· 235 270 */ 236 271 static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc) 237 272 { 238 - #ifndef CONFIG_64BIT 239 - if (psw.addr & PSW_ADDR_AMODE) 240 - /* 31 bit mode */ 241 - return (psw.addr - ilc) | PSW_ADDR_AMODE; 242 - /* 24 bit mode */ 243 - return (psw.addr - ilc) & ((1UL << 24) - 1); 244 - #else 245 273 unsigned long mask; 246 274 247 275 mask = (psw.mask & PSW_MASK_EA) ? -1UL : 248 276 (psw.mask & PSW_MASK_BA) ? 
(1UL << 31) - 1 : 249 277 (1UL << 24) - 1; 250 278 return (psw.addr - ilc) & mask; 251 - #endif 252 279 } 253 280 254 281 /* ··· 262 305 * Store status and then load disabled wait psw, 263 306 * the processor is dead afterwards 264 307 */ 265 - #ifndef CONFIG_64BIT 266 - asm volatile( 267 - " stctl 0,0,0(%2)\n" 268 - " ni 0(%2),0xef\n" /* switch off protection */ 269 - " lctl 0,0,0(%2)\n" 270 - " stpt 0xd8\n" /* store timer */ 271 - " stckc 0xe0\n" /* store clock comparator */ 272 - " stpx 0x108\n" /* store prefix register */ 273 - " stam 0,15,0x120\n" /* store access registers */ 274 - " std 0,0x160\n" /* store f0 */ 275 - " std 2,0x168\n" /* store f2 */ 276 - " std 4,0x170\n" /* store f4 */ 277 - " std 6,0x178\n" /* store f6 */ 278 - " stm 0,15,0x180\n" /* store general registers */ 279 - " stctl 0,15,0x1c0\n" /* store control registers */ 280 - " oi 0x1c0,0x10\n" /* fake protection bit */ 281 - " lpsw 0(%1)" 282 - : "=m" (ctl_buf) 283 - : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc"); 284 - #else /* CONFIG_64BIT */ 285 308 asm volatile( 286 309 " stctg 0,0,0(%2)\n" 287 310 " ni 4(%2),0xef\n" /* switch off protection */ ··· 294 357 " lpswe 0(%1)" 295 358 : "=m" (ctl_buf) 296 359 : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1"); 297 - #endif /* CONFIG_64BIT */ 298 360 while (1); 299 361 } 300 362
-4
arch/s390/include/asm/ptrace.h
··· 40 40 unsigned long long ri : 1; /* Runtime Instrumentation */ 41 41 unsigned long long : 6; 42 42 unsigned long long eaba : 2; /* Addressing Mode */ 43 - #ifdef CONFIG_64BIT 44 43 unsigned long long : 31; 45 44 unsigned long long ia : 64;/* Instruction Address */ 46 - #else 47 - unsigned long long ia : 31;/* Instruction Address */ 48 - #endif 49 45 }; 50 46 51 47 enum {
-10
arch/s390/include/asm/qdio.h
··· 211 211 u8 scount; 212 212 u8 sflags; 213 213 u32 length; 214 - #ifdef CONFIG_32BIT 215 - /* private: */ 216 - void *res2; 217 - /* public: */ 218 - #endif 219 214 void *addr; 220 215 } __attribute__ ((packed, aligned(16))); 221 216 ··· 227 232 * @sbal: absolute SBAL address 228 233 */ 229 234 struct sl_element { 230 - #ifdef CONFIG_32BIT 231 - /* private: */ 232 - unsigned long reserved; 233 - /* public: */ 234 - #endif 235 235 unsigned long sbal; 236 236 } __attribute__ ((packed)); 237 237
+1 -9
arch/s390/include/asm/runtime_instr.h
··· 72 72 73 73 static inline void save_ri_cb(struct runtime_instr_cb *cb_prev) 74 74 { 75 - #ifdef CONFIG_64BIT 76 75 if (cb_prev) 77 76 store_runtime_instr_cb(cb_prev); 78 - #endif 79 77 } 80 78 81 79 static inline void restore_ri_cb(struct runtime_instr_cb *cb_next, 82 80 struct runtime_instr_cb *cb_prev) 83 81 { 84 - #ifdef CONFIG_64BIT 85 82 if (cb_next) 86 83 load_runtime_instr_cb(cb_next); 87 84 else if (cb_prev) 88 85 load_runtime_instr_cb(&runtime_instr_empty_cb); 89 - #endif 90 86 } 91 87 92 - #ifdef CONFIG_64BIT 93 - extern void exit_thread_runtime_instr(void); 94 - #else 95 - static inline void exit_thread_runtime_instr(void) { } 96 - #endif 88 + void exit_thread_runtime_instr(void); 97 89 98 90 #endif /* _RUNTIME_INSTR_H */
-81
arch/s390/include/asm/rwsem.h
··· 39 39 #error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" 40 40 #endif 41 41 42 - #ifndef CONFIG_64BIT 43 - #define RWSEM_UNLOCKED_VALUE 0x00000000 44 - #define RWSEM_ACTIVE_BIAS 0x00000001 45 - #define RWSEM_ACTIVE_MASK 0x0000ffff 46 - #define RWSEM_WAITING_BIAS (-0x00010000) 47 - #else /* CONFIG_64BIT */ 48 42 #define RWSEM_UNLOCKED_VALUE 0x0000000000000000L 49 43 #define RWSEM_ACTIVE_BIAS 0x0000000000000001L 50 44 #define RWSEM_ACTIVE_MASK 0x00000000ffffffffL 51 45 #define RWSEM_WAITING_BIAS (-0x0000000100000000L) 52 - #endif /* CONFIG_64BIT */ 53 46 #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS 54 47 #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) 55 48 ··· 54 61 signed long old, new; 55 62 56 63 asm volatile( 57 - #ifndef CONFIG_64BIT 58 - " l %0,%2\n" 59 - "0: lr %1,%0\n" 60 - " ahi %1,%4\n" 61 - " cs %0,%1,%2\n" 62 - " jl 0b" 63 - #else /* CONFIG_64BIT */ 64 64 " lg %0,%2\n" 65 65 "0: lgr %1,%0\n" 66 66 " aghi %1,%4\n" 67 67 " csg %0,%1,%2\n" 68 68 " jl 0b" 69 - #endif /* CONFIG_64BIT */ 70 69 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 71 70 : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS) 72 71 : "cc", "memory"); ··· 74 89 signed long old, new; 75 90 76 91 asm volatile( 77 - #ifndef CONFIG_64BIT 78 - " l %0,%2\n" 79 - "0: ltr %1,%0\n" 80 - " jm 1f\n" 81 - " ahi %1,%4\n" 82 - " cs %0,%1,%2\n" 83 - " jl 0b\n" 84 - "1:" 85 - #else /* CONFIG_64BIT */ 86 92 " lg %0,%2\n" 87 93 "0: ltgr %1,%0\n" 88 94 " jm 1f\n" ··· 81 105 " csg %0,%1,%2\n" 82 106 " jl 0b\n" 83 107 "1:" 84 - #endif /* CONFIG_64BIT */ 85 108 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 86 109 : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS) 87 110 : "cc", "memory"); ··· 96 121 97 122 tmp = RWSEM_ACTIVE_WRITE_BIAS; 98 123 asm volatile( 99 - #ifndef CONFIG_64BIT 100 - " l %0,%2\n" 101 - "0: lr %1,%0\n" 102 - " a %1,%4\n" 103 - " cs %0,%1,%2\n" 104 - " jl 0b" 105 - #else /* CONFIG_64BIT */ 106 124 " lg %0,%2\n" 107 125 "0: lgr %1,%0\n" 108 
126 " ag %1,%4\n" 109 127 " csg %0,%1,%2\n" 110 128 " jl 0b" 111 - #endif /* CONFIG_64BIT */ 112 129 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 113 130 : "Q" (sem->count), "m" (tmp) 114 131 : "cc", "memory"); ··· 121 154 signed long old; 122 155 123 156 asm volatile( 124 - #ifndef CONFIG_64BIT 125 - " l %0,%1\n" 126 - "0: ltr %0,%0\n" 127 - " jnz 1f\n" 128 - " cs %0,%3,%1\n" 129 - " jl 0b\n" 130 - #else /* CONFIG_64BIT */ 131 157 " lg %0,%1\n" 132 158 "0: ltgr %0,%0\n" 133 159 " jnz 1f\n" 134 160 " csg %0,%3,%1\n" 135 161 " jl 0b\n" 136 - #endif /* CONFIG_64BIT */ 137 162 "1:" 138 163 : "=&d" (old), "=Q" (sem->count) 139 164 : "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS) ··· 141 182 signed long old, new; 142 183 143 184 asm volatile( 144 - #ifndef CONFIG_64BIT 145 - " l %0,%2\n" 146 - "0: lr %1,%0\n" 147 - " ahi %1,%4\n" 148 - " cs %0,%1,%2\n" 149 - " jl 0b" 150 - #else /* CONFIG_64BIT */ 151 185 " lg %0,%2\n" 152 186 "0: lgr %1,%0\n" 153 187 " aghi %1,%4\n" 154 188 " csg %0,%1,%2\n" 155 189 " jl 0b" 156 - #endif /* CONFIG_64BIT */ 157 190 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 158 191 : "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS) 159 192 : "cc", "memory"); ··· 163 212 164 213 tmp = -RWSEM_ACTIVE_WRITE_BIAS; 165 214 asm volatile( 166 - #ifndef CONFIG_64BIT 167 - " l %0,%2\n" 168 - "0: lr %1,%0\n" 169 - " a %1,%4\n" 170 - " cs %0,%1,%2\n" 171 - " jl 0b" 172 - #else /* CONFIG_64BIT */ 173 215 " lg %0,%2\n" 174 216 "0: lgr %1,%0\n" 175 217 " ag %1,%4\n" 176 218 " csg %0,%1,%2\n" 177 219 " jl 0b" 178 - #endif /* CONFIG_64BIT */ 179 220 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 180 221 : "Q" (sem->count), "m" (tmp) 181 222 : "cc", "memory"); ··· 185 242 186 243 tmp = -RWSEM_WAITING_BIAS; 187 244 asm volatile( 188 - #ifndef CONFIG_64BIT 189 - " l %0,%2\n" 190 - "0: lr %1,%0\n" 191 - " a %1,%4\n" 192 - " cs %0,%1,%2\n" 193 - " jl 0b" 194 - #else /* CONFIG_64BIT */ 195 245 " lg %0,%2\n" 196 246 "0: lgr %1,%0\n" 197 247 " ag %1,%4\n" 198 248 " csg 
%0,%1,%2\n" 199 249 " jl 0b" 200 - #endif /* CONFIG_64BIT */ 201 250 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 202 251 : "Q" (sem->count), "m" (tmp) 203 252 : "cc", "memory"); ··· 205 270 signed long old, new; 206 271 207 272 asm volatile( 208 - #ifndef CONFIG_64BIT 209 - " l %0,%2\n" 210 - "0: lr %1,%0\n" 211 - " ar %1,%4\n" 212 - " cs %0,%1,%2\n" 213 - " jl 0b" 214 - #else /* CONFIG_64BIT */ 215 273 " lg %0,%2\n" 216 274 "0: lgr %1,%0\n" 217 275 " agr %1,%4\n" 218 276 " csg %0,%1,%2\n" 219 277 " jl 0b" 220 - #endif /* CONFIG_64BIT */ 221 278 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 222 279 : "Q" (sem->count), "d" (delta) 223 280 : "cc", "memory"); ··· 223 296 signed long old, new; 224 297 225 298 asm volatile( 226 - #ifndef CONFIG_64BIT 227 - " l %0,%2\n" 228 - "0: lr %1,%0\n" 229 - " ar %1,%4\n" 230 - " cs %0,%1,%2\n" 231 - " jl 0b" 232 - #else /* CONFIG_64BIT */ 233 299 " lg %0,%2\n" 234 300 "0: lgr %1,%0\n" 235 301 " agr %1,%4\n" 236 302 " csg %0,%1,%2\n" 237 303 " jl 0b" 238 - #endif /* CONFIG_64BIT */ 239 304 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 240 305 : "Q" (sem->count), "d" (delta) 241 306 : "cc", "memory");
-35
arch/s390/include/asm/setup.h
··· 15 15 #include <asm/lowcore.h> 16 16 #include <asm/types.h> 17 17 18 - #ifndef CONFIG_64BIT 19 - #define IPL_DEVICE (*(unsigned long *) (0x10404)) 20 - #define INITRD_START (*(unsigned long *) (0x1040C)) 21 - #define INITRD_SIZE (*(unsigned long *) (0x10414)) 22 - #define OLDMEM_BASE (*(unsigned long *) (0x1041C)) 23 - #define OLDMEM_SIZE (*(unsigned long *) (0x10424)) 24 - #else /* CONFIG_64BIT */ 25 18 #define IPL_DEVICE (*(unsigned long *) (0x10400)) 26 19 #define INITRD_START (*(unsigned long *) (0x10408)) 27 20 #define INITRD_SIZE (*(unsigned long *) (0x10410)) 28 21 #define OLDMEM_BASE (*(unsigned long *) (0x10418)) 29 22 #define OLDMEM_SIZE (*(unsigned long *) (0x10420)) 30 - #endif /* CONFIG_64BIT */ 31 23 #define COMMAND_LINE ((char *) (0x10480)) 32 24 33 25 extern int memory_end_set; ··· 60 68 #define MACHINE_HAS_PFMF MACHINE_HAS_EDAT1 61 69 #define MACHINE_HAS_HPAGE MACHINE_HAS_EDAT1 62 70 63 - #ifndef CONFIG_64BIT 64 - #define MACHINE_HAS_IEEE (S390_lowcore.machine_flags & MACHINE_FLAG_IEEE) 65 - #define MACHINE_HAS_CSP (S390_lowcore.machine_flags & MACHINE_FLAG_CSP) 66 - #define MACHINE_HAS_IDTE (0) 67 - #define MACHINE_HAS_DIAG44 (1) 68 - #define MACHINE_HAS_MVPG (S390_lowcore.machine_flags & MACHINE_FLAG_MVPG) 69 - #define MACHINE_HAS_EDAT1 (0) 70 - #define MACHINE_HAS_EDAT2 (0) 71 - #define MACHINE_HAS_LPP (0) 72 - #define MACHINE_HAS_TOPOLOGY (0) 73 - #define MACHINE_HAS_TE (0) 74 - #define MACHINE_HAS_TLB_LC (0) 75 - #define MACHINE_HAS_VX (0) 76 - #define MACHINE_HAS_CAD (0) 77 - #else /* CONFIG_64BIT */ 78 - #define MACHINE_HAS_IEEE (1) 79 - #define MACHINE_HAS_CSP (1) 80 71 #define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE) 81 72 #define MACHINE_HAS_DIAG44 (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44) 82 - #define MACHINE_HAS_MVPG (1) 83 73 #define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1) 84 74 #define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2) 85 75 #define 
MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP) ··· 70 96 #define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC) 71 97 #define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX) 72 98 #define MACHINE_HAS_CAD (S390_lowcore.machine_flags & MACHINE_FLAG_CAD) 73 - #endif /* CONFIG_64BIT */ 74 99 75 100 /* 76 101 * Console mode. Override with conmode= ··· 108 135 109 136 #else /* __ASSEMBLY__ */ 110 137 111 - #ifndef CONFIG_64BIT 112 - #define IPL_DEVICE 0x10404 113 - #define INITRD_START 0x1040C 114 - #define INITRD_SIZE 0x10414 115 - #define OLDMEM_BASE 0x1041C 116 - #define OLDMEM_SIZE 0x10424 117 - #else /* CONFIG_64BIT */ 118 138 #define IPL_DEVICE 0x10400 119 139 #define INITRD_START 0x10408 120 140 #define INITRD_SIZE 0x10410 121 141 #define OLDMEM_BASE 0x10418 122 142 #define OLDMEM_SIZE 0x10420 123 - #endif /* CONFIG_64BIT */ 124 143 #define COMMAND_LINE 0x10480 125 144 126 145 #endif /* __ASSEMBLY__ */
-10
arch/s390/include/asm/sfp-util.h
··· 51 51 wl = __wl; \ 52 52 }) 53 53 54 - #ifdef CONFIG_64BIT 55 54 #define udiv_qrnnd(q, r, n1, n0, d) \ 56 55 do { unsigned long __n; \ 57 56 unsigned int __r, __d; \ ··· 59 60 (q) = __n / __d; \ 60 61 (r) = __n % __d; \ 61 62 } while (0) 62 - #else 63 - #define udiv_qrnnd(q, r, n1, n0, d) \ 64 - do { unsigned int __r; \ 65 - (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \ 66 - (r) = __r; \ 67 - } while (0) 68 - extern unsigned long __udiv_qrnnd (unsigned int *, unsigned int, 69 - unsigned int , unsigned int); 70 - #endif 71 63 72 64 #define UDIV_NEEDS_NORMALIZATION 0 73 65
-9
arch/s390/include/asm/sparsemem.h
··· 1 1 #ifndef _ASM_S390_SPARSEMEM_H 2 2 #define _ASM_S390_SPARSEMEM_H 3 3 4 - #ifdef CONFIG_64BIT 5 - 6 4 #define SECTION_SIZE_BITS 28 7 5 #define MAX_PHYSMEM_BITS 46 8 - 9 - #else 10 - 11 - #define SECTION_SIZE_BITS 25 12 - #define MAX_PHYSMEM_BITS 31 13 - 14 - #endif /* CONFIG_64BIT */ 15 6 16 7 #endif /* _ASM_S390_SPARSEMEM_H */
+2 -19
arch/s390/include/asm/switch_to.h
··· 18 18 u32 orig_fpc; 19 19 int rc; 20 20 21 - if (!MACHINE_HAS_IEEE) 22 - return 0; 23 - 24 21 asm volatile( 25 22 " efpc %1\n" 26 23 " sfpc %2\n" ··· 32 35 33 36 static inline void save_fp_ctl(u32 *fpc) 34 37 { 35 - if (!MACHINE_HAS_IEEE) 36 - return; 37 - 38 38 asm volatile( 39 39 " stfpc %0\n" 40 40 : "+Q" (*fpc)); ··· 40 46 static inline int restore_fp_ctl(u32 *fpc) 41 47 { 42 48 int rc; 43 - 44 - if (!MACHINE_HAS_IEEE) 45 - return 0; 46 49 47 50 asm volatile( 48 51 " lfpc %1\n" ··· 56 65 asm volatile("std 2,%0" : "=Q" (fprs[2])); 57 66 asm volatile("std 4,%0" : "=Q" (fprs[4])); 58 67 asm volatile("std 6,%0" : "=Q" (fprs[6])); 59 - if (!MACHINE_HAS_IEEE) 60 - return; 61 68 asm volatile("std 1,%0" : "=Q" (fprs[1])); 62 69 asm volatile("std 3,%0" : "=Q" (fprs[3])); 63 70 asm volatile("std 5,%0" : "=Q" (fprs[5])); ··· 76 87 asm volatile("ld 2,%0" : : "Q" (fprs[2])); 77 88 asm volatile("ld 4,%0" : : "Q" (fprs[4])); 78 89 asm volatile("ld 6,%0" : : "Q" (fprs[6])); 79 - if (!MACHINE_HAS_IEEE) 80 - return; 81 90 asm volatile("ld 1,%0" : : "Q" (fprs[1])); 82 91 asm volatile("ld 3,%0" : : "Q" (fprs[3])); 83 92 asm volatile("ld 5,%0" : : "Q" (fprs[5])); ··· 127 140 128 141 static inline void save_fp_vx_regs(struct task_struct *task) 129 142 { 130 - #ifdef CONFIG_64BIT 131 143 if (task->thread.vxrs) 132 144 save_vx_regs(task->thread.vxrs); 133 145 else 134 - #endif 135 - save_fp_regs(task->thread.fp_regs.fprs); 146 + save_fp_regs(task->thread.fp_regs.fprs); 136 147 } 137 148 138 149 static inline void restore_fp_vx_regs(struct task_struct *task) 139 150 { 140 - #ifdef CONFIG_64BIT 141 151 if (task->thread.vxrs) 142 152 restore_vx_regs(task->thread.vxrs); 143 153 else 144 - #endif 145 - restore_fp_regs(task->thread.fp_regs.fprs); 154 + restore_fp_regs(task->thread.fp_regs.fprs); 146 155 } 147 156 148 157 static inline void save_access_regs(unsigned int *acrs)
-9
arch/s390/include/asm/thread_info.h
··· 10 10 /* 11 11 * Size of kernel stack for each process 12 12 */ 13 - #ifndef CONFIG_64BIT 14 - #define THREAD_ORDER 1 15 - #define ASYNC_ORDER 1 16 - #else /* CONFIG_64BIT */ 17 13 #define THREAD_ORDER 2 18 14 #define ASYNC_ORDER 2 19 - #endif /* CONFIG_64BIT */ 20 15 21 16 #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) 22 17 #define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER) ··· 94 99 #define _TIF_31BIT (1<<TIF_31BIT) 95 100 #define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP) 96 101 97 - #ifdef CONFIG_64BIT 98 102 #define is_32bit_task() (test_thread_flag(TIF_31BIT)) 99 - #else 100 - #define is_32bit_task() (1) 101 - #endif 102 103 103 104 #endif /* _ASM_THREAD_INFO_H */
-4
arch/s390/include/asm/tlb.h
··· 118 118 static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, 119 119 unsigned long address) 120 120 { 121 - #ifdef CONFIG_64BIT 122 121 if (tlb->mm->context.asce_limit <= (1UL << 31)) 123 122 return; 124 123 pgtable_pmd_page_dtor(virt_to_page(pmd)); 125 124 tlb_remove_table(tlb, pmd); 126 - #endif 127 125 } 128 126 129 127 /* ··· 134 136 static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, 135 137 unsigned long address) 136 138 { 137 - #ifdef CONFIG_64BIT 138 139 if (tlb->mm->context.asce_limit <= (1UL << 42)) 139 140 return; 140 141 tlb_remove_table(tlb, pud); 141 - #endif 142 142 } 143 143 144 144 #define tlb_start_vma(tlb, vma) do { } while (0)
-7
arch/s390/include/asm/tlbflush.h
··· 49 49 register unsigned long reg4 asm("4"); 50 50 long dummy; 51 51 52 - #ifndef CONFIG_64BIT 53 - if (!MACHINE_HAS_CSP) { 54 - smp_ptlb_all(); 55 - return; 56 - } 57 - #endif /* CONFIG_64BIT */ 58 - 59 52 dummy = 0; 60 53 reg2 = reg3 = 0; 61 54 reg4 = ((unsigned long) &dummy) + 1;
-17
arch/s390/include/asm/types.h
··· 8 8 9 9 #include <uapi/asm/types.h> 10 10 11 - /* 12 - * These aren't exported outside the kernel to avoid name space clashes 13 - */ 14 - 15 - #ifndef __ASSEMBLY__ 16 - 17 - #ifndef CONFIG_64BIT 18 - typedef union { 19 - unsigned long long pair; 20 - struct { 21 - unsigned long even; 22 - unsigned long odd; 23 - } subreg; 24 - } register_pair; 25 - 26 - #endif /* ! CONFIG_64BIT */ 27 - #endif /* __ASSEMBLY__ */ 28 11 #endif /* _S390_TYPES_H */
-8
arch/s390/include/asm/unistd.h
··· 9 9 #include <uapi/asm/unistd.h> 10 10 11 11 12 - #ifndef CONFIG_64BIT 13 - #define __IGNORE_select 14 - #else 15 12 #define __IGNORE_time 16 - #endif 17 13 18 14 /* Ignore NUMA system calls. Not wired up on s390. */ 19 15 #define __IGNORE_mbind ··· 39 43 #define __ARCH_WANT_SYS_OLDUMOUNT 40 44 #define __ARCH_WANT_SYS_SIGPENDING 41 45 #define __ARCH_WANT_SYS_SIGPROCMASK 42 - # ifndef CONFIG_64BIT 43 - # define __ARCH_WANT_STAT64 44 - # define __ARCH_WANT_SYS_TIME 45 - # endif 46 46 # ifdef CONFIG_COMPAT 47 47 # define __ARCH_WANT_COMPAT_SYS_TIME 48 48 # endif
-2
arch/s390/include/asm/vdso.h
··· 42 42 43 43 extern struct vdso_data *vdso_data; 44 44 45 - #ifdef CONFIG_64BIT 46 45 int vdso_alloc_per_cpu(struct _lowcore *lowcore); 47 46 void vdso_free_per_cpu(struct _lowcore *lowcore); 48 - #endif 49 47 50 48 #endif /* __ASSEMBLY__ */ 51 49 #endif /* __S390_VDSO_H__ */
+7 -15
arch/s390/kernel/Makefile
··· 26 26 # 27 27 CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' 28 28 29 - CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w 29 + CFLAGS_sysinfo.o += -w 30 30 31 31 obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o 32 32 obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o 33 33 obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o 34 34 obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o 35 - obj-y += dumpstack.o 35 + obj-y += runtime_instr.o cache.o dumpstack.o 36 + obj-y += entry64.o reipl64.o relocate_kernel64.o 36 37 37 - obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 38 - obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) 39 - obj-y += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o) 40 - 41 - extra-y += head.o vmlinux.lds 42 - extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o) 38 + extra-y += head.o head64.o vmlinux.lds 43 39 44 40 obj-$(CONFIG_MODULES) += s390_ksyms.o module.o 45 41 obj-$(CONFIG_SMP) += smp.o ··· 52 56 obj-$(CONFIG_CRASH_DUMP) += crash_dump.o 53 57 obj-$(CONFIG_UPROBES) += uprobes.o 54 58 55 - ifdef CONFIG_64BIT 56 - obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o \ 57 - perf_cpum_cf_events.o 58 - obj-y += runtime_instr.o cache.o 59 - endif 59 + obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o 60 + obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o 60 61 61 62 # vdso 62 - obj-$(CONFIG_64BIT) += vdso64/ 63 - obj-$(CONFIG_32BIT) += vdso32/ 63 + obj-y += vdso64/ 64 64 obj-$(CONFIG_COMPAT) += vdso32/
-4
arch/s390/kernel/asm-offsets.c
··· 166 166 DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area)); 167 167 DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area)); 168 168 DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area)); 169 - #ifdef CONFIG_32BIT 170 - DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr)); 171 - #else /* CONFIG_32BIT */ 172 169 DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code)); 173 170 DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address)); 174 171 DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2)); ··· 180 183 DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce)); 181 184 DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c)); 182 185 DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20)); 183 - #endif /* CONFIG_32BIT */ 184 186 return 0; 185 187 }
-76
arch/s390/kernel/base.S
··· 11 11 #include <asm/ptrace.h> 12 12 #include <asm/sigp.h> 13 13 14 - #ifdef CONFIG_64BIT 15 - 16 14 ENTRY(s390_base_mcck_handler) 17 15 basr %r13,0 18 16 0: lg %r15,__LC_PANIC_STACK # load panic stack ··· 129 131 .Lfpctl: 130 132 .long 0 131 133 .previous 132 - 133 - #else /* CONFIG_64BIT */ 134 - 135 - ENTRY(s390_base_mcck_handler) 136 - basr %r13,0 137 - 0: l %r15,__LC_PANIC_STACK # load panic stack 138 - ahi %r15,-STACK_FRAME_OVERHEAD 139 - l %r1,2f-0b(%r13) 140 - l %r1,0(%r1) 141 - ltr %r1,%r1 142 - jz 1f 143 - basr %r14,%r1 144 - 1: lm %r0,%r15,__LC_GPREGS_SAVE_AREA 145 - lpsw __LC_MCK_OLD_PSW 146 - 147 - 2: .long s390_base_mcck_handler_fn 148 - 149 - .section .bss 150 - .align 4 151 - .globl s390_base_mcck_handler_fn 152 - s390_base_mcck_handler_fn: 153 - .long 0 154 - .previous 155 - 156 - ENTRY(s390_base_ext_handler) 157 - stm %r0,%r15,__LC_SAVE_AREA_ASYNC 158 - basr %r13,0 159 - 0: ahi %r15,-STACK_FRAME_OVERHEAD 160 - l %r1,2f-0b(%r13) 161 - l %r1,0(%r1) 162 - ltr %r1,%r1 163 - jz 1f 164 - basr %r14,%r1 165 - 1: lm %r0,%r15,__LC_SAVE_AREA_ASYNC 166 - ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit 167 - lpsw __LC_EXT_OLD_PSW 168 - 169 - 2: .long s390_base_ext_handler_fn 170 - 171 - .section .bss 172 - .align 4 173 - .globl s390_base_ext_handler_fn 174 - s390_base_ext_handler_fn: 175 - .long 0 176 - .previous 177 - 178 - ENTRY(s390_base_pgm_handler) 179 - stm %r0,%r15,__LC_SAVE_AREA_SYNC 180 - basr %r13,0 181 - 0: ahi %r15,-STACK_FRAME_OVERHEAD 182 - l %r1,2f-0b(%r13) 183 - l %r1,0(%r1) 184 - ltr %r1,%r1 185 - jz 1f 186 - basr %r14,%r1 187 - lm %r0,%r15,__LC_SAVE_AREA_SYNC 188 - lpsw __LC_PGM_OLD_PSW 189 - 190 - 1: lpsw disabled_wait_psw-0b(%r13) 191 - 192 - 2: .long s390_base_pgm_handler_fn 193 - 194 - disabled_wait_psw: 195 - .align 8 196 - .long 0x000a0000,0x00000000 + s390_base_pgm_handler 197 - 198 - .section .bss 199 - .align 4 200 - .globl s390_base_pgm_handler_fn 201 - s390_base_pgm_handler_fn: 202 - .long 0 203 - .previous 204 - 205 - #endif 
/* CONFIG_64BIT */
-10
arch/s390/kernel/cpcmd.c
··· 27 27 register unsigned long reg3 asm ("3") = cmdlen; 28 28 29 29 asm volatile( 30 - #ifndef CONFIG_64BIT 31 - " diag %1,%0,0x8\n" 32 - #else /* CONFIG_64BIT */ 33 30 " sam31\n" 34 31 " diag %1,%0,0x8\n" 35 32 " sam64\n" 36 - #endif /* CONFIG_64BIT */ 37 33 : "+d" (reg3) : "d" (reg2) : "cc"); 38 34 return reg3; 39 35 } ··· 42 46 register unsigned long reg5 asm ("5") = *rlen; 43 47 44 48 asm volatile( 45 - #ifndef CONFIG_64BIT 46 - " diag %2,%0,0x8\n" 47 - " brc 8,1f\n" 48 - " ar %1,%4\n" 49 - #else /* CONFIG_64BIT */ 50 49 " sam31\n" 51 50 " diag %2,%0,0x8\n" 52 51 " sam64\n" 53 52 " brc 8,1f\n" 54 53 " agr %1,%4\n" 55 - #endif /* CONFIG_64BIT */ 56 54 "1:\n" 57 55 : "+d" (reg4), "+d" (reg5) 58 56 : "d" (reg2), "d" (reg3), "d" (*rlen) : "cc");
-15
arch/s390/kernel/diag.c
··· 18 18 int rc = 0; 19 19 20 20 asm volatile( 21 - #ifdef CONFIG_64BIT 22 21 " sam31\n" 23 22 " diag %2,2,0x14\n" 24 23 " sam64\n" 25 - #else 26 - " diag %2,2,0x14\n" 27 - #endif 28 24 " ipm %0\n" 29 25 " srl %0,28\n" 30 26 : "=d" (rc), "+d" (_ry2) ··· 48 52 spin_lock_irqsave(&diag210_lock, flags); 49 53 diag210_tmp = *addr; 50 54 51 - #ifdef CONFIG_64BIT 52 55 asm volatile( 53 56 " lhi %0,-1\n" 54 57 " sam31\n" ··· 57 62 "1: sam64\n" 58 63 EX_TABLE(0b, 1b) 59 64 : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory"); 60 - #else 61 - asm volatile( 62 - " lhi %0,-1\n" 63 - " diag %1,0,0x210\n" 64 - "0: ipm %0\n" 65 - " srl %0,28\n" 66 - "1:\n" 67 - EX_TABLE(0b, 1b) 68 - : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory"); 69 - #endif 70 65 71 66 *addr = diag210_tmp; 72 67 spin_unlock_irqrestore(&diag210_lock, flags);
+1 -47
arch/s390/kernel/dis.c
··· 32 32 #include <asm/debug.h> 33 33 #include <asm/irq.h> 34 34 35 - #ifndef CONFIG_64BIT 36 - #define ONELONG "%08lx: " 37 - #else /* CONFIG_64BIT */ 38 - #define ONELONG "%016lx: " 39 - #endif /* CONFIG_64BIT */ 40 - 41 35 enum { 42 36 UNUSED, /* Indicates the end of the operand list */ 43 37 R_8, /* GPR starting at position 8 */ ··· 530 536 }; 531 537 532 538 static struct s390_insn opcode[] = { 533 - #ifdef CONFIG_64BIT 534 539 { "bprp", 0xc5, INSTR_MII_UPI }, 535 540 { "bpp", 0xc7, INSTR_SMI_U0RDP }, 536 541 { "trtr", 0xd0, INSTR_SS_L0RDRD }, 537 542 { "lmd", 0xef, INSTR_SS_RRRDRD3 }, 538 - #endif 539 543 { "spm", 0x04, INSTR_RR_R0 }, 540 544 { "balr", 0x05, INSTR_RR_RR }, 541 545 { "bctr", 0x06, INSTR_RR_RR }, ··· 717 725 }; 718 726 719 727 static struct s390_insn opcode_01[] = { 720 - #ifdef CONFIG_64BIT 721 728 { "ptff", 0x04, INSTR_E }, 722 729 { "pfpo", 0x0a, INSTR_E }, 723 730 { "sam64", 0x0e, INSTR_E }, 724 - #endif 725 731 { "pr", 0x01, INSTR_E }, 726 732 { "upt", 0x02, INSTR_E }, 727 733 { "sckpf", 0x07, INSTR_E }, ··· 731 741 }; 732 742 733 743 static struct s390_insn opcode_a5[] = { 734 - #ifdef CONFIG_64BIT 735 744 { "iihh", 0x00, INSTR_RI_RU }, 736 745 { "iihl", 0x01, INSTR_RI_RU }, 737 746 { "iilh", 0x02, INSTR_RI_RU }, ··· 747 758 { "llihl", 0x0d, INSTR_RI_RU }, 748 759 { "llilh", 0x0e, INSTR_RI_RU }, 749 760 { "llill", 0x0f, INSTR_RI_RU }, 750 - #endif 751 761 { "", 0, INSTR_INVALID } 752 762 }; 753 763 754 764 static struct s390_insn opcode_a7[] = { 755 - #ifdef CONFIG_64BIT 756 765 { "tmhh", 0x02, INSTR_RI_RU }, 757 766 { "tmhl", 0x03, INSTR_RI_RU }, 758 767 { "brctg", 0x07, INSTR_RI_RP }, ··· 758 771 { "aghi", 0x0b, INSTR_RI_RI }, 759 772 { "mghi", 0x0d, INSTR_RI_RI }, 760 773 { "cghi", 0x0f, INSTR_RI_RI }, 761 - #endif 762 774 { "tmlh", 0x00, INSTR_RI_RU }, 763 775 { "tmll", 0x01, INSTR_RI_RU }, 764 776 { "brc", 0x04, INSTR_RI_UP }, ··· 771 785 }; 772 786 773 787 static struct s390_insn opcode_aa[] = { 774 - #ifdef CONFIG_64BIT 775 788 { 
{ 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI }, 776 789 { "rion", 0x01, INSTR_RI_RI }, 777 790 { "tric", 0x02, INSTR_RI_RI }, 778 791 { "rioff", 0x03, INSTR_RI_RI }, 779 792 { { 0, LONG_INSN_RIEMIT }, 0x04, INSTR_RI_RI }, 780 - #endif 781 793 { "", 0, INSTR_INVALID } 782 794 }; 783 795 784 796 static struct s390_insn opcode_b2[] = { 785 - #ifdef CONFIG_64BIT 786 797 { "stckf", 0x7c, INSTR_S_RD }, 787 798 { "lpp", 0x80, INSTR_S_RD }, 788 799 { "lcctl", 0x84, INSTR_S_RD }, ··· 802 819 { "tend", 0xf8, INSTR_S_00 }, 803 820 { "niai", 0xfa, INSTR_IE_UU }, 804 821 { { 0, LONG_INSN_TABORT }, 0xfc, INSTR_S_RD }, 805 - #endif 806 822 { "stidp", 0x02, INSTR_S_RD }, 807 823 { "sck", 0x04, INSTR_S_RD }, 808 824 { "stck", 0x05, INSTR_S_RD }, ··· 890 908 }; 891 909 892 910 static struct s390_insn opcode_b3[] = { 893 - #ifdef CONFIG_64BIT 894 911 { "maylr", 0x38, INSTR_RRF_F0FF }, 895 912 { "mylr", 0x39, INSTR_RRF_F0FF }, 896 913 { "mayr", 0x3a, INSTR_RRF_F0FF }, ··· 977 996 { "qaxtr", 0xfd, INSTR_RRF_FUFF }, 978 997 { "iextr", 0xfe, INSTR_RRF_F0FR }, 979 998 { "rrxtr", 0xff, INSTR_RRF_FFRU }, 980 - #endif 981 999 { "lpebr", 0x00, INSTR_RRE_FF }, 982 1000 { "lnebr", 0x01, INSTR_RRE_FF }, 983 1001 { "ltebr", 0x02, INSTR_RRE_FF }, ··· 1071 1091 }; 1072 1092 1073 1093 static struct s390_insn opcode_b9[] = { 1074 - #ifdef CONFIG_64BIT 1075 1094 { "lpgr", 0x00, INSTR_RRE_RR }, 1076 1095 { "lngr", 0x01, INSTR_RRE_RR }, 1077 1096 { "ltgr", 0x02, INSTR_RRE_RR }, ··· 1183 1204 { "srk", 0xf9, INSTR_RRF_R0RR2 }, 1184 1205 { "alrk", 0xfa, INSTR_RRF_R0RR2 }, 1185 1206 { "slrk", 0xfb, INSTR_RRF_R0RR2 }, 1186 - #endif 1187 1207 { "kmac", 0x1e, INSTR_RRE_RR }, 1188 1208 { "lrvr", 0x1f, INSTR_RRE_RR }, 1189 1209 { "km", 0x2e, INSTR_RRE_RR }, ··· 1202 1224 }; 1203 1225 1204 1226 static struct s390_insn opcode_c0[] = { 1205 - #ifdef CONFIG_64BIT 1206 1227 { "lgfi", 0x01, INSTR_RIL_RI }, 1207 1228 { "xihf", 0x06, INSTR_RIL_RU }, 1208 1229 { "xilf", 0x07, INSTR_RIL_RU }, ··· 1213 1236 { "oilf", 0x0d, 
INSTR_RIL_RU }, 1214 1237 { "llihf", 0x0e, INSTR_RIL_RU }, 1215 1238 { "llilf", 0x0f, INSTR_RIL_RU }, 1216 - #endif 1217 1239 { "larl", 0x00, INSTR_RIL_RP }, 1218 1240 { "brcl", 0x04, INSTR_RIL_UP }, 1219 1241 { "brasl", 0x05, INSTR_RIL_RP }, ··· 1220 1244 }; 1221 1245 1222 1246 static struct s390_insn opcode_c2[] = { 1223 - #ifdef CONFIG_64BIT 1224 1247 { "msgfi", 0x00, INSTR_RIL_RI }, 1225 1248 { "msfi", 0x01, INSTR_RIL_RI }, 1226 1249 { "slgfi", 0x04, INSTR_RIL_RU }, ··· 1232 1257 { "cfi", 0x0d, INSTR_RIL_RI }, 1233 1258 { "clgfi", 0x0e, INSTR_RIL_RU }, 1234 1259 { "clfi", 0x0f, INSTR_RIL_RU }, 1235 - #endif 1236 1260 { "", 0, INSTR_INVALID } 1237 1261 }; 1238 1262 1239 1263 static struct s390_insn opcode_c4[] = { 1240 - #ifdef CONFIG_64BIT 1241 1264 { "llhrl", 0x02, INSTR_RIL_RP }, 1242 1265 { "lghrl", 0x04, INSTR_RIL_RP }, 1243 1266 { "lhrl", 0x05, INSTR_RIL_RP }, ··· 1247 1274 { "lrl", 0x0d, INSTR_RIL_RP }, 1248 1275 { { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP }, 1249 1276 { "strl", 0x0f, INSTR_RIL_RP }, 1250 - #endif 1251 1277 { "", 0, INSTR_INVALID } 1252 1278 }; 1253 1279 1254 1280 static struct s390_insn opcode_c6[] = { 1255 - #ifdef CONFIG_64BIT 1256 1281 { "exrl", 0x00, INSTR_RIL_RP }, 1257 1282 { "pfdrl", 0x02, INSTR_RIL_UP }, 1258 1283 { "cghrl", 0x04, INSTR_RIL_RP }, ··· 1263 1292 { "crl", 0x0d, INSTR_RIL_RP }, 1264 1293 { { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP }, 1265 1294 { "clrl", 0x0f, INSTR_RIL_RP }, 1266 - #endif 1267 1295 { "", 0, INSTR_INVALID } 1268 1296 }; 1269 1297 1270 1298 static struct s390_insn opcode_c8[] = { 1271 - #ifdef CONFIG_64BIT 1272 1299 { "mvcos", 0x00, INSTR_SSF_RRDRD }, 1273 1300 { "ectg", 0x01, INSTR_SSF_RRDRD }, 1274 1301 { "csst", 0x02, INSTR_SSF_RRDRD }, 1275 1302 { "lpd", 0x04, INSTR_SSF_RRDRD2 }, 1276 1303 { "lpdg", 0x05, INSTR_SSF_RRDRD2 }, 1277 - #endif 1278 1304 { "", 0, INSTR_INVALID } 1279 1305 }; 1280 1306 1281 1307 static struct s390_insn opcode_cc[] = { 1282 - #ifdef CONFIG_64BIT 1283 1308 { "brcth", 
0x06, INSTR_RIL_RP }, 1284 1309 { "aih", 0x08, INSTR_RIL_RI }, 1285 1310 { "alsih", 0x0a, INSTR_RIL_RI }, 1286 1311 { { 0, LONG_INSN_ALSIHN }, 0x0b, INSTR_RIL_RI }, 1287 1312 { "cih", 0x0d, INSTR_RIL_RI }, 1288 1313 { "clih", 0x0f, INSTR_RIL_RI }, 1289 - #endif 1290 1314 { "", 0, INSTR_INVALID } 1291 1315 }; 1292 1316 1293 1317 static struct s390_insn opcode_e3[] = { 1294 - #ifdef CONFIG_64BIT 1295 1318 { "ltg", 0x02, INSTR_RXY_RRRD }, 1296 1319 { "lrag", 0x03, INSTR_RXY_RRRD }, 1297 1320 { "lg", 0x04, INSTR_RXY_RRRD }, ··· 1379 1414 { "clhf", 0xcf, INSTR_RXY_RRRD }, 1380 1415 { { 0, LONG_INSN_MPCIFC }, 0xd0, INSTR_RXY_RRRD }, 1381 1416 { { 0, LONG_INSN_STPCIFC }, 0xd4, INSTR_RXY_RRRD }, 1382 - #endif 1383 1417 { "lrv", 0x1e, INSTR_RXY_RRRD }, 1384 1418 { "lrvh", 0x1f, INSTR_RXY_RRRD }, 1385 1419 { "strv", 0x3e, INSTR_RXY_RRRD }, ··· 1390 1426 }; 1391 1427 1392 1428 static struct s390_insn opcode_e5[] = { 1393 - #ifdef CONFIG_64BIT 1394 1429 { "strag", 0x02, INSTR_SSE_RDRD }, 1395 1430 { "mvhhi", 0x44, INSTR_SIL_RDI }, 1396 1431 { "mvghi", 0x48, INSTR_SIL_RDI }, ··· 1402 1439 { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU }, 1403 1440 { { 0, LONG_INSN_TBEGIN }, 0x60, INSTR_SIL_RDU }, 1404 1441 { { 0, LONG_INSN_TBEGINC }, 0x61, INSTR_SIL_RDU }, 1405 - #endif 1406 1442 { "lasp", 0x00, INSTR_SSE_RDRD }, 1407 1443 { "tprot", 0x01, INSTR_SSE_RDRD }, 1408 1444 { "mvcsk", 0x0e, INSTR_SSE_RDRD }, ··· 1410 1448 }; 1411 1449 1412 1450 static struct s390_insn opcode_e7[] = { 1413 - #ifdef CONFIG_64BIT 1414 1451 { "lcbb", 0x27, INSTR_RXE_RRRDM }, 1415 1452 { "vgef", 0x13, INSTR_VRV_VVRDM }, 1416 1453 { "vgeg", 0x12, INSTR_VRV_VVRDM }, ··· 1549 1588 { "vfsq", 0xce, INSTR_VRR_VV000MM }, 1550 1589 { "vfs", 0xe2, INSTR_VRR_VVV00MM }, 1551 1590 { "vftci", 0x4a, INSTR_VRI_VVIMM }, 1552 - #endif 1553 1591 }; 1554 1592 1555 1593 static struct s390_insn opcode_eb[] = { 1556 - #ifdef CONFIG_64BIT 1557 1594 { "lmg", 0x04, INSTR_RSY_RRRD }, 1558 1595 { "srag", 0x0a, INSTR_RSY_RRRD }, 1559 
1596 { "slag", 0x0b, INSTR_RSY_RRRD }, ··· 1618 1659 { "stric", 0x61, INSTR_RSY_RDRM }, 1619 1660 { "mric", 0x62, INSTR_RSY_RDRM }, 1620 1661 { { 0, LONG_INSN_STCCTM }, 0x17, INSTR_RSY_RMRD }, 1621 - #endif 1622 1662 { "rll", 0x1d, INSTR_RSY_RRRD }, 1623 1663 { "mvclu", 0x8e, INSTR_RSY_RRRD }, 1624 1664 { "tp", 0xc0, INSTR_RSL_R0RD }, ··· 1625 1667 }; 1626 1668 1627 1669 static struct s390_insn opcode_ec[] = { 1628 - #ifdef CONFIG_64BIT 1629 1670 { "brxhg", 0x44, INSTR_RIE_RRP }, 1630 1671 { "brxlg", 0x45, INSTR_RIE_RRP }, 1631 1672 { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU }, ··· 1658 1701 { "clgib", 0xfd, INSTR_RIS_RURDU }, 1659 1702 { "cib", 0xfe, INSTR_RIS_RURDI }, 1660 1703 { "clib", 0xff, INSTR_RIS_RURDU }, 1661 - #endif 1662 1704 { "", 0, INSTR_INVALID } 1663 1705 }; 1664 1706 1665 1707 static struct s390_insn opcode_ed[] = { 1666 - #ifdef CONFIG_64BIT 1667 1708 { "mayl", 0x38, INSTR_RXF_FRRDF }, 1668 1709 { "myl", 0x39, INSTR_RXF_FRRDF }, 1669 1710 { "may", 0x3a, INSTR_RXF_FRRDF }, ··· 1686 1731 { "czxt", 0xa9, INSTR_RSL_LRDFU }, 1687 1732 { "cdzt", 0xaa, INSTR_RSL_LRDFU }, 1688 1733 { "cxzt", 0xab, INSTR_RSL_LRDFU }, 1689 - #endif 1690 1734 { "ldeb", 0x04, INSTR_RXE_FRRD }, 1691 1735 { "lxdb", 0x05, INSTR_RXE_FRRD }, 1692 1736 { "lxeb", 0x06, INSTR_RXE_FRRD }, ··· 2005 2051 else 2006 2052 *ptr++ = ' '; 2007 2053 addr = regs->psw.addr + start - 32; 2008 - ptr += sprintf(ptr, ONELONG, addr); 2054 + ptr += sprintf(ptr, "%016lx: ", addr); 2009 2055 if (start + opsize >= end) 2010 2056 break; 2011 2057 for (i = 0; i < opsize; i++)
+6 -20
arch/s390/kernel/dumpstack.c
··· 18 18 #include <asm/dis.h> 19 19 #include <asm/ipl.h> 20 20 21 - #ifndef CONFIG_64BIT 22 - #define LONG "%08lx " 23 - #define FOURLONG "%08lx %08lx %08lx %08lx\n" 24 - static int kstack_depth_to_print = 12; 25 - #else /* CONFIG_64BIT */ 26 - #define LONG "%016lx " 27 - #define FOURLONG "%016lx %016lx %016lx %016lx\n" 28 - static int kstack_depth_to_print = 20; 29 - #endif /* CONFIG_64BIT */ 30 - 31 21 /* 32 22 * For show_trace we have tree different stack to consider: 33 23 * - the panic stack which is used if the kernel stack has overflown ··· 105 115 else 106 116 stack = sp; 107 117 108 - for (i = 0; i < kstack_depth_to_print; i++) { 118 + for (i = 0; i < 20; i++) { 109 119 if (((addr_t) stack & (THREAD_SIZE-1)) == 0) 110 120 break; 111 121 if ((i * sizeof(long) % 32) == 0) 112 122 printk("%s ", i == 0 ? "" : "\n"); 113 - printk(LONG, *stack++); 123 + printk("%016lx ", *stack++); 114 124 } 115 125 printk("\n"); 116 126 show_trace(task, sp); ··· 118 128 119 129 static void show_last_breaking_event(struct pt_regs *regs) 120 130 { 121 - #ifdef CONFIG_64BIT 122 131 printk("Last Breaking-Event-Address:\n"); 123 132 printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]); 124 - #endif 125 133 } 126 134 127 135 static inline int mask_bits(struct pt_regs *regs, unsigned long bits) ··· 143 155 mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT), 144 156 mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC), 145 157 mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM)); 146 - #ifdef CONFIG_64BIT 147 158 printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA)); 148 - #endif 149 - printk("\n%s GPRS: " FOURLONG, mode, 159 + printk("\n%s GPRS: %016lx %016lx %016lx %016lx\n", mode, 150 160 regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); 151 - printk(" " FOURLONG, 161 + printk(" %016lx %016lx %016lx %016lx\n", 152 162 regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]); 153 - printk(" " FOURLONG, 163 + printk(" 
%016lx %016lx %016lx %016lx\n", 154 164 regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]); 155 - printk(" " FOURLONG, 165 + printk(" %016lx %016lx %016lx %016lx\n", 156 166 regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]); 157 167 show_code(regs); 158 168 }
-69
arch/s390/kernel/early.c
··· 64 64 " .align 4\n" 65 65 " .type savesys_ipl_nss, @function\n" 66 66 "savesys_ipl_nss:\n" 67 - #ifdef CONFIG_64BIT 68 67 " stmg 6,15,48(15)\n" 69 68 " lgr 14,3\n" 70 69 " sam31\n" ··· 71 72 " sam64\n" 72 73 " lgr 2,14\n" 73 74 " lmg 6,15,48(15)\n" 74 - #else 75 - " stm 6,15,24(15)\n" 76 - " lr 14,3\n" 77 - " diag 2,14,0x8\n" 78 - " lr 2,14\n" 79 - " lm 6,15,24(15)\n" 80 - #endif 81 75 " br 14\n" 82 76 " .size savesys_ipl_nss, .-savesys_ipl_nss\n" 83 77 " .previous\n"); ··· 232 240 233 241 static __init void setup_topology(void) 234 242 { 235 - #ifdef CONFIG_64BIT 236 243 int max_mnest; 237 244 238 245 if (!test_facility(11)) ··· 242 251 break; 243 252 } 244 253 topology_max_mnest = max_mnest; 245 - #endif 246 254 } 247 255 248 256 static void early_pgm_check_handler(void) ··· 280 290 ARRAY_SIZE(S390_lowcore.stfle_fac_list)); 281 291 } 282 292 283 - static __init void detect_mvpg(void) 284 - { 285 - #ifndef CONFIG_64BIT 286 - int rc; 287 - 288 - asm volatile( 289 - " la 0,0\n" 290 - " mvpg %2,%2\n" 291 - "0: la %0,0\n" 292 - "1:\n" 293 - EX_TABLE(0b,1b) 294 - : "=d" (rc) : "0" (-EOPNOTSUPP), "a" (0) : "memory", "cc", "0"); 295 - if (!rc) 296 - S390_lowcore.machine_flags |= MACHINE_FLAG_MVPG; 297 - #endif 298 - } 299 - 300 - static __init void detect_ieee(void) 301 - { 302 - #ifndef CONFIG_64BIT 303 - int rc, tmp; 304 - 305 - asm volatile( 306 - " efpc %1,0\n" 307 - "0: la %0,0\n" 308 - "1:\n" 309 - EX_TABLE(0b,1b) 310 - : "=d" (rc), "=d" (tmp): "0" (-EOPNOTSUPP) : "cc"); 311 - if (!rc) 312 - S390_lowcore.machine_flags |= MACHINE_FLAG_IEEE; 313 - #endif 314 - } 315 - 316 - static __init void detect_csp(void) 317 - { 318 - #ifndef CONFIG_64BIT 319 - int rc; 320 - 321 - asm volatile( 322 - " la 0,0\n" 323 - " la 1,0\n" 324 - " la 2,4\n" 325 - " csp 0,2\n" 326 - "0: la %0,0\n" 327 - "1:\n" 328 - EX_TABLE(0b,1b) 329 - : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc", "0", "1", "2"); 330 - if (!rc) 331 - S390_lowcore.machine_flags |= MACHINE_FLAG_CSP; 332 - #endif 333 - } 334 
- 335 293 static __init void detect_diag9c(void) 336 294 { 337 295 unsigned int cpu_address; ··· 298 360 299 361 static __init void detect_diag44(void) 300 362 { 301 - #ifdef CONFIG_64BIT 302 363 int rc; 303 364 304 365 asm volatile( ··· 308 371 : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc"); 309 372 if (!rc) 310 373 S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44; 311 - #endif 312 374 } 313 375 314 376 static __init void detect_machine_facilities(void) 315 377 { 316 - #ifdef CONFIG_64BIT 317 378 if (test_facility(8)) { 318 379 S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1; 319 380 __ctl_set_bit(0, 23); ··· 328 393 S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; 329 394 if (test_facility(129)) 330 395 S390_lowcore.machine_flags |= MACHINE_FLAG_VX; 331 - #endif 332 396 } 333 397 334 398 static int __init cad_setup(char *str) ··· 435 501 ipl_update_parameters(); 436 502 setup_boot_command_line(); 437 503 create_kernel_nss(); 438 - detect_mvpg(); 439 - detect_ieee(); 440 - detect_csp(); 441 504 detect_diag9c(); 442 505 detect_diag44(); 443 506 detect_machine_facilities();
-966
arch/s390/kernel/entry.S
··· 1 - /* 2 - * S390 low-level entry points. 3 - * 4 - * Copyright IBM Corp. 1999, 2012 5 - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 6 - * Hartmut Penner (hp@de.ibm.com), 7 - * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 8 - * Heiko Carstens <heiko.carstens@de.ibm.com> 9 - */ 10 - 11 - #include <linux/init.h> 12 - #include <linux/linkage.h> 13 - #include <asm/processor.h> 14 - #include <asm/cache.h> 15 - #include <asm/errno.h> 16 - #include <asm/ptrace.h> 17 - #include <asm/thread_info.h> 18 - #include <asm/asm-offsets.h> 19 - #include <asm/unistd.h> 20 - #include <asm/page.h> 21 - #include <asm/sigp.h> 22 - #include <asm/irq.h> 23 - 24 - __PT_R0 = __PT_GPRS 25 - __PT_R1 = __PT_GPRS + 4 26 - __PT_R2 = __PT_GPRS + 8 27 - __PT_R3 = __PT_GPRS + 12 28 - __PT_R4 = __PT_GPRS + 16 29 - __PT_R5 = __PT_GPRS + 20 30 - __PT_R6 = __PT_GPRS + 24 31 - __PT_R7 = __PT_GPRS + 28 32 - __PT_R8 = __PT_GPRS + 32 33 - __PT_R9 = __PT_GPRS + 36 34 - __PT_R10 = __PT_GPRS + 40 35 - __PT_R11 = __PT_GPRS + 44 36 - __PT_R12 = __PT_GPRS + 48 37 - __PT_R13 = __PT_GPRS + 524 38 - __PT_R14 = __PT_GPRS + 56 39 - __PT_R15 = __PT_GPRS + 60 40 - 41 - STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER 42 - STACK_SIZE = 1 << STACK_SHIFT 43 - STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE 44 - 45 - _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED) 46 - _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ 47 - _TIF_SYSCALL_TRACEPOINT) 48 - _CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE) 49 - _PIF_WORK = (_PIF_PER_TRAP) 50 - 51 - #define BASED(name) name-system_call(%r13) 52 - 53 - .macro TRACE_IRQS_ON 54 - #ifdef CONFIG_TRACE_IRQFLAGS 55 - basr %r2,%r0 56 - l %r1,BASED(.Lc_hardirqs_on) 57 - basr %r14,%r1 # call trace_hardirqs_on_caller 58 - #endif 59 - .endm 60 - 61 - .macro TRACE_IRQS_OFF 62 - #ifdef CONFIG_TRACE_IRQFLAGS 63 - basr %r2,%r0 64 - l %r1,BASED(.Lc_hardirqs_off) 65 - basr %r14,%r1 # call trace_hardirqs_off_caller 66 - 
#endif 67 - .endm 68 - 69 - .macro LOCKDEP_SYS_EXIT 70 - #ifdef CONFIG_LOCKDEP 71 - tm __PT_PSW+1(%r11),0x01 # returning to user ? 72 - jz .+10 73 - l %r1,BASED(.Lc_lockdep_sys_exit) 74 - basr %r14,%r1 # call lockdep_sys_exit 75 - #endif 76 - .endm 77 - 78 - .macro CHECK_STACK stacksize,savearea 79 - #ifdef CONFIG_CHECK_STACK 80 - tml %r15,\stacksize - CONFIG_STACK_GUARD 81 - la %r14,\savearea 82 - jz stack_overflow 83 - #endif 84 - .endm 85 - 86 - .macro SWITCH_ASYNC savearea,stack,shift 87 - tmh %r8,0x0001 # interrupting from user ? 88 - jnz 1f 89 - lr %r14,%r9 90 - sl %r14,BASED(.Lc_critical_start) 91 - cl %r14,BASED(.Lc_critical_length) 92 - jhe 0f 93 - la %r11,\savearea # inside critical section, do cleanup 94 - bras %r14,cleanup_critical 95 - tmh %r8,0x0001 # retest problem state after cleanup 96 - jnz 1f 97 - 0: l %r14,\stack # are we already on the target stack? 98 - slr %r14,%r15 99 - sra %r14,\shift 100 - jnz 1f 101 - CHECK_STACK 1<<\shift,\savearea 102 - ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 103 - j 2f 104 - 1: l %r15,\stack # load target stack 105 - 2: la %r11,STACK_FRAME_OVERHEAD(%r15) 106 - .endm 107 - 108 - .macro ADD64 high,low,timer 109 - al \high,\timer 110 - al \low,4+\timer 111 - brc 12,.+8 112 - ahi \high,1 113 - .endm 114 - 115 - .macro SUB64 high,low,timer 116 - sl \high,\timer 117 - sl \low,4+\timer 118 - brc 3,.+8 119 - ahi \high,-1 120 - .endm 121 - 122 - .macro UPDATE_VTIME high,low,enter_timer 123 - lm \high,\low,__LC_EXIT_TIMER 124 - SUB64 \high,\low,\enter_timer 125 - ADD64 \high,\low,__LC_USER_TIMER 126 - stm \high,\low,__LC_USER_TIMER 127 - lm \high,\low,__LC_LAST_UPDATE_TIMER 128 - SUB64 \high,\low,__LC_EXIT_TIMER 129 - ADD64 \high,\low,__LC_SYSTEM_TIMER 130 - stm \high,\low,__LC_SYSTEM_TIMER 131 - mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer 132 - .endm 133 - 134 - .macro REENABLE_IRQS 135 - st %r8,__LC_RETURN_PSW 136 - ni __LC_RETURN_PSW,0xbf 137 - ssm __LC_RETURN_PSW 138 - .endm 139 - 140 - .section .kprobes.text, "ax" 141 - 
142 - /* 143 - * Scheduler resume function, called by switch_to 144 - * gpr2 = (task_struct *) prev 145 - * gpr3 = (task_struct *) next 146 - * Returns: 147 - * gpr2 = prev 148 - */ 149 - ENTRY(__switch_to) 150 - stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task 151 - st %r15,__THREAD_ksp(%r2) # store kernel stack of prev 152 - l %r4,__THREAD_info(%r2) # get thread_info of prev 153 - l %r5,__THREAD_info(%r3) # get thread_info of next 154 - lr %r15,%r5 155 - ahi %r15,STACK_INIT # end of kernel stack of next 156 - st %r3,__LC_CURRENT # store task struct of next 157 - st %r5,__LC_THREAD_INFO # store thread info of next 158 - st %r15,__LC_KERNEL_STACK # store end of kernel stack 159 - lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 160 - mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next 161 - l %r15,__THREAD_ksp(%r3) # load kernel stack of next 162 - lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 163 - br %r14 164 - 165 - .L__critical_start: 166 - /* 167 - * SVC interrupt handler routine. System calls are synchronous events and 168 - * are executed with interrupts enabled. 
169 - */ 170 - 171 - ENTRY(system_call) 172 - stpt __LC_SYNC_ENTER_TIMER 173 - .Lsysc_stm: 174 - stm %r8,%r15,__LC_SAVE_AREA_SYNC 175 - l %r12,__LC_THREAD_INFO 176 - l %r13,__LC_SVC_NEW_PSW+4 177 - lhi %r14,_PIF_SYSCALL 178 - .Lsysc_per: 179 - l %r15,__LC_KERNEL_STACK 180 - la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs 181 - .Lsysc_vtime: 182 - UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER 183 - stm %r0,%r7,__PT_R0(%r11) 184 - mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC 185 - mvc __PT_PSW(8,%r11),__LC_SVC_OLD_PSW 186 - mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC 187 - st %r14,__PT_FLAGS(%r11) 188 - .Lsysc_do_svc: 189 - l %r10,__TI_sysc_table(%r12) # 31 bit system call table 190 - lh %r8,__PT_INT_CODE+2(%r11) 191 - sla %r8,2 # shift and test for svc0 192 - jnz .Lsysc_nr_ok 193 - # svc 0: system call number in %r1 194 - cl %r1,BASED(.Lnr_syscalls) 195 - jnl .Lsysc_nr_ok 196 - sth %r1,__PT_INT_CODE+2(%r11) 197 - lr %r8,%r1 198 - sla %r8,2 199 - .Lsysc_nr_ok: 200 - xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 201 - st %r2,__PT_ORIG_GPR2(%r11) 202 - st %r7,STACK_FRAME_OVERHEAD(%r15) 203 - l %r9,0(%r8,%r10) # get system call addr. 204 - tm __TI_flags+3(%r12),_TIF_TRACE 205 - jnz .Lsysc_tracesys 206 - basr %r14,%r9 # call sys_xxxx 207 - st %r2,__PT_R2(%r11) # store return value 208 - 209 - .Lsysc_return: 210 - LOCKDEP_SYS_EXIT 211 - .Lsysc_tif: 212 - tm __PT_PSW+1(%r11),0x01 # returning to user ? 213 - jno .Lsysc_restore 214 - tm __PT_FLAGS+3(%r11),_PIF_WORK 215 - jnz .Lsysc_work 216 - tm __TI_flags+3(%r12),_TIF_WORK 217 - jnz .Lsysc_work # check for thread work 218 - tm __LC_CPU_FLAGS+3,_CIF_WORK 219 - jnz .Lsysc_work 220 - .Lsysc_restore: 221 - mvc __LC_RETURN_PSW(8),__PT_PSW(%r11) 222 - stpt __LC_EXIT_TIMER 223 - lm %r0,%r15,__PT_R0(%r11) 224 - lpsw __LC_RETURN_PSW 225 - .Lsysc_done: 226 - 227 - # 228 - # One of the work bits is on. Find out which one. 
229 - # 230 - .Lsysc_work: 231 - tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING 232 - jo .Lsysc_mcck_pending 233 - tm __TI_flags+3(%r12),_TIF_NEED_RESCHED 234 - jo .Lsysc_reschedule 235 - tm __PT_FLAGS+3(%r11),_PIF_PER_TRAP 236 - jo .Lsysc_singlestep 237 - tm __TI_flags+3(%r12),_TIF_SIGPENDING 238 - jo .Lsysc_sigpending 239 - tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME 240 - jo .Lsysc_notify_resume 241 - tm __LC_CPU_FLAGS+3,_CIF_ASCE 242 - jo .Lsysc_uaccess 243 - j .Lsysc_return # beware of critical section cleanup 244 - 245 - # 246 - # _TIF_NEED_RESCHED is set, call schedule 247 - # 248 - .Lsysc_reschedule: 249 - l %r1,BASED(.Lc_schedule) 250 - la %r14,BASED(.Lsysc_return) 251 - br %r1 # call schedule 252 - 253 - # 254 - # _CIF_MCCK_PENDING is set, call handler 255 - # 256 - .Lsysc_mcck_pending: 257 - l %r1,BASED(.Lc_handle_mcck) 258 - la %r14,BASED(.Lsysc_return) 259 - br %r1 # TIF bit will be cleared by handler 260 - 261 - # 262 - # _CIF_ASCE is set, load user space asce 263 - # 264 - .Lsysc_uaccess: 265 - ni __LC_CPU_FLAGS+3,255-_CIF_ASCE 266 - lctl %c1,%c1,__LC_USER_ASCE # load primary asce 267 - j .Lsysc_return 268 - 269 - # 270 - # _TIF_SIGPENDING is set, call do_signal 271 - # 272 - .Lsysc_sigpending: 273 - lr %r2,%r11 # pass pointer to pt_regs 274 - l %r1,BASED(.Lc_do_signal) 275 - basr %r14,%r1 # call do_signal 276 - tm __PT_FLAGS+3(%r11),_PIF_SYSCALL 277 - jno .Lsysc_return 278 - lm %r2,%r7,__PT_R2(%r11) # load svc arguments 279 - l %r10,__TI_sysc_table(%r12) # 31 bit system call table 280 - xr %r8,%r8 # svc 0 returns -ENOSYS 281 - clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2) 282 - jnl .Lsysc_nr_ok # invalid svc number -> do svc 0 283 - lh %r8,__PT_INT_CODE+2(%r11) # load new svc number 284 - sla %r8,2 285 - j .Lsysc_nr_ok # restart svc 286 - 287 - # 288 - # _TIF_NOTIFY_RESUME is set, call do_notify_resume 289 - # 290 - .Lsysc_notify_resume: 291 - lr %r2,%r11 # pass pointer to pt_regs 292 - l %r1,BASED(.Lc_do_notify_resume) 293 - la %r14,BASED(.Lsysc_return) 
294 - br %r1 # call do_notify_resume 295 - 296 - # 297 - # _PIF_PER_TRAP is set, call do_per_trap 298 - # 299 - .Lsysc_singlestep: 300 - ni __PT_FLAGS+3(%r11),255-_PIF_PER_TRAP 301 - lr %r2,%r11 # pass pointer to pt_regs 302 - l %r1,BASED(.Lc_do_per_trap) 303 - la %r14,BASED(.Lsysc_return) 304 - br %r1 # call do_per_trap 305 - 306 - # 307 - # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before 308 - # and after the system call 309 - # 310 - .Lsysc_tracesys: 311 - l %r1,BASED(.Lc_trace_enter) 312 - lr %r2,%r11 # pass pointer to pt_regs 313 - la %r3,0 314 - xr %r0,%r0 315 - icm %r0,3,__PT_INT_CODE+2(%r11) 316 - st %r0,__PT_R2(%r11) 317 - basr %r14,%r1 # call do_syscall_trace_enter 318 - cl %r2,BASED(.Lnr_syscalls) 319 - jnl .Lsysc_tracenogo 320 - lr %r8,%r2 321 - sll %r8,2 322 - l %r9,0(%r8,%r10) 323 - .Lsysc_tracego: 324 - lm %r3,%r7,__PT_R3(%r11) 325 - st %r7,STACK_FRAME_OVERHEAD(%r15) 326 - l %r2,__PT_ORIG_GPR2(%r11) 327 - basr %r14,%r9 # call sys_xxx 328 - st %r2,__PT_R2(%r11) # store return value 329 - .Lsysc_tracenogo: 330 - tm __TI_flags+3(%r12),_TIF_TRACE 331 - jz .Lsysc_return 332 - l %r1,BASED(.Lc_trace_exit) 333 - lr %r2,%r11 # pass pointer to pt_regs 334 - la %r14,BASED(.Lsysc_return) 335 - br %r1 # call do_syscall_trace_exit 336 - 337 - # 338 - # a new process exits the kernel with ret_from_fork 339 - # 340 - ENTRY(ret_from_fork) 341 - la %r11,STACK_FRAME_OVERHEAD(%r15) 342 - l %r12,__LC_THREAD_INFO 343 - l %r13,__LC_SVC_NEW_PSW+4 344 - l %r1,BASED(.Lc_schedule_tail) 345 - basr %r14,%r1 # call schedule_tail 346 - TRACE_IRQS_ON 347 - ssm __LC_SVC_NEW_PSW # reenable interrupts 348 - tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? 
349 - jne .Lsysc_tracenogo 350 - # it's a kernel thread 351 - lm %r9,%r10,__PT_R9(%r11) # load gprs 352 - ENTRY(kernel_thread_starter) 353 - la %r2,0(%r10) 354 - basr %r14,%r9 355 - j .Lsysc_tracenogo 356 - 357 - /* 358 - * Program check handler routine 359 - */ 360 - 361 - ENTRY(pgm_check_handler) 362 - stpt __LC_SYNC_ENTER_TIMER 363 - stm %r8,%r15,__LC_SAVE_AREA_SYNC 364 - l %r12,__LC_THREAD_INFO 365 - l %r13,__LC_SVC_NEW_PSW+4 366 - lm %r8,%r9,__LC_PGM_OLD_PSW 367 - tmh %r8,0x0001 # test problem state bit 368 - jnz 1f # -> fault in user space 369 - tmh %r8,0x4000 # PER bit set in old PSW ? 370 - jnz 0f # -> enabled, can't be a double fault 371 - tm __LC_PGM_ILC+3,0x80 # check for per exception 372 - jnz .Lpgm_svcper # -> single stepped svc 373 - 0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC 374 - ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 375 - j 2f 376 - 1: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER 377 - l %r15,__LC_KERNEL_STACK 378 - 2: la %r11,STACK_FRAME_OVERHEAD(%r15) 379 - stm %r0,%r7,__PT_R0(%r11) 380 - mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC 381 - stm %r8,%r9,__PT_PSW(%r11) 382 - mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC 383 - mvc __PT_INT_PARM_LONG(4,%r11),__LC_TRANS_EXC_CODE 384 - xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) 385 - tm __LC_PGM_ILC+3,0x80 # check for per exception 386 - jz 0f 387 - l %r1,__TI_task(%r12) 388 - tmh %r8,0x0001 # kernel per event ? 
389 - jz .Lpgm_kprobe 390 - oi __PT_FLAGS+3(%r11),_PIF_PER_TRAP 391 - mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS 392 - mvc __THREAD_per_cause(2,%r1),__LC_PER_CODE 393 - mvc __THREAD_per_paid(1,%r1),__LC_PER_ACCESS_ID 394 - 0: REENABLE_IRQS 395 - xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 396 - l %r1,BASED(.Lc_jump_table) 397 - la %r10,0x7f 398 - n %r10,__PT_INT_CODE(%r11) 399 - je .Lsysc_return 400 - sll %r10,2 401 - l %r1,0(%r10,%r1) # load address of handler routine 402 - lr %r2,%r11 # pass pointer to pt_regs 403 - basr %r14,%r1 # branch to interrupt-handler 404 - j .Lsysc_return 405 - 406 - # 407 - # PER event in supervisor state, must be kprobes 408 - # 409 - .Lpgm_kprobe: 410 - REENABLE_IRQS 411 - xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 412 - l %r1,BASED(.Lc_do_per_trap) 413 - lr %r2,%r11 # pass pointer to pt_regs 414 - basr %r14,%r1 # call do_per_trap 415 - j .Lsysc_return 416 - 417 - # 418 - # single stepped system call 419 - # 420 - .Lpgm_svcper: 421 - mvc __LC_RETURN_PSW(4),__LC_SVC_NEW_PSW 422 - mvc __LC_RETURN_PSW+4(4),BASED(.Lc_sysc_per) 423 - lhi %r14,_PIF_SYSCALL | _PIF_PER_TRAP 424 - lpsw __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs 425 - 426 - /* 427 - * IO interrupt handler routine 428 - */ 429 - 430 - ENTRY(io_int_handler) 431 - stck __LC_INT_CLOCK 432 - stpt __LC_ASYNC_ENTER_TIMER 433 - stm %r8,%r15,__LC_SAVE_AREA_ASYNC 434 - l %r12,__LC_THREAD_INFO 435 - l %r13,__LC_SVC_NEW_PSW+4 436 - lm %r8,%r9,__LC_IO_OLD_PSW 437 - tmh %r8,0x0001 # interrupting from user ? 
438 - jz .Lio_skip 439 - UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER 440 - .Lio_skip: 441 - SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT 442 - stm %r0,%r7,__PT_R0(%r11) 443 - mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC 444 - stm %r8,%r9,__PT_PSW(%r11) 445 - mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID 446 - xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) 447 - TRACE_IRQS_OFF 448 - xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 449 - .Lio_loop: 450 - l %r1,BASED(.Lc_do_IRQ) 451 - lr %r2,%r11 # pass pointer to pt_regs 452 - lhi %r3,IO_INTERRUPT 453 - tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ? 454 - jz .Lio_call 455 - lhi %r3,THIN_INTERRUPT 456 - .Lio_call: 457 - basr %r14,%r1 # call do_IRQ 458 - tm __LC_MACHINE_FLAGS+2,0x10 # MACHINE_FLAG_LPAR 459 - jz .Lio_return 460 - tpi 0 461 - jz .Lio_return 462 - mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID 463 - j .Lio_loop 464 - .Lio_return: 465 - LOCKDEP_SYS_EXIT 466 - TRACE_IRQS_ON 467 - .Lio_tif: 468 - tm __TI_flags+3(%r12),_TIF_WORK 469 - jnz .Lio_work # there is work to do (signals etc.) 470 - tm __LC_CPU_FLAGS+3,_CIF_WORK 471 - jnz .Lio_work 472 - .Lio_restore: 473 - mvc __LC_RETURN_PSW(8),__PT_PSW(%r11) 474 - stpt __LC_EXIT_TIMER 475 - lm %r0,%r15,__PT_R0(%r11) 476 - lpsw __LC_RETURN_PSW 477 - .Lio_done: 478 - 479 - # 480 - # There is work todo, find out in which context we have been interrupted: 481 - # 1) if we return to user space we can do all _TIF_WORK work 482 - # 2) if we return to kernel code and preemptive scheduling is enabled check 483 - # the preemption counter and if it is zero call preempt_schedule_irq 484 - # Before any work can be done, a switch to the kernel stack is required. 485 - # 486 - .Lio_work: 487 - tm __PT_PSW+1(%r11),0x01 # returning to user ? 
488 - jo .Lio_work_user # yes -> do resched & signal 489 - #ifdef CONFIG_PREEMPT 490 - # check for preemptive scheduling 491 - icm %r0,15,__TI_precount(%r12) 492 - jnz .Lio_restore # preemption disabled 493 - tm __TI_flags+3(%r12),_TIF_NEED_RESCHED 494 - jno .Lio_restore 495 - # switch to kernel stack 496 - l %r1,__PT_R15(%r11) 497 - ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 498 - mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 499 - xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) 500 - la %r11,STACK_FRAME_OVERHEAD(%r1) 501 - lr %r15,%r1 502 - # TRACE_IRQS_ON already done at .Lio_return, call 503 - # TRACE_IRQS_OFF to keep things symmetrical 504 - TRACE_IRQS_OFF 505 - l %r1,BASED(.Lc_preempt_irq) 506 - basr %r14,%r1 # call preempt_schedule_irq 507 - j .Lio_return 508 - #else 509 - j .Lio_restore 510 - #endif 511 - 512 - # 513 - # Need to do work before returning to userspace, switch to kernel stack 514 - # 515 - .Lio_work_user: 516 - l %r1,__LC_KERNEL_STACK 517 - mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 518 - xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) 519 - la %r11,STACK_FRAME_OVERHEAD(%r1) 520 - lr %r15,%r1 521 - 522 - # 523 - # One of the work bits is on. Find out which one. 
524 - # 525 - .Lio_work_tif: 526 - tm __LC_CPU_FLAGS+3(%r12),_CIF_MCCK_PENDING 527 - jo .Lio_mcck_pending 528 - tm __TI_flags+3(%r12),_TIF_NEED_RESCHED 529 - jo .Lio_reschedule 530 - tm __TI_flags+3(%r12),_TIF_SIGPENDING 531 - jo .Lio_sigpending 532 - tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME 533 - jo .Lio_notify_resume 534 - tm __LC_CPU_FLAGS+3,_CIF_ASCE 535 - jo .Lio_uaccess 536 - j .Lio_return # beware of critical section cleanup 537 - 538 - # 539 - # _CIF_MCCK_PENDING is set, call handler 540 - # 541 - .Lio_mcck_pending: 542 - # TRACE_IRQS_ON already done at .Lio_return 543 - l %r1,BASED(.Lc_handle_mcck) 544 - basr %r14,%r1 # TIF bit will be cleared by handler 545 - TRACE_IRQS_OFF 546 - j .Lio_return 547 - 548 - # 549 - # _CIF_ASCE is set, load user space asce 550 - # 551 - .Lio_uaccess: 552 - ni __LC_CPU_FLAGS+3,255-_CIF_ASCE 553 - lctl %c1,%c1,__LC_USER_ASCE # load primary asce 554 - j .Lio_return 555 - 556 - # 557 - # _TIF_NEED_RESCHED is set, call schedule 558 - # 559 - .Lio_reschedule: 560 - # TRACE_IRQS_ON already done at .Lio_return 561 - l %r1,BASED(.Lc_schedule) 562 - ssm __LC_SVC_NEW_PSW # reenable interrupts 563 - basr %r14,%r1 # call scheduler 564 - ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 565 - TRACE_IRQS_OFF 566 - j .Lio_return 567 - 568 - # 569 - # _TIF_SIGPENDING is set, call do_signal 570 - # 571 - .Lio_sigpending: 572 - # TRACE_IRQS_ON already done at .Lio_return 573 - l %r1,BASED(.Lc_do_signal) 574 - ssm __LC_SVC_NEW_PSW # reenable interrupts 575 - lr %r2,%r11 # pass pointer to pt_regs 576 - basr %r14,%r1 # call do_signal 577 - ssm __LC_PGM_NEW_PSW # disable I/O and ext. 
interrupts 578 - TRACE_IRQS_OFF 579 - j .Lio_return 580 - 581 - # 582 - # _TIF_SIGPENDING is set, call do_signal 583 - # 584 - .Lio_notify_resume: 585 - # TRACE_IRQS_ON already done at .Lio_return 586 - l %r1,BASED(.Lc_do_notify_resume) 587 - ssm __LC_SVC_NEW_PSW # reenable interrupts 588 - lr %r2,%r11 # pass pointer to pt_regs 589 - basr %r14,%r1 # call do_notify_resume 590 - ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 591 - TRACE_IRQS_OFF 592 - j .Lio_return 593 - 594 - /* 595 - * External interrupt handler routine 596 - */ 597 - 598 - ENTRY(ext_int_handler) 599 - stck __LC_INT_CLOCK 600 - stpt __LC_ASYNC_ENTER_TIMER 601 - stm %r8,%r15,__LC_SAVE_AREA_ASYNC 602 - l %r12,__LC_THREAD_INFO 603 - l %r13,__LC_SVC_NEW_PSW+4 604 - lm %r8,%r9,__LC_EXT_OLD_PSW 605 - tmh %r8,0x0001 # interrupting from user ? 606 - jz .Lext_skip 607 - UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER 608 - .Lext_skip: 609 - SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT 610 - stm %r0,%r7,__PT_R0(%r11) 611 - mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC 612 - stm %r8,%r9,__PT_PSW(%r11) 613 - mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR 614 - mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS 615 - xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) 616 - TRACE_IRQS_OFF 617 - l %r1,BASED(.Lc_do_IRQ) 618 - lr %r2,%r11 # pass pointer to pt_regs 619 - lhi %r3,EXT_INTERRUPT 620 - basr %r14,%r1 # call do_IRQ 621 - j .Lio_return 622 - 623 - /* 624 - * Load idle PSW. The second "half" of this function is in .Lcleanup_idle. 
625 - */ 626 - ENTRY(psw_idle) 627 - st %r3,__SF_EMPTY(%r15) 628 - basr %r1,0 629 - la %r1,.Lpsw_idle_lpsw+4-.(%r1) 630 - st %r1,__SF_EMPTY+4(%r15) 631 - oi __SF_EMPTY+4(%r15),0x80 632 - stck __CLOCK_IDLE_ENTER(%r2) 633 - stpt __TIMER_IDLE_ENTER(%r2) 634 - .Lpsw_idle_lpsw: 635 - lpsw __SF_EMPTY(%r15) 636 - br %r14 637 - .Lpsw_idle_end: 638 - 639 - .L__critical_end: 640 - 641 - /* 642 - * Machine check handler routines 643 - */ 644 - 645 - ENTRY(mcck_int_handler) 646 - stck __LC_MCCK_CLOCK 647 - spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer 648 - lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs 649 - l %r12,__LC_THREAD_INFO 650 - l %r13,__LC_SVC_NEW_PSW+4 651 - lm %r8,%r9,__LC_MCK_OLD_PSW 652 - tm __LC_MCCK_CODE,0x80 # system damage? 653 - jo .Lmcck_panic # yes -> rest of mcck code invalid 654 - la %r14,__LC_CPU_TIMER_SAVE_AREA 655 - mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 656 - tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? 657 - jo 3f 658 - la %r14,__LC_SYNC_ENTER_TIMER 659 - clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER 660 - jl 0f 661 - la %r14,__LC_ASYNC_ENTER_TIMER 662 - 0: clc 0(8,%r14),__LC_EXIT_TIMER 663 - jl 1f 664 - la %r14,__LC_EXIT_TIMER 665 - 1: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER 666 - jl 2f 667 - la %r14,__LC_LAST_UPDATE_TIMER 668 - 2: spt 0(%r14) 669 - mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 670 - 3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? 671 - jno .Lmcck_panic # no -> skip cleanup critical 672 - tm %r8,0x0001 # interrupting from user ? 
673 - jz .Lmcck_skip 674 - UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER 675 - .Lmcck_skip: 676 - SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT 677 - stm %r0,%r7,__PT_R0(%r11) 678 - mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32 679 - stm %r8,%r9,__PT_PSW(%r11) 680 - xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11) 681 - xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 682 - l %r1,BASED(.Lc_do_machine_check) 683 - lr %r2,%r11 # pass pointer to pt_regs 684 - basr %r14,%r1 # call s390_do_machine_check 685 - tm __PT_PSW+1(%r11),0x01 # returning to user ? 686 - jno .Lmcck_return 687 - l %r1,__LC_KERNEL_STACK # switch to kernel stack 688 - mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 689 - xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) 690 - la %r11,STACK_FRAME_OVERHEAD(%r15) 691 - lr %r15,%r1 692 - ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off 693 - tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING 694 - jno .Lmcck_return 695 - TRACE_IRQS_OFF 696 - l %r1,BASED(.Lc_handle_mcck) 697 - basr %r14,%r1 # call s390_handle_mcck 698 - TRACE_IRQS_ON 699 - .Lmcck_return: 700 - mvc __LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW 701 - tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? 
702 - jno 0f 703 - lm %r0,%r15,__PT_R0(%r11) 704 - stpt __LC_EXIT_TIMER 705 - lpsw __LC_RETURN_MCCK_PSW 706 - 0: lm %r0,%r15,__PT_R0(%r11) 707 - lpsw __LC_RETURN_MCCK_PSW 708 - 709 - .Lmcck_panic: 710 - l %r14,__LC_PANIC_STACK 711 - slr %r14,%r15 712 - sra %r14,PAGE_SHIFT 713 - jz 0f 714 - l %r15,__LC_PANIC_STACK 715 - j .Lmcck_skip 716 - 0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 717 - j .Lmcck_skip 718 - 719 - # 720 - # PSW restart interrupt handler 721 - # 722 - ENTRY(restart_int_handler) 723 - st %r15,__LC_SAVE_AREA_RESTART 724 - l %r15,__LC_RESTART_STACK 725 - ahi %r15,-__PT_SIZE # create pt_regs on stack 726 - xc 0(__PT_SIZE,%r15),0(%r15) 727 - stm %r0,%r14,__PT_R0(%r15) 728 - mvc __PT_R15(4,%r15),__LC_SAVE_AREA_RESTART 729 - mvc __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw 730 - ahi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack 731 - xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15) 732 - l %r1,__LC_RESTART_FN # load fn, parm & source cpu 733 - l %r2,__LC_RESTART_DATA 734 - l %r3,__LC_RESTART_SOURCE 735 - ltr %r3,%r3 # test source cpu address 736 - jm 1f # negative -> skip source stop 737 - 0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu 738 - brc 10,0b # wait for status stored 739 - 1: basr %r14,%r1 # call function 740 - stap __SF_EMPTY(%r15) # store cpu address 741 - lh %r3,__SF_EMPTY(%r15) 742 - 2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu 743 - brc 2,2b 744 - 3: j 3b 745 - 746 - .section .kprobes.text, "ax" 747 - 748 - #ifdef CONFIG_CHECK_STACK 749 - /* 750 - * The synchronous or the asynchronous stack overflowed. We are dead. 751 - * No need to properly save the registers, we are going to panic anyway. 752 - * Setup a pt_regs so that show_trace can provide a good call trace. 
753 - */ 754 - stack_overflow: 755 - l %r15,__LC_PANIC_STACK # change to panic stack 756 - la %r11,STACK_FRAME_OVERHEAD(%r15) 757 - stm %r0,%r7,__PT_R0(%r11) 758 - stm %r8,%r9,__PT_PSW(%r11) 759 - mvc __PT_R8(32,%r11),0(%r14) 760 - l %r1,BASED(1f) 761 - xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 762 - lr %r2,%r11 # pass pointer to pt_regs 763 - br %r1 # branch to kernel_stack_overflow 764 - 1: .long kernel_stack_overflow 765 - #endif 766 - 767 - .Lcleanup_table: 768 - .long system_call + 0x80000000 769 - .long .Lsysc_do_svc + 0x80000000 770 - .long .Lsysc_tif + 0x80000000 771 - .long .Lsysc_restore + 0x80000000 772 - .long .Lsysc_done + 0x80000000 773 - .long .Lio_tif + 0x80000000 774 - .long .Lio_restore + 0x80000000 775 - .long .Lio_done + 0x80000000 776 - .long psw_idle + 0x80000000 777 - .long .Lpsw_idle_end + 0x80000000 778 - 779 - cleanup_critical: 780 - cl %r9,BASED(.Lcleanup_table) # system_call 781 - jl 0f 782 - cl %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc 783 - jl .Lcleanup_system_call 784 - cl %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif 785 - jl 0f 786 - cl %r9,BASED(.Lcleanup_table+12) # .Lsysc_restore 787 - jl .Lcleanup_sysc_tif 788 - cl %r9,BASED(.Lcleanup_table+16) # .Lsysc_done 789 - jl .Lcleanup_sysc_restore 790 - cl %r9,BASED(.Lcleanup_table+20) # .Lio_tif 791 - jl 0f 792 - cl %r9,BASED(.Lcleanup_table+24) # .Lio_restore 793 - jl .Lcleanup_io_tif 794 - cl %r9,BASED(.Lcleanup_table+28) # .Lio_done 795 - jl .Lcleanup_io_restore 796 - cl %r9,BASED(.Lcleanup_table+32) # psw_idle 797 - jl 0f 798 - cl %r9,BASED(.Lcleanup_table+36) # .Lpsw_idle_end 799 - jl .Lcleanup_idle 800 - 0: br %r14 801 - 802 - .Lcleanup_system_call: 803 - # check if stpt has been executed 804 - cl %r9,BASED(.Lcleanup_system_call_insn) 805 - jh 0f 806 - mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER 807 - chi %r11,__LC_SAVE_AREA_ASYNC 808 - je 0f 809 - mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER 810 - 0: # check if stm has been executed 811 - cl 
%r9,BASED(.Lcleanup_system_call_insn+4) 812 - jh 0f 813 - mvc __LC_SAVE_AREA_SYNC(32),0(%r11) 814 - 0: # set up saved registers r12, and r13 815 - st %r12,16(%r11) # r12 thread-info pointer 816 - st %r13,20(%r11) # r13 literal-pool pointer 817 - # check if the user time calculation has been done 818 - cl %r9,BASED(.Lcleanup_system_call_insn+8) 819 - jh 0f 820 - l %r10,__LC_EXIT_TIMER 821 - l %r15,__LC_EXIT_TIMER+4 822 - SUB64 %r10,%r15,__LC_SYNC_ENTER_TIMER 823 - ADD64 %r10,%r15,__LC_USER_TIMER 824 - st %r10,__LC_USER_TIMER 825 - st %r15,__LC_USER_TIMER+4 826 - 0: # check if the system time calculation has been done 827 - cl %r9,BASED(.Lcleanup_system_call_insn+12) 828 - jh 0f 829 - l %r10,__LC_LAST_UPDATE_TIMER 830 - l %r15,__LC_LAST_UPDATE_TIMER+4 831 - SUB64 %r10,%r15,__LC_EXIT_TIMER 832 - ADD64 %r10,%r15,__LC_SYSTEM_TIMER 833 - st %r10,__LC_SYSTEM_TIMER 834 - st %r15,__LC_SYSTEM_TIMER+4 835 - 0: # update accounting time stamp 836 - mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 837 - # set up saved register 11 838 - l %r15,__LC_KERNEL_STACK 839 - la %r9,STACK_FRAME_OVERHEAD(%r15) 840 - st %r9,12(%r11) # r11 pt_regs pointer 841 - # fill pt_regs 842 - mvc __PT_R8(32,%r9),__LC_SAVE_AREA_SYNC 843 - stm %r0,%r7,__PT_R0(%r9) 844 - mvc __PT_PSW(8,%r9),__LC_SVC_OLD_PSW 845 - mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC 846 - xc __PT_FLAGS(4,%r9),__PT_FLAGS(%r9) 847 - mvi __PT_FLAGS+3(%r9),_PIF_SYSCALL 848 - # setup saved register 15 849 - st %r15,28(%r11) # r15 stack pointer 850 - # set new psw address and exit 851 - l %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc + 0x80000000 852 - br %r14 853 - .Lcleanup_system_call_insn: 854 - .long system_call + 0x80000000 855 - .long .Lsysc_stm + 0x80000000 856 - .long .Lsysc_vtime + 0x80000000 + 36 857 - .long .Lsysc_vtime + 0x80000000 + 76 858 - 859 - .Lcleanup_sysc_tif: 860 - l %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif + 0x80000000 861 - br %r14 862 - 863 - .Lcleanup_sysc_restore: 864 - cl %r9,BASED(.Lcleanup_sysc_restore_insn) 
865 - jhe 0f 866 - l %r9,12(%r11) # get saved pointer to pt_regs 867 - mvc __LC_RETURN_PSW(8),__PT_PSW(%r9) 868 - mvc 0(32,%r11),__PT_R8(%r9) 869 - lm %r0,%r7,__PT_R0(%r9) 870 - 0: lm %r8,%r9,__LC_RETURN_PSW 871 - br %r14 872 - .Lcleanup_sysc_restore_insn: 873 - .long .Lsysc_done - 4 + 0x80000000 874 - 875 - .Lcleanup_io_tif: 876 - l %r9,BASED(.Lcleanup_table+20) # .Lio_tif + 0x80000000 877 - br %r14 878 - 879 - .Lcleanup_io_restore: 880 - cl %r9,BASED(.Lcleanup_io_restore_insn) 881 - jhe 0f 882 - l %r9,12(%r11) # get saved r11 pointer to pt_regs 883 - mvc __LC_RETURN_PSW(8),__PT_PSW(%r9) 884 - mvc 0(32,%r11),__PT_R8(%r9) 885 - lm %r0,%r7,__PT_R0(%r9) 886 - 0: lm %r8,%r9,__LC_RETURN_PSW 887 - br %r14 888 - .Lcleanup_io_restore_insn: 889 - .long .Lio_done - 4 + 0x80000000 890 - 891 - .Lcleanup_idle: 892 - # copy interrupt clock & cpu timer 893 - mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK 894 - mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER 895 - chi %r11,__LC_SAVE_AREA_ASYNC 896 - je 0f 897 - mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK 898 - mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER 899 - 0: # check if stck has been executed 900 - cl %r9,BASED(.Lcleanup_idle_insn) 901 - jhe 1f 902 - mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) 903 - mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r3) 904 - 1: # account system time going idle 905 - lm %r9,%r10,__LC_STEAL_TIMER 906 - ADD64 %r9,%r10,__CLOCK_IDLE_ENTER(%r2) 907 - SUB64 %r9,%r10,__LC_LAST_UPDATE_CLOCK 908 - stm %r9,%r10,__LC_STEAL_TIMER 909 - mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2) 910 - lm %r9,%r10,__LC_SYSTEM_TIMER 911 - ADD64 %r9,%r10,__LC_LAST_UPDATE_TIMER 912 - SUB64 %r9,%r10,__TIMER_IDLE_ENTER(%r2) 913 - stm %r9,%r10,__LC_SYSTEM_TIMER 914 - mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) 915 - # prepare return psw 916 - n %r8,BASED(.Lcleanup_idle_wait) # clear irq & wait state bits 917 - l %r9,24(%r11) # return from psw_idle 918 - br %r14 919 - .Lcleanup_idle_insn: 920 - 
.long .Lpsw_idle_lpsw + 0x80000000 921 - .Lcleanup_idle_wait: 922 - .long 0xfcfdffff 923 - 924 - /* 925 - * Integer constants 926 - */ 927 - .align 4 928 - .Lnr_syscalls: 929 - .long NR_syscalls 930 - .Lvtimer_max: 931 - .quad 0x7fffffffffffffff 932 - 933 - /* 934 - * Symbol constants 935 - */ 936 - .Lc_do_machine_check: .long s390_do_machine_check 937 - .Lc_handle_mcck: .long s390_handle_mcck 938 - .Lc_do_IRQ: .long do_IRQ 939 - .Lc_do_signal: .long do_signal 940 - .Lc_do_notify_resume: .long do_notify_resume 941 - .Lc_do_per_trap: .long do_per_trap 942 - .Lc_jump_table: .long pgm_check_table 943 - .Lc_schedule: .long schedule 944 - #ifdef CONFIG_PREEMPT 945 - .Lc_preempt_irq: .long preempt_schedule_irq 946 - #endif 947 - .Lc_trace_enter: .long do_syscall_trace_enter 948 - .Lc_trace_exit: .long do_syscall_trace_exit 949 - .Lc_schedule_tail: .long schedule_tail 950 - .Lc_sysc_per: .long .Lsysc_per + 0x80000000 951 - #ifdef CONFIG_TRACE_IRQFLAGS 952 - .Lc_hardirqs_on: .long trace_hardirqs_on_caller 953 - .Lc_hardirqs_off: .long trace_hardirqs_off_caller 954 - #endif 955 - #ifdef CONFIG_LOCKDEP 956 - .Lc_lockdep_sys_exit: .long lockdep_sys_exit 957 - #endif 958 - .Lc_critical_start: .long .L__critical_start + 0x80000000 959 - .Lc_critical_length: .long .L__critical_end - .L__critical_start 960 - 961 - .section .rodata, "a" 962 - #define SYSCALL(esa,esame,emu) .long esa 963 - .globl sys_call_table 964 - sys_call_table: 965 - #include "syscalls.S" 966 - #undef SYSCALL
-49
arch/s390/kernel/head.S
··· 27 27 #include <asm/thread_info.h> 28 28 #include <asm/page.h> 29 29 30 - #ifdef CONFIG_64BIT 31 30 #define ARCH_OFFSET 4 32 - #else 33 - #define ARCH_OFFSET 0 34 - #endif 35 31 36 32 __HEAD 37 33 ··· 63 67 # subroutine to set architecture mode 64 68 # 65 69 .Lsetmode: 66 - #ifdef CONFIG_64BIT 67 70 mvi __LC_AR_MODE_ID,1 # set esame flag 68 71 slr %r0,%r0 # set cpuid to zero 69 72 lhi %r1,2 # mode 2 = esame (dump) ··· 71 76 .fill 16,4,0x0 72 77 0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs 73 78 sam31 # switch to 31 bit addressing mode 74 - #else 75 - mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) 76 - #endif 77 79 br %r14 78 80 79 81 # 80 82 # subroutine to wait for end I/O 81 83 # 82 84 .Lirqwait: 83 - #ifdef CONFIG_64BIT 84 85 mvc 0x1f0(16),.Lnewpsw # set up IO interrupt psw 85 86 lpsw .Lwaitpsw 86 87 .Lioint: ··· 84 93 .align 8 85 94 .Lnewpsw: 86 95 .quad 0x0000000080000000,.Lioint 87 - #else 88 - mvc 0x78(8),.Lnewpsw # set up IO interrupt psw 89 - lpsw .Lwaitpsw 90 - .Lioint: 91 - br %r14 92 - .align 8 93 - .Lnewpsw: 94 - .long 0x00080000,0x80000000+.Lioint 95 - #endif 96 96 .Lwaitpsw: 97 97 .long 0x020a0000,0x80000000+.Lioint 98 98 ··· 357 375 ENTRY(startup_kdump) 358 376 j .Lep_startup_kdump 359 377 .Lep_startup_normal: 360 - #ifdef CONFIG_64BIT 361 378 mvi __LC_AR_MODE_ID,1 # set esame flag 362 379 slr %r0,%r0 # set cpuid to zero 363 380 lhi %r1,2 # mode 2 = esame (dump) ··· 365 384 .fill 16,4,0x0 366 385 0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs 367 386 sam31 # switch to 31 bit addressing mode 368 - #else 369 - mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) 370 - #endif 371 387 basr %r13,0 # get base 372 388 .LPG0: 373 389 xc 0x200(256),0x200 # partially clear lowcore ··· 374 396 spt 6f-.LPG0(%r13) 375 397 mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13) 376 398 xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST 377 - #ifndef CONFIG_MARCH_G5 378 399 # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} 379 400 .insn s,0xb2b10000,0 # 
store facilities @ __LC_STFL_FAC_LIST 380 401 tm __LC_STFL_FAC_LIST,0x01 # stfle available ? ··· 412 435 # the kernel will crash. Format is number of facility words with bits set, 413 436 # followed by the facility words. 414 437 415 - #if defined(CONFIG_64BIT) 416 438 #if defined(CONFIG_MARCH_Z13) 417 439 .long 3, 0xc100eff2, 0xf46ce800, 0x00400000 418 440 #elif defined(CONFIG_MARCH_ZEC12) ··· 427 451 #elif defined(CONFIG_MARCH_Z900) 428 452 .long 1, 0xc0000000 429 453 #endif 430 - #else 431 - #if defined(CONFIG_MARCH_ZEC12) 432 - .long 1, 0x8100c880 433 - #elif defined(CONFIG_MARCH_Z196) 434 - .long 1, 0x8100c880 435 - #elif defined(CONFIG_MARCH_Z10) 436 - .long 1, 0x8100c880 437 - #elif defined(CONFIG_MARCH_Z9_109) 438 - .long 1, 0x8100c880 439 - #elif defined(CONFIG_MARCH_Z990) 440 - .long 1, 0x80002000 441 - #elif defined(CONFIG_MARCH_Z900) 442 - .long 1, 0x80000000 443 - #endif 444 - #endif 445 454 4: 446 - #endif 447 - 448 - #ifdef CONFIG_64BIT 449 455 /* Continue with 64bit startup code in head64.S */ 450 456 sam64 # switch to 64 bit mode 451 457 jg startup_continue 452 - #else 453 - /* Continue with 31bit startup code in head31.S */ 454 - l %r13,5f-.LPG0(%r13) 455 - b 0(%r13) 456 - .align 8 457 - 5: .long startup_continue 458 - #endif 459 458 460 459 .align 8 461 460 6: .long 0x7fffffff,0xffffffff
-106
arch/s390/kernel/head31.S
··· 1 - /* 2 - * Copyright IBM Corp. 2005, 2010 3 - * 4 - * Author(s): Hartmut Penner <hp@de.ibm.com> 5 - * Martin Schwidefsky <schwidefsky@de.ibm.com> 6 - * Rob van der Heij <rvdhei@iae.nl> 7 - * Heiko Carstens <heiko.carstens@de.ibm.com> 8 - * 9 - */ 10 - 11 - #include <linux/init.h> 12 - #include <linux/linkage.h> 13 - #include <asm/asm-offsets.h> 14 - #include <asm/thread_info.h> 15 - #include <asm/page.h> 16 - 17 - __HEAD 18 - ENTRY(startup_continue) 19 - basr %r13,0 # get base 20 - .LPG1: 21 - 22 - l %r1,.Lbase_cc-.LPG1(%r13) 23 - mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK 24 - lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers 25 - l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area 26 - # move IPL device to lowcore 27 - # 28 - # Setup stack 29 - # 30 - l %r15,.Linittu-.LPG1(%r13) 31 - st %r15,__LC_THREAD_INFO # cache thread info in lowcore 32 - mvc __LC_CURRENT(4),__TI_task(%r15) 33 - ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE 34 - st %r15,__LC_KERNEL_STACK # set end of kernel stack 35 - ahi %r15,-96 36 - # 37 - # Save ipl parameters, clear bss memory, initialize storage key for kernel pages, 38 - # and create a kernel NSS if the SAVESYS= parm is defined 39 - # 40 - l %r14,.Lstartup_init-.LPG1(%r13) 41 - basr %r14,%r14 42 - lpsw .Lentry-.LPG1(13) # jump to _stext in primary-space, 43 - # virtual and never return ... 
44 - .align 8 45 - .Lentry:.long 0x00080000,0x80000000 + _stext 46 - .Lctl: .long 0x04b50000 # cr0: various things 47 - .long 0 # cr1: primary space segment table 48 - .long .Lduct # cr2: dispatchable unit control table 49 - .long 0 # cr3: instruction authorization 50 - .long 0 # cr4: instruction authorization 51 - .long .Lduct # cr5: primary-aste origin 52 - .long 0 # cr6: I/O interrupts 53 - .long 0 # cr7: secondary space segment table 54 - .long 0 # cr8: access registers translation 55 - .long 0 # cr9: tracing off 56 - .long 0 # cr10: tracing off 57 - .long 0 # cr11: tracing off 58 - .long 0 # cr12: tracing off 59 - .long 0 # cr13: home space segment table 60 - .long 0xc0000000 # cr14: machine check handling off 61 - .long 0 # cr15: linkage stack operations 62 - .Lbss_bgn: .long __bss_start 63 - .Lbss_end: .long _end 64 - .Lparmaddr: .long PARMAREA 65 - .Linittu: .long init_thread_union 66 - .Lstartup_init: 67 - .long startup_init 68 - .align 64 69 - .Lduct: .long 0,0,0,0,.Lduald,0,0,0 70 - .long 0,0,0,0,0,0,0,0 71 - .align 128 72 - .Lduald:.rept 8 73 - .long 0x80000000,0,0,0 # invalid access-list entries 74 - .endr 75 - .Lbase_cc: 76 - .long sched_clock_base_cc 77 - 78 - ENTRY(_ehead) 79 - 80 - .org 0x100000 - 0x11000 # head.o ends at 0x11000 81 - # 82 - # startup-code, running in absolute addressing mode 83 - # 84 - ENTRY(_stext) 85 - basr %r13,0 # get base 86 - .LPG3: 87 - # check control registers 88 - stctl %c0,%c15,0(%r15) 89 - oi 2(%r15),0x60 # enable sigp emergency & external call 90 - oi 0(%r15),0x10 # switch on low address protection 91 - lctl %c0,%c15,0(%r15) 92 - 93 - # 94 - lam 0,15,.Laregs-.LPG3(%r13) # load access regs needed by uaccess 95 - l %r14,.Lstart-.LPG3(%r13) 96 - basr %r14,%r14 # call start_kernel 97 - # 98 - # We returned from start_kernel ?!? 
PANIK 99 - # 100 - basr %r13,0 101 - lpsw .Ldw-.(%r13) # load disabled wait psw 102 - # 103 - .align 8 104 - .Ldw: .long 0x000a0000,0x00000000 105 - .Lstart:.long start_kernel 106 - .Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
-8
arch/s390/kernel/head_kdump.S
··· 92 92 #else 93 93 .align 2 94 94 .Lep_startup_kdump: 95 - #ifdef CONFIG_64BIT 96 95 larl %r13,startup_kdump_crash 97 96 lpswe 0(%r13) 98 97 .align 8 99 98 startup_kdump_crash: 100 99 .quad 0x0002000080000000,0x0000000000000000 + startup_kdump_crash 101 - #else 102 - basr %r13,0 103 - 0: lpsw startup_kdump_crash-0b(%r13) 104 - .align 8 105 - startup_kdump_crash: 106 - .long 0x000a0000,0x00000000 + startup_kdump_crash 107 - #endif /* CONFIG_64BIT */ 108 100 #endif /* CONFIG_CRASH_DUMP */
-2
arch/s390/kernel/ipl.c
··· 2062 2062 { 2063 2063 struct reset_call *reset; 2064 2064 2065 - #ifdef CONFIG_64BIT 2066 2065 if (diag308_set_works) { 2067 2066 diag308_reset(); 2068 2067 return; 2069 2068 } 2070 - #endif 2071 2069 list_for_each_entry(reset, &rcall, list) 2072 2070 reset->fn(); 2073 2071 }
-12
arch/s390/kernel/module.c
··· 38 38 #define DEBUGP(fmt , ...) 39 39 #endif 40 40 41 - #ifndef CONFIG_64BIT 42 - #define PLT_ENTRY_SIZE 12 43 - #else /* CONFIG_64BIT */ 44 41 #define PLT_ENTRY_SIZE 20 45 - #endif /* CONFIG_64BIT */ 46 42 47 - #ifdef CONFIG_64BIT 48 43 void *module_alloc(unsigned long size) 49 44 { 50 45 if (PAGE_ALIGN(size) > MODULES_LEN) ··· 48 53 GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, 49 54 __builtin_return_address(0)); 50 55 } 51 - #endif 52 56 53 57 void module_arch_freeing_init(struct module *mod) 54 58 { ··· 317 323 unsigned int *ip; 318 324 ip = me->module_core + me->arch.plt_offset + 319 325 info->plt_offset; 320 - #ifndef CONFIG_64BIT 321 - ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */ 322 - ip[1] = 0x100607f1; 323 - ip[2] = val; 324 - #else /* CONFIG_64BIT */ 325 326 ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ 326 327 ip[1] = 0x100a0004; 327 328 ip[2] = 0x07f10000; 328 329 ip[3] = (unsigned int) (val >> 32); 329 330 ip[4] = (unsigned int) val; 330 - #endif /* CONFIG_64BIT */ 331 331 info->plt_initialized = 1; 332 332 } 333 333 if (r_type == R_390_PLTOFF16 ||
+28 -64
arch/s390/kernel/nmi.c
··· 117 117 */ 118 118 kill_task = 1; 119 119 } 120 - #ifndef CONFIG_64BIT 120 + fpt_save_area = &S390_lowcore.floating_pt_save_area; 121 + fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area; 122 + if (!mci->fc) { 123 + /* 124 + * Floating point control register can't be restored. 125 + * Task will be terminated. 126 + */ 127 + asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero)); 128 + kill_task = 1; 129 + } else 130 + asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area)); 131 + 121 132 asm volatile( 122 133 " ld 0,0(%0)\n" 123 - " ld 2,8(%0)\n" 124 - " ld 4,16(%0)\n" 125 - " ld 6,24(%0)" 126 - : : "a" (&S390_lowcore.floating_pt_save_area)); 127 - #endif 128 - 129 - if (MACHINE_HAS_IEEE) { 130 - #ifdef CONFIG_64BIT 131 - fpt_save_area = &S390_lowcore.floating_pt_save_area; 132 - fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area; 133 - #else 134 - fpt_save_area = (void *) S390_lowcore.extended_save_area_addr; 135 - fpt_creg_save_area = fpt_save_area + 128; 136 - #endif 137 - if (!mci->fc) { 138 - /* 139 - * Floating point control register can't be restored. 140 - * Task will be terminated. 
141 - */ 142 - asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero)); 143 - kill_task = 1; 144 - 145 - } else 146 - asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area)); 147 - 148 - asm volatile( 149 - " ld 0,0(%0)\n" 150 - " ld 1,8(%0)\n" 151 - " ld 2,16(%0)\n" 152 - " ld 3,24(%0)\n" 153 - " ld 4,32(%0)\n" 154 - " ld 5,40(%0)\n" 155 - " ld 6,48(%0)\n" 156 - " ld 7,56(%0)\n" 157 - " ld 8,64(%0)\n" 158 - " ld 9,72(%0)\n" 159 - " ld 10,80(%0)\n" 160 - " ld 11,88(%0)\n" 161 - " ld 12,96(%0)\n" 162 - " ld 13,104(%0)\n" 163 - " ld 14,112(%0)\n" 164 - " ld 15,120(%0)\n" 165 - : : "a" (fpt_save_area)); 166 - } 167 - 168 - #ifdef CONFIG_64BIT 134 + " ld 1,8(%0)\n" 135 + " ld 2,16(%0)\n" 136 + " ld 3,24(%0)\n" 137 + " ld 4,32(%0)\n" 138 + " ld 5,40(%0)\n" 139 + " ld 6,48(%0)\n" 140 + " ld 7,56(%0)\n" 141 + " ld 8,64(%0)\n" 142 + " ld 9,72(%0)\n" 143 + " ld 10,80(%0)\n" 144 + " ld 11,88(%0)\n" 145 + " ld 12,96(%0)\n" 146 + " ld 13,104(%0)\n" 147 + " ld 14,112(%0)\n" 148 + " ld 15,120(%0)\n" 149 + : : "a" (fpt_save_area)); 169 150 /* Revalidate vector registers */ 170 151 if (MACHINE_HAS_VX && current->thread.vxrs) { 171 152 if (!mci->vr) { ··· 159 178 restore_vx_regs((__vector128 *) 160 179 S390_lowcore.vector_save_area_addr); 161 180 } 162 - #endif 163 181 /* Revalidate access registers */ 164 182 asm volatile( 165 183 " lam 0,15,0(%0)" ··· 178 198 */ 179 199 s390_handle_damage("invalid control registers."); 180 200 } else { 181 - #ifdef CONFIG_64BIT 182 201 asm volatile( 183 202 " lctlg 0,15,0(%0)" 184 203 : : "a" (&S390_lowcore.cregs_save_area)); 185 - #else 186 - asm volatile( 187 - " lctl 0,15,0(%0)" 188 - : : "a" (&S390_lowcore.cregs_save_area)); 189 - #endif 190 204 } 191 205 /* 192 206 * We don't even try to revalidate the TOD register, since we simply 193 207 * can't write something sensible into that register. 
194 208 */ 195 - #ifdef CONFIG_64BIT 196 209 /* 197 210 * See if we can revalidate the TOD programmable register with its 198 211 * old contents (should be zero) otherwise set it to zero. ··· 201 228 " sckpf" 202 229 : : "a" (&S390_lowcore.tod_progreg_save_area) 203 230 : "0", "cc"); 204 - #endif 205 231 /* Revalidate clock comparator register */ 206 232 set_clock_comparator(S390_lowcore.clock_comparator); 207 233 /* Check if old PSW is valid */ ··· 252 280 if (mci->b) { 253 281 /* Processing backup -> verify if we can survive this */ 254 282 u64 z_mcic, o_mcic, t_mcic; 255 - #ifdef CONFIG_64BIT 256 283 z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29); 257 284 o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 | 258 285 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | 259 286 1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 | 260 287 1ULL<<16); 261 - #else 262 - z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<57 | 1ULL<<50 | 263 - 1ULL<<29); 264 - o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 | 265 - 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | 266 - 1ULL<<30 | 1ULL<<20 | 1ULL<<17 | 1ULL<<16); 267 - #endif 268 288 t_mcic = *(u64 *)mci; 269 289 270 290 if (((t_mcic & z_mcic) != 0) ||
+8 -14
arch/s390/kernel/pgm_check.S
··· 6 6 7 7 #include <linux/linkage.h> 8 8 9 - #ifdef CONFIG_32BIT 10 - #define PGM_CHECK_64BIT(handler) .long default_trap_handler 11 - #else 12 - #define PGM_CHECK_64BIT(handler) .long handler 13 - #endif 14 - 15 9 #define PGM_CHECK(handler) .long handler 16 10 #define PGM_CHECK_DEFAULT PGM_CHECK(default_trap_handler) 17 11 18 12 /* 19 13 * The program check table contains exactly 128 (0x00-0x7f) entries. Each 20 - * line defines the 31 and/or 64 bit function to be called corresponding 21 - * to the program check interruption code. 14 + * line defines the function to be called corresponding to the program check 15 + * interruption code. 22 16 */ 23 17 .section .rodata, "a" 24 18 ENTRY(pgm_check_table) ··· 40 46 PGM_CHECK(operand_exception) /* 15 */ 41 47 PGM_CHECK_DEFAULT /* 16 */ 42 48 PGM_CHECK_DEFAULT /* 17 */ 43 - PGM_CHECK_64BIT(transaction_exception) /* 18 */ 49 + PGM_CHECK(transaction_exception) /* 18 */ 44 50 PGM_CHECK_DEFAULT /* 19 */ 45 51 PGM_CHECK_DEFAULT /* 1a */ 46 - PGM_CHECK_64BIT(vector_exception) /* 1b */ 52 + PGM_CHECK(vector_exception) /* 1b */ 47 53 PGM_CHECK(space_switch_exception) /* 1c */ 48 54 PGM_CHECK(hfp_sqrt_exception) /* 1d */ 49 55 PGM_CHECK_DEFAULT /* 1e */ ··· 72 78 PGM_CHECK_DEFAULT /* 35 */ 73 79 PGM_CHECK_DEFAULT /* 36 */ 74 80 PGM_CHECK_DEFAULT /* 37 */ 75 - PGM_CHECK_64BIT(do_dat_exception) /* 38 */ 76 - PGM_CHECK_64BIT(do_dat_exception) /* 39 */ 77 - PGM_CHECK_64BIT(do_dat_exception) /* 3a */ 78 - PGM_CHECK_64BIT(do_dat_exception) /* 3b */ 81 + PGM_CHECK(do_dat_exception) /* 38 */ 82 + PGM_CHECK(do_dat_exception) /* 39 */ 83 + PGM_CHECK(do_dat_exception) /* 3a */ 84 + PGM_CHECK(do_dat_exception) /* 3b */ 79 85 PGM_CHECK_DEFAULT /* 3c */ 80 86 PGM_CHECK_DEFAULT /* 3d */ 81 87 PGM_CHECK_DEFAULT /* 3e */
+1 -28
arch/s390/kernel/process.c
··· 79 79 { 80 80 } 81 81 82 - #ifdef CONFIG_64BIT 83 82 void arch_release_task_struct(struct task_struct *tsk) 84 83 { 85 84 if (tsk->thread.vxrs) 86 85 kfree(tsk->thread.vxrs); 87 86 } 88 - #endif 89 87 90 88 int copy_thread(unsigned long clone_flags, unsigned long new_stackp, 91 89 unsigned long arg, struct task_struct *p) ··· 142 144 p->thread.ri_signum = 0; 143 145 frame->childregs.psw.mask &= ~PSW_MASK_RI; 144 146 145 - #ifndef CONFIG_64BIT 146 - /* 147 - * save fprs to current->thread.fp_regs to merge them with 148 - * the emulated registers and then copy the result to the child. 149 - */ 150 - save_fp_ctl(&current->thread.fp_regs.fpc); 151 - save_fp_regs(current->thread.fp_regs.fprs); 152 - memcpy(&p->thread.fp_regs, &current->thread.fp_regs, 153 - sizeof(s390_fp_regs)); 154 - /* Set a new TLS ? */ 155 - if (clone_flags & CLONE_SETTLS) 156 - p->thread.acrs[0] = frame->childregs.gprs[6]; 157 - #else /* CONFIG_64BIT */ 158 147 /* Save the fpu registers to new thread structure. */ 159 148 save_fp_ctl(&p->thread.fp_regs.fpc); 160 149 save_fp_regs(p->thread.fp_regs.fprs); ··· 157 172 p->thread.acrs[1] = (unsigned int)tls; 158 173 } 159 174 } 160 - #endif /* CONFIG_64BIT */ 161 175 return 0; 162 176 } 163 177 164 178 asmlinkage void execve_tail(void) 165 179 { 166 180 current->thread.fp_regs.fpc = 0; 167 - if (MACHINE_HAS_IEEE) 168 - asm volatile("sfpc %0,%0" : : "d" (0)); 181 + asm volatile("sfpc %0,%0" : : "d" (0)); 169 182 } 170 183 171 184 /* ··· 171 188 */ 172 189 int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) 173 190 { 174 - #ifndef CONFIG_64BIT 175 - /* 176 - * save fprs to current->thread.fp_regs to merge them with 177 - * the emulated registers and then copy the result to the dump. 
178 - */ 179 - save_fp_ctl(&current->thread.fp_regs.fpc); 180 - save_fp_regs(current->thread.fp_regs.fprs); 181 - memcpy(fpregs, &current->thread.fp_regs, sizeof(s390_fp_regs)); 182 - #else /* CONFIG_64BIT */ 183 191 save_fp_ctl(&fpregs->fpc); 184 192 save_fp_regs(fpregs->fprs); 185 - #endif /* CONFIG_64BIT */ 186 193 return 1; 187 194 } 188 195 EXPORT_SYMBOL(dump_fpu);
+5 -41
arch/s390/kernel/ptrace.c
··· 44 44 struct thread_struct *thread = &task->thread; 45 45 struct per_regs old, new; 46 46 47 - #ifdef CONFIG_64BIT 48 47 /* Take care of the enable/disable of transactional execution. */ 49 48 if (MACHINE_HAS_TE || MACHINE_HAS_VX) { 50 49 unsigned long cr, cr_new; ··· 79 80 __ctl_load(cr_new, 2, 2); 80 81 } 81 82 } 82 - #endif 83 83 /* Copy user specified PER registers */ 84 84 new.control = thread->per_user.control; 85 85 new.start = thread->per_user.start; ··· 91 93 new.control |= PER_EVENT_BRANCH; 92 94 else 93 95 new.control |= PER_EVENT_IFETCH; 94 - #ifdef CONFIG_64BIT 95 96 new.control |= PER_CONTROL_SUSPENSION; 96 97 new.control |= PER_EVENT_TRANSACTION_END; 97 - #endif 98 98 if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) 99 99 new.control |= PER_EVENT_IFETCH; 100 100 new.start = 0; ··· 142 146 task->thread.per_flags = 0; 143 147 } 144 148 145 - #ifndef CONFIG_64BIT 146 - # define __ADDR_MASK 3 147 - #else 148 - # define __ADDR_MASK 7 149 - #endif 149 + #define __ADDR_MASK 7 150 150 151 151 static inline unsigned long __peek_user_per(struct task_struct *child, 152 152 addr_t addr) ··· 215 223 * access registers are stored in the thread structure 216 224 */ 217 225 offset = addr - (addr_t) &dummy->regs.acrs; 218 - #ifdef CONFIG_64BIT 219 226 /* 220 227 * Very special case: old & broken 64 bit gdb reading 221 228 * from acrs[15]. Result is a 64 bit value. 
Read the ··· 223 232 if (addr == (addr_t) &dummy->regs.acrs[15]) 224 233 tmp = ((unsigned long) child->thread.acrs[15]) << 32; 225 234 else 226 - #endif 227 - tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset); 235 + tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset); 228 236 229 237 } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { 230 238 /* ··· 251 261 * or the child->thread.vxrs array 252 262 */ 253 263 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; 254 - #ifdef CONFIG_64BIT 255 264 if (child->thread.vxrs) 256 265 tmp = *(addr_t *) 257 266 ((addr_t) child->thread.vxrs + 2*offset); 258 267 else 259 - #endif 260 268 tmp = *(addr_t *) 261 269 ((addr_t) &child->thread.fp_regs.fprs + offset); 262 270 ··· 281 293 * an alignment of 4. Programmers from hell... 282 294 */ 283 295 mask = __ADDR_MASK; 284 - #ifdef CONFIG_64BIT 285 296 if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && 286 297 addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) 287 298 mask = 3; 288 - #endif 289 299 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) 290 300 return -EIO; 291 301 ··· 356 370 * access registers are stored in the thread structure 357 371 */ 358 372 offset = addr - (addr_t) &dummy->regs.acrs; 359 - #ifdef CONFIG_64BIT 360 373 /* 361 374 * Very special case: old & broken 64 bit gdb writing 362 375 * to acrs[15] with a 64 bit value. 
Ignore the lower ··· 365 380 if (addr == (addr_t) &dummy->regs.acrs[15]) 366 381 child->thread.acrs[15] = (unsigned int) (data >> 32); 367 382 else 368 - #endif 369 - *(addr_t *)((addr_t) &child->thread.acrs + offset) = data; 383 + *(addr_t *)((addr_t) &child->thread.acrs + offset) = data; 370 384 371 385 } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { 372 386 /* ··· 395 411 * or the child->thread.vxrs array 396 412 */ 397 413 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; 398 - #ifdef CONFIG_64BIT 399 414 if (child->thread.vxrs) 400 415 *(addr_t *)((addr_t) 401 416 child->thread.vxrs + 2*offset) = data; 402 417 else 403 - #endif 404 418 *(addr_t *)((addr_t) 405 419 &child->thread.fp_regs.fprs + offset) = data; 406 420 ··· 423 441 * an alignment of 4. Programmers from hell indeed... 424 442 */ 425 443 mask = __ADDR_MASK; 426 - #ifdef CONFIG_64BIT 427 444 if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && 428 445 addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) 429 446 mask = 3; 430 - #endif 431 447 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) 432 448 return -EIO; 433 449 ··· 629 649 * or the child->thread.vxrs array 630 650 */ 631 651 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; 632 - #ifdef CONFIG_64BIT 633 652 if (child->thread.vxrs) 634 653 tmp = *(__u32 *) 635 654 ((addr_t) child->thread.vxrs + 2*offset); 636 655 else 637 - #endif 638 656 tmp = *(__u32 *) 639 657 ((addr_t) &child->thread.fp_regs.fprs + offset); 640 658 ··· 754 776 * or the child->thread.vxrs array 755 777 */ 756 778 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; 757 - #ifdef CONFIG_64BIT 758 779 if (child->thread.vxrs) 759 780 *(__u32 *)((addr_t) 760 781 child->thread.vxrs + 2*offset) = tmp; 761 782 else 762 - #endif 763 783 *(__u32 *)((addr_t) 764 784 &child->thread.fp_regs.fprs + offset) = tmp; 765 785 ··· 955 979 if (target == current) { 956 980 save_fp_ctl(&target->thread.fp_regs.fpc); 957 981 
save_fp_regs(target->thread.fp_regs.fprs); 958 - } 959 - #ifdef CONFIG_64BIT 960 - else if (target->thread.vxrs) { 982 + } else if (target->thread.vxrs) { 961 983 int i; 962 984 963 985 for (i = 0; i < __NUM_VXRS_LOW; i++) 964 986 target->thread.fp_regs.fprs[i] = 965 987 *(freg_t *)(target->thread.vxrs + i); 966 988 } 967 - #endif 968 989 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 969 990 &target->thread.fp_regs, 0, -1); 970 991 } ··· 999 1026 if (target == current) { 1000 1027 restore_fp_ctl(&target->thread.fp_regs.fpc); 1001 1028 restore_fp_regs(target->thread.fp_regs.fprs); 1002 - } 1003 - #ifdef CONFIG_64BIT 1004 - else if (target->thread.vxrs) { 1029 + } else if (target->thread.vxrs) { 1005 1030 int i; 1006 1031 1007 1032 for (i = 0; i < __NUM_VXRS_LOW; i++) 1008 1033 *(freg_t *)(target->thread.vxrs + i) = 1009 1034 target->thread.fp_regs.fprs[i]; 1010 1035 } 1011 - #endif 1012 1036 } 1013 1037 1014 1038 return rc; 1015 1039 } 1016 - 1017 - #ifdef CONFIG_64BIT 1018 1040 1019 1041 static int s390_last_break_get(struct task_struct *target, 1020 1042 const struct user_regset *regset, ··· 1150 1182 return rc; 1151 1183 } 1152 1184 1153 - #endif 1154 - 1155 1185 static int s390_system_call_get(struct task_struct *target, 1156 1186 const struct user_regset *regset, 1157 1187 unsigned int pos, unsigned int count, ··· 1195 1229 .get = s390_system_call_get, 1196 1230 .set = s390_system_call_set, 1197 1231 }, 1198 - #ifdef CONFIG_64BIT 1199 1232 { 1200 1233 .core_note_type = NT_S390_LAST_BREAK, 1201 1234 .n = 1, ··· 1227 1262 .get = s390_vxrs_high_get, 1228 1263 .set = s390_vxrs_high_set, 1229 1264 }, 1230 - #endif 1231 1265 }; 1232 1266 1233 1267 static const struct user_regset_view user_s390_view = {
-92
arch/s390/kernel/reipl.S
··· 1 - /* 2 - * S390 version 3 - * Copyright IBM Corp. 2000 4 - * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com) 5 - */ 6 - 7 - #include <linux/linkage.h> 8 - #include <asm/asm-offsets.h> 9 - #include <asm/sigp.h> 10 - 11 - # 12 - # store_status: Empty implementation until kdump is supported on 31 bit 13 - # 14 - ENTRY(store_status) 15 - br %r14 16 - 17 - # 18 - # do_reipl_asm 19 - # Parameter: r2 = schid of reipl device 20 - # 21 - ENTRY(do_reipl_asm) 22 - basr %r13,0 23 - .Lpg0: lpsw .Lnewpsw-.Lpg0(%r13) 24 - .Lpg1: # do store status of all registers 25 - 26 - stm %r0,%r15,__LC_GPREGS_SAVE_AREA 27 - stctl %c0,%c15,__LC_CREGS_SAVE_AREA 28 - stam %a0,%a15,__LC_AREGS_SAVE_AREA 29 - l %r10,.Ldump_pfx-.Lpg0(%r13) 30 - mvc __LC_PREFIX_SAVE_AREA(4),0(%r10) 31 - stckc .Lclkcmp-.Lpg0(%r13) 32 - mvc __LC_CLOCK_COMP_SAVE_AREA(8),.Lclkcmp-.Lpg0(%r13) 33 - stpt __LC_CPU_TIMER_SAVE_AREA 34 - st %r13, __LC_PSW_SAVE_AREA+4 35 - lctl %c6,%c6,.Lall-.Lpg0(%r13) 36 - lr %r1,%r2 37 - mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13) 38 - stsch .Lschib-.Lpg0(%r13) 39 - oi .Lschib+5-.Lpg0(%r13),0x84 40 - .Lecs: xi .Lschib+27-.Lpg0(%r13),0x01 41 - msch .Lschib-.Lpg0(%r13) 42 - lhi %r0,5 43 - .Lssch: ssch .Liplorb-.Lpg0(%r13) 44 - jz .L001 45 - brct %r0,.Lssch 46 - bas %r14,.Ldisab-.Lpg0(%r13) 47 - .L001: mvc __LC_IO_NEW_PSW(8),.Lionew-.Lpg0(%r13) 48 - .Ltpi: lpsw .Lwaitpsw-.Lpg0(%r13) 49 - .Lcont: c %r1,__LC_SUBCHANNEL_ID 50 - jnz .Ltpi 51 - clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13) 52 - jnz .Ltpi 53 - tsch .Liplirb-.Lpg0(%r13) 54 - tm .Liplirb+9-.Lpg0(%r13),0xbf 55 - jz .L002 56 - bas %r14,.Ldisab-.Lpg0(%r13) 57 - .L002: tm .Liplirb+8-.Lpg0(%r13),0xf3 58 - jz .L003 59 - bas %r14,.Ldisab-.Lpg0(%r13) 60 - .L003: st %r1,__LC_SUBCHANNEL_ID 61 - lpsw 0 62 - sigp 0,0,SIGP_RESTART 63 - .Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13) 64 - lpsw .Ldispsw-.Lpg0(%r13) 65 - .align 8 66 - .Lclkcmp: .quad 0x0000000000000000 67 - .Lall: .long 0xff000000 68 - .Ldump_pfx: .long dump_prefix_page 69 - 
.align 8 70 - .Lnewpsw: .long 0x00080000,0x80000000+.Lpg1 71 - .Lpcnew: .long 0x00080000,0x80000000+.Lecs 72 - .Lionew: .long 0x00080000,0x80000000+.Lcont 73 - .Lwaitpsw: .long 0x020a0000,0x00000000+.Ltpi 74 - .Ldispsw: .long 0x000a0000,0x00000000 75 - .Liplccws: .long 0x02000000,0x60000018 76 - .long 0x08000008,0x20000001 77 - .Liplorb: .long 0x0049504c,0x0040ff80 78 - .long 0x00000000+.Liplccws 79 - .Lschib: .long 0x00000000,0x00000000 80 - .long 0x00000000,0x00000000 81 - .long 0x00000000,0x00000000 82 - .long 0x00000000,0x00000000 83 - .long 0x00000000,0x00000000 84 - .long 0x00000000,0x00000000 85 - .Liplirb: .long 0x00000000,0x00000000 86 - .long 0x00000000,0x00000000 87 - .long 0x00000000,0x00000000 88 - .long 0x00000000,0x00000000 89 - .long 0x00000000,0x00000000 90 - .long 0x00000000,0x00000000 91 - .long 0x00000000,0x00000000 92 - .long 0x00000000,0x00000000
-118
arch/s390/kernel/relocate_kernel.S
··· 1 - /* 2 - * Copyright IBM Corp. 2005 3 - * 4 - * Author(s): Rolf Adelsberger, 5 - * Heiko Carstens <heiko.carstens@de.ibm.com> 6 - * 7 - */ 8 - 9 - #include <linux/linkage.h> 10 - #include <asm/sigp.h> 11 - 12 - /* 13 - * moves the new kernel to its destination... 14 - * %r2 = pointer to first kimage_entry_t 15 - * %r3 = start address - where to jump to after the job is done... 16 - * 17 - * %r5 will be used as temp. storage 18 - * %r6 holds the destination address 19 - * %r7 = PAGE_SIZE 20 - * %r8 holds the source address 21 - * %r9 = PAGE_SIZE 22 - * %r10 is a page mask 23 - */ 24 - 25 - .text 26 - ENTRY(relocate_kernel) 27 - basr %r13,0 # base address 28 - .base: 29 - stnsm sys_msk-.base(%r13),0xfb # disable DAT 30 - stctl %c0,%c15,ctlregs-.base(%r13) 31 - stm %r0,%r15,gprregs-.base(%r13) 32 - la %r1,load_psw-.base(%r13) 33 - mvc 0(8,%r0),0(%r1) 34 - la %r0,.back-.base(%r13) 35 - st %r0,4(%r0) 36 - oi 4(%r0),0x80 37 - mvc 0x68(8,%r0),0(%r1) 38 - la %r0,.back_pgm-.base(%r13) 39 - st %r0,0x6c(%r0) 40 - oi 0x6c(%r0),0x80 41 - lhi %r0,0 42 - diag %r0,%r0,0x308 43 - .back: 44 - basr %r13,0 45 - .back_base: 46 - oi have_diag308-.back_base(%r13),0x01 47 - lctl %c0,%c15,ctlregs-.back_base(%r13) 48 - lm %r0,%r15,gprregs-.back_base(%r13) 49 - j .start_reloc 50 - .back_pgm: 51 - lm %r0,%r15,gprregs-.base(%r13) 52 - .start_reloc: 53 - lhi %r10,-1 # preparing the mask 54 - sll %r10,12 # shift it such that it becomes 0xf000 55 - .top: 56 - lhi %r7,4096 # load PAGE_SIZE in r7 57 - lhi %r9,4096 # load PAGE_SIZE in r9 58 - l %r5,0(%r2) # read another word for indirection page 59 - ahi %r2,4 # increment pointer 60 - tml %r5,0x1 # is it a destination page? 61 - je .indir_check # NO, goto "indir_check" 62 - lr %r6,%r5 # r6 = r5 63 - nr %r6,%r10 # mask it out and... 64 - j .top # ...next iteration 65 - .indir_check: 66 - tml %r5,0x2 # is it a indirection page? 
67 - je .done_test # NO, goto "done_test" 68 - nr %r5,%r10 # YES, mask out, 69 - lr %r2,%r5 # move it into the right register, 70 - j .top # and read next... 71 - .done_test: 72 - tml %r5,0x4 # is it the done indicator? 73 - je .source_test # NO! Well, then it should be the source indicator... 74 - j .done # ok, lets finish it here... 75 - .source_test: 76 - tml %r5,0x8 # it should be a source indicator... 77 - je .top # NO, ignore it... 78 - lr %r8,%r5 # r8 = r5 79 - nr %r8,%r10 # masking 80 - 0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0 81 - jo 0b 82 - j .top 83 - .done: 84 - sr %r0,%r0 # clear register r0 85 - la %r4,load_psw-.base(%r13) # load psw-address into the register 86 - o %r3,4(%r4) # or load address into psw 87 - st %r3,4(%r4) 88 - mvc 0(8,%r0),0(%r4) # copy psw to absolute address 0 89 - tm have_diag308-.base(%r13),0x01 90 - jno .no_diag308 91 - diag %r0,%r0,0x308 92 - .no_diag308: 93 - sr %r1,%r1 # clear %r1 94 - sr %r2,%r2 # clear %r2 95 - sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero 96 - lpsw 0 # hopefully start new kernel... 97 - 98 - .align 8 99 - load_psw: 100 - .long 0x00080000,0x80000000 101 - sys_msk: 102 - .quad 0 103 - ctlregs: 104 - .rept 16 105 - .long 0 106 - .endr 107 - gprregs: 108 - .rept 16 109 - .long 0 110 - .endr 111 - have_diag308: 112 - .byte 0 113 - .align 8 114 - relocate_kernel_end: 115 - .align 8 116 - .globl relocate_kernel_len 117 - relocate_kernel_len: 118 - .quad relocate_kernel_end - relocate_kernel
-10
arch/s390/kernel/sclp.S
··· 36 36 ahi %r15,-96 # create stack frame 37 37 la %r8,LC_EXT_NEW_PSW # register int handler 38 38 la %r9,.LextpswS1-.LbaseS1(%r13) 39 - #ifdef CONFIG_64BIT 40 39 tm LC_AR_MODE_ID,1 41 40 jno .Lesa1 42 41 la %r8,LC_EXT_NEW_PSW_64 # register int handler 64 bit 43 42 la %r9,.LextpswS1_64-.LbaseS1(%r13) 44 43 .Lesa1: 45 - #endif 46 44 mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8) 47 45 mvc 0(16,%r8),0(%r9) 48 - #ifdef CONFIG_64BIT 49 46 epsw %r6,%r7 # set current addressing mode 50 47 nill %r6,0x1 # in new psw (31 or 64 bit mode) 51 48 nilh %r7,0x8000 52 49 stm %r6,%r7,0(%r8) 53 - #endif 54 50 lhi %r6,0x0200 # cr mask for ext int (cr0.54) 55 51 ltr %r2,%r2 56 52 jz .LsetctS1 ··· 88 92 .long 0, 0, 0, 0 # old ext int PSW 89 93 .LextpswS1: 90 94 .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int 91 - #ifdef CONFIG_64BIT 92 95 .LextpswS1_64: 93 96 .quad 0, .LwaitS1 # PSW to handle ext int, 64 bit 94 - #endif 95 97 .LwaitpswS1: 96 98 .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int 97 99 .LtimeS1: ··· 266 272 ENTRY(_sclp_print_early) 267 273 stm %r6,%r15,24(%r15) # save registers 268 274 ahi %r15,-96 # create stack frame 269 - #ifdef CONFIG_64BIT 270 275 tm LC_AR_MODE_ID,1 271 276 jno .Lesa2 272 277 ahi %r15,-80 273 278 stmh %r6,%r15,96(%r15) # store upper register halves 274 279 .Lesa2: 275 - #endif 276 280 lr %r10,%r2 # save string pointer 277 281 lhi %r2,0 278 282 bras %r14,_sclp_setup # enable console ··· 283 291 lhi %r2,1 284 292 bras %r14,_sclp_setup # disable console 285 293 .LendS5: 286 - #ifdef CONFIG_64BIT 287 294 tm LC_AR_MODE_ID,1 288 295 jno .Lesa3 289 296 lgfr %r2,%r2 # sign extend return value 290 297 lmh %r6,%r15,96(%r15) # restore upper register halves 291 298 ahi %r15,80 292 299 .Lesa3: 293 - #endif 294 300 lm %r6,%r15,120(%r15) # restore registers 295 301 br %r14 296 302
-72
arch/s390/kernel/setup.c
··· 92 92 struct page *vmemmap; 93 93 EXPORT_SYMBOL(vmemmap); 94 94 95 - #ifdef CONFIG_64BIT 96 95 unsigned long MODULES_VADDR; 97 96 unsigned long MODULES_END; 98 - #endif 99 97 100 98 /* An array with a pointer to the lowcore of every CPU. */ 101 99 struct _lowcore *lowcore_ptr[NR_CPUS]; ··· 332 334 lc->stfl_fac_list = S390_lowcore.stfl_fac_list; 333 335 memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, 334 336 MAX_FACILITY_BIT/8); 335 - #ifndef CONFIG_64BIT 336 - if (MACHINE_HAS_IEEE) { 337 - lc->extended_save_area_addr = (__u32) 338 - __alloc_bootmem_low(PAGE_SIZE, PAGE_SIZE, 0); 339 - /* enable extended save area */ 340 - __ctl_set_bit(14, 29); 341 - } 342 - #else 343 337 if (MACHINE_HAS_VX) 344 338 lc->vector_save_area_addr = 345 339 (unsigned long) &lc->vector_save_area; 346 340 lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; 347 - #endif 348 341 lc->sync_enter_timer = S390_lowcore.sync_enter_timer; 349 342 lc->async_enter_timer = S390_lowcore.async_enter_timer; 350 343 lc->exit_timer = S390_lowcore.exit_timer; ··· 439 450 unsigned long vmax, vmalloc_size, tmp; 440 451 441 452 /* Choose kernel address space layout: 2, 3, or 4 levels. */ 442 - #ifdef CONFIG_64BIT 443 453 vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN; 444 454 tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE; 445 455 tmp = tmp * (sizeof(struct page) + PAGE_SIZE); ··· 450 462 MODULES_END = vmax; 451 463 MODULES_VADDR = MODULES_END - MODULES_LEN; 452 464 VMALLOC_END = MODULES_VADDR; 453 - #else 454 - vmalloc_size = VMALLOC_END ?: 96UL << 20; 455 - vmax = 1UL << 31; /* 2-level kernel page table */ 456 - /* vmalloc area is at the end of the kernel address space. 
*/ 457 - VMALLOC_END = vmax; 458 - #endif 459 465 VMALLOC_START = vmax - vmalloc_size; 460 466 461 467 /* Split remaining virtual space between 1:1 mapping & vmemmap array */ ··· 736 754 if (MACHINE_HAS_HPAGE) 737 755 elf_hwcap |= HWCAP_S390_HPAGE; 738 756 739 - #if defined(CONFIG_64BIT) 740 757 /* 741 758 * 64-bit register support for 31-bit processes 742 759 * HWCAP_S390_HIGH_GPRS is bit 9. ··· 753 772 */ 754 773 if (test_facility(129)) 755 774 elf_hwcap |= HWCAP_S390_VXRS; 756 - #endif 757 - 758 775 get_cpu_id(&cpu_id); 759 776 add_device_randomness(&cpu_id, sizeof(cpu_id)); 760 777 switch (cpu_id.machine) { 761 778 case 0x9672: 762 - #if !defined(CONFIG_64BIT) 763 - default: /* Use "g5" as default for 31 bit kernels. */ 764 - #endif 765 779 strcpy(elf_platform, "g5"); 766 780 break; 767 781 case 0x2064: 768 782 case 0x2066: 769 - #if defined(CONFIG_64BIT) 770 783 default: /* Use "z900" as default for 64 bit kernels. */ 771 - #endif 772 784 strcpy(elf_platform, "z900"); 773 785 break; 774 786 case 0x2084: ··· 813 839 /* 814 840 * print what head.S has found out about the machine 815 841 */ 816 - #ifndef CONFIG_64BIT 817 - if (MACHINE_IS_VM) 818 - pr_info("Linux is running as a z/VM " 819 - "guest operating system in 31-bit mode\n"); 820 - else if (MACHINE_IS_LPAR) 821 - pr_info("Linux is running natively in 31-bit mode\n"); 822 - if (MACHINE_HAS_IEEE) 823 - pr_info("The hardware system has IEEE compatible " 824 - "floating point units\n"); 825 - else 826 - pr_info("The hardware system has no IEEE compatible " 827 - "floating point units\n"); 828 - #else /* CONFIG_64BIT */ 829 842 if (MACHINE_IS_VM) 830 843 pr_info("Linux is running as a z/VM " 831 844 "guest operating system in 64-bit mode\n"); ··· 820 859 pr_info("Linux is running under KVM in 64-bit mode\n"); 821 860 else if (MACHINE_IS_LPAR) 822 861 pr_info("Linux is running natively in 64-bit mode\n"); 823 - #endif /* CONFIG_64BIT */ 824 862 825 863 /* Have one command line that is parsed and saved in 
/proc/cmdline */ 826 864 /* boot_command_line has been already set up in early.c */ ··· 890 930 /* Add system specific data to the random pool */ 891 931 setup_randomness(); 892 932 } 893 - 894 - #ifdef CONFIG_32BIT 895 - static int no_removal_warning __initdata; 896 - 897 - static int __init parse_no_removal_warning(char *str) 898 - { 899 - no_removal_warning = 1; 900 - return 0; 901 - } 902 - __setup("no_removal_warning", parse_no_removal_warning); 903 - 904 - static int __init removal_warning(void) 905 - { 906 - if (no_removal_warning) 907 - return 0; 908 - printk(KERN_ALERT "\n\n"); 909 - printk(KERN_CONT "Warning - you are using a 31 bit kernel!\n\n"); 910 - printk(KERN_CONT "We plan to remove 31 bit kernel support from the kernel sources in March 2015.\n"); 911 - printk(KERN_CONT "Currently we assume that nobody is using the 31 bit kernel on old 31 bit\n"); 912 - printk(KERN_CONT "hardware anymore. If you think that the code should not be removed and also\n"); 913 - printk(KERN_CONT "future versions of the Linux kernel should be able to run in 31 bit mode\n"); 914 - printk(KERN_CONT "please let us know. Please write to:\n"); 915 - printk(KERN_CONT "linux390@de.ibm.com (mail address) and/or\n"); 916 - printk(KERN_CONT "linux-s390@vger.kernel.org (mailing list).\n\n"); 917 - printk(KERN_CONT "Thank you!\n\n"); 918 - printk(KERN_CONT "If this kernel runs on a 64 bit machine you may consider using a 64 bit kernel.\n"); 919 - printk(KERN_CONT "This message can be disabled with the \"no_removal_warning\" kernel parameter.\n"); 920 - schedule_timeout_uninterruptible(300 * HZ); 921 - return 0; 922 - } 923 - early_initcall(removal_warning); 924 - #endif
-10
arch/s390/kernel/signal.c
··· 106 106 { 107 107 save_access_regs(current->thread.acrs); 108 108 save_fp_ctl(&current->thread.fp_regs.fpc); 109 - #ifdef CONFIG_64BIT 110 109 if (current->thread.vxrs) { 111 110 int i; 112 111 ··· 114 115 current->thread.fp_regs.fprs[i] = 115 116 *(freg_t *)(current->thread.vxrs + i); 116 117 } else 117 - #endif 118 118 save_fp_regs(current->thread.fp_regs.fprs); 119 119 } 120 120 ··· 122 124 { 123 125 restore_access_regs(current->thread.acrs); 124 126 /* restore_fp_ctl is done in restore_sigregs */ 125 - #ifdef CONFIG_64BIT 126 127 if (current->thread.vxrs) { 127 128 int i; 128 129 ··· 130 133 current->thread.fp_regs.fprs[i]; 131 134 restore_vx_regs(current->thread.vxrs); 132 135 } else 133 - #endif 134 136 restore_fp_regs(current->thread.fp_regs.fprs); 135 137 } 136 138 ··· 196 200 static int save_sigregs_ext(struct pt_regs *regs, 197 201 _sigregs_ext __user *sregs_ext) 198 202 { 199 - #ifdef CONFIG_64BIT 200 203 __u64 vxrs[__NUM_VXRS_LOW]; 201 204 int i; 202 205 ··· 210 215 sizeof(sregs_ext->vxrs_high))) 211 216 return -EFAULT; 212 217 } 213 - #endif 214 218 return 0; 215 219 } 216 220 217 221 static int restore_sigregs_ext(struct pt_regs *regs, 218 222 _sigregs_ext __user *sregs_ext) 219 223 { 220 - #ifdef CONFIG_64BIT 221 224 __u64 vxrs[__NUM_VXRS_LOW]; 222 225 int i; 223 226 ··· 230 237 for (i = 0; i < __NUM_VXRS_LOW; i++) 231 238 *((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i]; 232 239 } 233 - #endif 234 240 return 0; 235 241 } 236 242 ··· 408 416 * included in the signal frame on a 31-bit system. 409 417 */ 410 418 uc_flags = 0; 411 - #ifdef CONFIG_64BIT 412 419 if (MACHINE_HAS_VX) { 413 420 frame_size += sizeof(_sigregs_ext); 414 421 if (current->thread.vxrs) 415 422 uc_flags |= UC_VXRS; 416 423 } 417 - #endif 418 424 frame = get_sigframe(&ksig->ka, regs, frame_size); 419 425 if (frame == (void __user *) -1UL) 420 426 return -EFAULT;
-33
arch/s390/kernel/smp.c
··· 198 198 lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET; 199 199 lc->cpu_nr = cpu; 200 200 lc->spinlock_lockval = arch_spin_lockval(cpu); 201 - #ifndef CONFIG_64BIT 202 - if (MACHINE_HAS_IEEE) { 203 - lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL); 204 - if (!lc->extended_save_area_addr) 205 - goto out; 206 - } 207 - #else 208 201 if (MACHINE_HAS_VX) 209 202 lc->vector_save_area_addr = 210 203 (unsigned long) &lc->vector_save_area; 211 204 if (vdso_alloc_per_cpu(lc)) 212 205 goto out; 213 - #endif 214 206 lowcore_ptr[cpu] = lc; 215 207 pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc); 216 208 return 0; ··· 221 229 { 222 230 pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0); 223 231 lowcore_ptr[pcpu - pcpu_devices] = NULL; 224 - #ifndef CONFIG_64BIT 225 - if (MACHINE_HAS_IEEE) { 226 - struct _lowcore *lc = pcpu->lowcore; 227 - 228 - free_page((unsigned long) lc->extended_save_area_addr); 229 - lc->extended_save_area_addr = 0; 230 - } 231 - #else 232 232 vdso_free_per_cpu(pcpu->lowcore); 233 - #endif 234 233 if (pcpu == &pcpu_devices[0]) 235 234 return; 236 235 free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET); ··· 474 491 { 475 492 pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single); 476 493 } 477 - 478 - #ifndef CONFIG_64BIT 479 - /* 480 - * this function sends a 'purge tlb' signal to another CPU. 481 - */ 482 - static void smp_ptlb_callback(void *info) 483 - { 484 - __tlb_flush_local(); 485 - } 486 - 487 - void smp_ptlb_all(void) 488 - { 489 - on_each_cpu(smp_ptlb_callback, NULL, 1); 490 - } 491 - EXPORT_SYMBOL(smp_ptlb_all); 492 - #endif /* ! CONFIG_64BIT */ 493 494 494 495 /* 495 496 * this function sends a 'reschedule' IPI to another CPU.
-49
arch/s390/kernel/sys_s390.c
··· 76 76 return sys_ipc(call, first, second, third, ptr, third); 77 77 } 78 78 79 - #ifdef CONFIG_64BIT 80 79 SYSCALL_DEFINE1(s390_personality, unsigned int, personality) 81 80 { 82 81 unsigned int ret; ··· 89 90 90 91 return ret; 91 92 } 92 - #endif /* CONFIG_64BIT */ 93 - 94 - /* 95 - * Wrapper function for sys_fadvise64/fadvise64_64 96 - */ 97 - #ifndef CONFIG_64BIT 98 - 99 - SYSCALL_DEFINE5(s390_fadvise64, int, fd, u32, offset_high, u32, offset_low, 100 - size_t, len, int, advice) 101 - { 102 - return sys_fadvise64(fd, (u64) offset_high << 32 | offset_low, 103 - len, advice); 104 - } 105 - 106 - struct fadvise64_64_args { 107 - int fd; 108 - long long offset; 109 - long long len; 110 - int advice; 111 - }; 112 - 113 - SYSCALL_DEFINE1(s390_fadvise64_64, struct fadvise64_64_args __user *, args) 114 - { 115 - struct fadvise64_64_args a; 116 - 117 - if ( copy_from_user(&a, args, sizeof(a)) ) 118 - return -EFAULT; 119 - return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice); 120 - } 121 - 122 - /* 123 - * This is a wrapper to call sys_fallocate(). For 31 bit s390 the last 124 - * 64 bit argument "len" is split into the upper and lower 32 bits. The 125 - * system call wrapper in the user space loads the value to %r6/%r7. 126 - * The code in entry.S keeps the values in %r2 - %r6 where they are and 127 - * stores %r7 to 96(%r15). But the standard C linkage requires that 128 - * the whole 64 bit value for len is stored on the stack and doesn't 129 - * use %r6 at all. So s390_fallocate has to convert the arguments from 130 - * %r2: fd, %r3: mode, %r4/%r5: offset, %r6/96(%r15)-99(%r15): len 131 - * to 132 - * %r2: fd, %r3: mode, %r4/%r5: offset, 96(%r15)-103(%r15): len 133 - */ 134 - SYSCALL_DEFINE5(s390_fallocate, int, fd, int, mode, loff_t, offset, 135 - u32, len_high, u32, len_low) 136 - { 137 - return sys_fallocate(fd, mode, offset, ((u64)len_high << 32) | len_low); 138 - } 139 - #endif
+2 -151
arch/s390/kernel/traps.c
··· 26 26 27 27 static inline void __user *get_trap_ip(struct pt_regs *regs) 28 28 { 29 - #ifdef CONFIG_64BIT 30 29 unsigned long address; 31 30 32 31 if (regs->int_code & 0x200) ··· 34 35 address = regs->psw.addr; 35 36 return (void __user *) 36 37 ((address - (regs->int_code >> 16)) & PSW_ADDR_INSN); 37 - #else 38 - return (void __user *) 39 - ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN); 40 - #endif 41 38 } 42 39 43 40 static inline void report_user_fault(struct pt_regs *regs, int signr) ··· 148 153 "privileged operation") 149 154 DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN, 150 155 "special operation exception") 151 - 152 - #ifdef CONFIG_64BIT 153 156 DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN, 154 157 "transaction constraint exception") 155 - #endif 156 158 157 159 static inline void do_fp_trap(struct pt_regs *regs, int fpc) 158 160 { ··· 203 211 } else if (*((__u16 *) opcode) == UPROBE_SWBP_INSN) { 204 212 is_uprobe_insn = 1; 205 213 #endif 206 - #ifdef CONFIG_MATHEMU 207 - } else if (opcode[0] == 0xb3) { 208 - if (get_user(*((__u16 *) (opcode+2)), location+1)) 209 - return; 210 - signal = math_emu_b3(opcode, regs); 211 - } else if (opcode[0] == 0xed) { 212 - if (get_user(*((__u32 *) (opcode+2)), 213 - (__u32 __user *)(location+1))) 214 - return; 215 - signal = math_emu_ed(opcode, regs); 216 - } else if (*((__u16 *) opcode) == 0xb299) { 217 - if (get_user(*((__u16 *) (opcode+2)), location+1)) 218 - return; 219 - signal = math_emu_srnm(opcode, regs); 220 - } else if (*((__u16 *) opcode) == 0xb29c) { 221 - if (get_user(*((__u16 *) (opcode+2)), location+1)) 222 - return; 223 - signal = math_emu_stfpc(opcode, regs); 224 - } else if (*((__u16 *) opcode) == 0xb29d) { 225 - if (get_user(*((__u16 *) (opcode+2)), location+1)) 226 - return; 227 - signal = math_emu_lfpc(opcode, regs); 228 - #endif 229 214 } else 230 215 signal = SIGILL; 231 216 } ··· 216 247 3, SIGTRAP) != NOTIFY_STOP) 217 248 signal = SIGILL; 218 249 } 219 - 220 - 
#ifdef CONFIG_MATHEMU 221 - if (signal == SIGFPE) 222 - do_fp_trap(regs, current->thread.fp_regs.fpc); 223 - else if (signal == SIGSEGV) 224 - do_trap(regs, signal, SEGV_MAPERR, "user address fault"); 225 - else 226 - #endif 227 250 if (signal) 228 251 do_trap(regs, signal, ILL_ILLOPC, "illegal operation"); 229 252 } 230 253 NOKPROBE_SYMBOL(illegal_op); 231 254 232 - #ifdef CONFIG_MATHEMU 233 - void specification_exception(struct pt_regs *regs) 234 - { 235 - __u8 opcode[6]; 236 - __u16 __user *location = NULL; 237 - int signal = 0; 238 - 239 - location = (__u16 __user *) get_trap_ip(regs); 240 - 241 - if (user_mode(regs)) { 242 - get_user(*((__u16 *) opcode), location); 243 - switch (opcode[0]) { 244 - case 0x28: /* LDR Rx,Ry */ 245 - signal = math_emu_ldr(opcode); 246 - break; 247 - case 0x38: /* LER Rx,Ry */ 248 - signal = math_emu_ler(opcode); 249 - break; 250 - case 0x60: /* STD R,D(X,B) */ 251 - get_user(*((__u16 *) (opcode+2)), location+1); 252 - signal = math_emu_std(opcode, regs); 253 - break; 254 - case 0x68: /* LD R,D(X,B) */ 255 - get_user(*((__u16 *) (opcode+2)), location+1); 256 - signal = math_emu_ld(opcode, regs); 257 - break; 258 - case 0x70: /* STE R,D(X,B) */ 259 - get_user(*((__u16 *) (opcode+2)), location+1); 260 - signal = math_emu_ste(opcode, regs); 261 - break; 262 - case 0x78: /* LE R,D(X,B) */ 263 - get_user(*((__u16 *) (opcode+2)), location+1); 264 - signal = math_emu_le(opcode, regs); 265 - break; 266 - default: 267 - signal = SIGILL; 268 - break; 269 - } 270 - } else 271 - signal = SIGILL; 272 - 273 - if (signal == SIGFPE) 274 - do_fp_trap(regs, current->thread.fp_regs.fpc); 275 - else if (signal) 276 - do_trap(regs, signal, ILL_ILLOPN, "specification exception"); 277 - } 278 - #else 279 255 DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, 280 256 "specification exception"); 281 - #endif 282 257 283 - #ifdef CONFIG_64BIT 284 258 int alloc_vector_registers(struct task_struct *tsk) 285 259 { 286 260 __vector128 *vxrs; ··· 289 377 
return 1; 290 378 } 291 379 __setup("novx", disable_vector_extension); 292 - #endif 293 380 294 381 void data_exception(struct pt_regs *regs) 295 382 { ··· 297 386 298 387 location = get_trap_ip(regs); 299 388 300 - if (MACHINE_HAS_IEEE) 301 - asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); 302 - 303 - #ifdef CONFIG_MATHEMU 304 - else if (user_mode(regs)) { 305 - __u8 opcode[6]; 306 - get_user(*((__u16 *) opcode), location); 307 - switch (opcode[0]) { 308 - case 0x28: /* LDR Rx,Ry */ 309 - signal = math_emu_ldr(opcode); 310 - break; 311 - case 0x38: /* LER Rx,Ry */ 312 - signal = math_emu_ler(opcode); 313 - break; 314 - case 0x60: /* STD R,D(X,B) */ 315 - get_user(*((__u16 *) (opcode+2)), location+1); 316 - signal = math_emu_std(opcode, regs); 317 - break; 318 - case 0x68: /* LD R,D(X,B) */ 319 - get_user(*((__u16 *) (opcode+2)), location+1); 320 - signal = math_emu_ld(opcode, regs); 321 - break; 322 - case 0x70: /* STE R,D(X,B) */ 323 - get_user(*((__u16 *) (opcode+2)), location+1); 324 - signal = math_emu_ste(opcode, regs); 325 - break; 326 - case 0x78: /* LE R,D(X,B) */ 327 - get_user(*((__u16 *) (opcode+2)), location+1); 328 - signal = math_emu_le(opcode, regs); 329 - break; 330 - case 0xb3: 331 - get_user(*((__u16 *) (opcode+2)), location+1); 332 - signal = math_emu_b3(opcode, regs); 333 - break; 334 - case 0xed: 335 - get_user(*((__u32 *) (opcode+2)), 336 - (__u32 __user *)(location+1)); 337 - signal = math_emu_ed(opcode, regs); 338 - break; 339 - case 0xb2: 340 - if (opcode[1] == 0x99) { 341 - get_user(*((__u16 *) (opcode+2)), location+1); 342 - signal = math_emu_srnm(opcode, regs); 343 - } else if (opcode[1] == 0x9c) { 344 - get_user(*((__u16 *) (opcode+2)), location+1); 345 - signal = math_emu_stfpc(opcode, regs); 346 - } else if (opcode[1] == 0x9d) { 347 - get_user(*((__u16 *) (opcode+2)), location+1); 348 - signal = math_emu_lfpc(opcode, regs); 349 - } else 350 - signal = SIGILL; 351 - break; 352 - default: 353 - signal = SIGILL; 354 - 
break; 355 - } 356 - } 357 - #endif 358 - #ifdef CONFIG_64BIT 389 + asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); 359 390 /* Check for vector register enablement */ 360 391 if (MACHINE_HAS_VX && !current->thread.vxrs && 361 392 (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) { ··· 307 454 clear_pt_regs_flag(regs, PIF_PER_TRAP); 308 455 return; 309 456 } 310 - #endif 311 - 312 457 if (current->thread.fp_regs.fpc & FPC_DXC_MASK) 313 458 signal = SIGFPE; 314 459 else 315 460 signal = SIGILL; 316 - if (signal == SIGFPE) 461 + if (signal == SIGFPE) 317 462 do_fp_trap(regs, current->thread.fp_regs.fpc); 318 463 else if (signal) 319 464 do_trap(regs, signal, ILL_ILLOPN, "data exception");
+2 -14
arch/s390/kernel/vdso.c
··· 32 32 #include <asm/vdso.h> 33 33 #include <asm/facility.h> 34 34 35 - #if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT) 35 + #ifdef CONFIG_COMPAT 36 36 extern char vdso32_start, vdso32_end; 37 37 static void *vdso32_kbase = &vdso32_start; 38 38 static unsigned int vdso32_pages; 39 39 static struct page **vdso32_pagelist; 40 40 #endif 41 41 42 - #ifdef CONFIG_64BIT 43 42 extern char vdso64_start, vdso64_end; 44 43 static void *vdso64_kbase = &vdso64_start; 45 44 static unsigned int vdso64_pages; 46 45 static struct page **vdso64_pagelist; 47 - #endif /* CONFIG_64BIT */ 48 46 49 47 /* 50 48 * Should the kernel map a VDSO page into processes and pass its ··· 85 87 vd->ectg_available = test_facility(31); 86 88 } 87 89 88 - #ifdef CONFIG_64BIT 89 90 /* 90 91 * Allocate/free per cpu vdso data. 91 92 */ ··· 166 169 cr5 = offsetof(struct _lowcore, paste); 167 170 __ctl_load(cr5, 5, 5); 168 171 } 169 - #endif /* CONFIG_64BIT */ 170 172 171 173 /* 172 174 * This is called from binfmt_elf, we create the special vma for the ··· 187 191 if (!uses_interp) 188 192 return 0; 189 193 190 - #ifdef CONFIG_64BIT 191 194 vdso_pagelist = vdso64_pagelist; 192 195 vdso_pages = vdso64_pages; 193 196 #ifdef CONFIG_COMPAT ··· 195 200 vdso_pages = vdso32_pages; 196 201 } 197 202 #endif 198 - #else 199 - vdso_pagelist = vdso32_pagelist; 200 - vdso_pages = vdso32_pages; 201 - #endif 202 - 203 203 /* 204 204 * vDSO has a problem and was disabled, just don't "enable" it for 205 205 * the process ··· 258 268 if (!vdso_enabled) 259 269 return 0; 260 270 vdso_init_data(vdso_data); 261 - #if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT) 271 + #ifdef CONFIG_COMPAT 262 272 /* Calculate the size of the 32 bit vDSO */ 263 273 vdso32_pages = ((&vdso32_end - &vdso32_start 264 274 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; ··· 277 287 vdso32_pagelist[vdso32_pages] = NULL; 278 288 #endif 279 289 280 - #ifdef CONFIG_64BIT 281 290 /* Calculate the size of the 64 bit vDSO */ 282 291 vdso64_pages = 
((&vdso64_end - &vdso64_start 283 292 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; ··· 296 307 if (vdso_alloc_per_cpu(&S390_lowcore)) 297 308 BUG(); 298 309 vdso_init_cr5(); 299 - #endif /* CONFIG_64BIT */ 300 310 301 311 get_page(virt_to_page(vdso_data)); 302 312
-7
arch/s390/kernel/vmlinux.lds.S
··· 6 6 #include <asm/page.h> 7 7 #include <asm-generic/vmlinux.lds.h> 8 8 9 - #ifndef CONFIG_64BIT 10 - OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") 11 - OUTPUT_ARCH(s390:31-bit) 12 - ENTRY(startup) 13 - jiffies = jiffies_64 + 4; 14 - #else 15 9 OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") 16 10 OUTPUT_ARCH(s390:64-bit) 17 11 ENTRY(startup) 18 12 jiffies = jiffies_64; 19 - #endif 20 13 21 14 PHDRS { 22 15 text PT_LOAD FLAGS(5); /* R_E */
+1 -2
arch/s390/lib/Makefile
··· 3 3 # 4 4 5 5 lib-y += delay.o string.o uaccess.o find.o 6 - obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o 7 - obj-$(CONFIG_64BIT) += mem64.o 6 + obj-y += mem64.o 8 7 lib-$(CONFIG_SMP) += spinlock.o 9 8 lib-$(CONFIG_KPROBES) += probes.o 10 9 lib-$(CONFIG_UPROBES) += probes.o
-147
arch/s390/lib/div64.c
··· 1 - /* 2 - * __div64_32 implementation for 31 bit. 3 - * 4 - * Copyright IBM Corp. 2006 5 - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 6 - */ 7 - 8 - #include <linux/types.h> 9 - #include <linux/module.h> 10 - 11 - #ifdef CONFIG_MARCH_G5 12 - 13 - /* 14 - * Function to divide an unsigned 64 bit integer by an unsigned 15 - * 31 bit integer using signed 64/32 bit division. 16 - */ 17 - static uint32_t __div64_31(uint64_t *n, uint32_t base) 18 - { 19 - register uint32_t reg2 asm("2"); 20 - register uint32_t reg3 asm("3"); 21 - uint32_t *words = (uint32_t *) n; 22 - uint32_t tmp; 23 - 24 - /* Special case base==1, remainder = 0, quotient = n */ 25 - if (base == 1) 26 - return 0; 27 - /* 28 - * Special case base==0 will cause a fixed point divide exception 29 - * on the dr instruction and may not happen anyway. For the 30 - * following calculation we can assume base > 1. The first 31 - * signed 64 / 32 bit division with an upper half of 0 will 32 - * give the correct upper half of the 64 bit quotient. 33 - */ 34 - reg2 = 0UL; 35 - reg3 = words[0]; 36 - asm volatile( 37 - " dr %0,%2\n" 38 - : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" ); 39 - words[0] = reg3; 40 - reg3 = words[1]; 41 - /* 42 - * To get the lower half of the 64 bit quotient and the 32 bit 43 - * remainder we have to use a little trick. Since we only have 44 - * a signed division the quotient can get too big. To avoid this 45 - * the 64 bit dividend is halved, then the signed division will 46 - * work. Afterwards the quotient and the remainder are doubled. 47 - * If the last bit of the dividend has been one the remainder 48 - * is increased by one then checked against the base. If the 49 - * remainder has overflown subtract base and increase the 50 - * quotient. Simple, no ? 
51 - */ 52 - asm volatile( 53 - " nr %2,%1\n" 54 - " srdl %0,1\n" 55 - " dr %0,%3\n" 56 - " alr %0,%0\n" 57 - " alr %1,%1\n" 58 - " alr %0,%2\n" 59 - " clr %0,%3\n" 60 - " jl 0f\n" 61 - " slr %0,%3\n" 62 - " ahi %1,1\n" 63 - "0:\n" 64 - : "+d" (reg2), "+d" (reg3), "=d" (tmp) 65 - : "d" (base), "2" (1UL) : "cc" ); 66 - words[1] = reg3; 67 - return reg2; 68 - } 69 - 70 - /* 71 - * Function to divide an unsigned 64 bit integer by an unsigned 72 - * 32 bit integer using the unsigned 64/31 bit division. 73 - */ 74 - uint32_t __div64_32(uint64_t *n, uint32_t base) 75 - { 76 - uint32_t r; 77 - 78 - /* 79 - * If the most significant bit of base is set, divide n by 80 - * (base/2). That allows to use 64/31 bit division and gives a 81 - * good approximation of the result: n = (base/2)*q + r. The 82 - * result needs to be corrected with two simple transformations. 83 - * If base is already < 2^31-1 __div64_31 can be used directly. 84 - */ 85 - r = __div64_31(n, ((signed) base < 0) ? (base/2) : base); 86 - if ((signed) base < 0) { 87 - uint64_t q = *n; 88 - /* 89 - * First transformation: 90 - * n = (base/2)*q + r 91 - * = ((base/2)*2)*(q/2) + ((q&1) ? (base/2) : 0) + r 92 - * Since r < (base/2), r + (base/2) < base. 93 - * With q1 = (q/2) and r1 = r + ((q&1) ? (base/2) : 0) 94 - * n = ((base/2)*2)*q1 + r1 with r1 < base. 95 - */ 96 - if (q & 1) 97 - r += base/2; 98 - q >>= 1; 99 - /* 100 - * Second transformation. ((base/2)*2) could have lost the 101 - * last bit. 102 - * n = ((base/2)*2)*q1 + r1 103 - * = base*q1 - ((base&1) ? q1 : 0) + r1 104 - */ 105 - if (base & 1) { 106 - int64_t rx = r - q; 107 - /* 108 - * base is >= 2^31. The worst case for the while 109 - * loop is n=2^64-1 base=2^31+1. That gives a 110 - * maximum for q=(2^64-1)/2^31 = 0x1ffffffff. Since 111 - * base >= 2^31 the loop is finished after a maximum 112 - * of three iterations. 
113 - */ 114 - while (rx < 0) { 115 - rx += base; 116 - q--; 117 - } 118 - r = rx; 119 - } 120 - *n = q; 121 - } 122 - return r; 123 - } 124 - 125 - #else /* MARCH_G5 */ 126 - 127 - uint32_t __div64_32(uint64_t *n, uint32_t base) 128 - { 129 - register uint32_t reg2 asm("2"); 130 - register uint32_t reg3 asm("3"); 131 - uint32_t *words = (uint32_t *) n; 132 - 133 - reg2 = 0UL; 134 - reg3 = words[0]; 135 - asm volatile( 136 - " dlr %0,%2\n" 137 - : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" ); 138 - words[0] = reg3; 139 - reg3 = words[1]; 140 - asm volatile( 141 - " dlr %0,%2\n" 142 - : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" ); 143 - words[1] = reg3; 144 - return reg2; 145 - } 146 - 147 - #endif /* MARCH_G5 */
-92
arch/s390/lib/mem32.S
··· 1 - /* 2 - * String handling functions. 3 - * 4 - * Copyright IBM Corp. 2012 5 - */ 6 - 7 - #include <linux/linkage.h> 8 - 9 - /* 10 - * memset implementation 11 - * 12 - * This code corresponds to the C construct below. We do distinguish 13 - * between clearing (c == 0) and setting a memory array (c != 0) simply 14 - * because nearly all memset invocations in the kernel clear memory and 15 - * the xc instruction is preferred in such cases. 16 - * 17 - * void *memset(void *s, int c, size_t n) 18 - * { 19 - * if (likely(c == 0)) 20 - * return __builtin_memset(s, 0, n); 21 - * return __builtin_memset(s, c, n); 22 - * } 23 - */ 24 - ENTRY(memset) 25 - basr %r5,%r0 26 - .Lmemset_base: 27 - ltr %r4,%r4 28 - bzr %r14 29 - ltr %r3,%r3 30 - jnz .Lmemset_fill 31 - ahi %r4,-1 32 - lr %r3,%r4 33 - srl %r3,8 34 - ltr %r3,%r3 35 - lr %r1,%r2 36 - je .Lmemset_clear_rest 37 - .Lmemset_clear_loop: 38 - xc 0(256,%r1),0(%r1) 39 - la %r1,256(%r1) 40 - brct %r3,.Lmemset_clear_loop 41 - .Lmemset_clear_rest: 42 - ex %r4,.Lmemset_xc-.Lmemset_base(%r5) 43 - br %r14 44 - .Lmemset_fill: 45 - stc %r3,0(%r2) 46 - chi %r4,1 47 - lr %r1,%r2 48 - ber %r14 49 - ahi %r4,-2 50 - lr %r3,%r4 51 - srl %r3,8 52 - ltr %r3,%r3 53 - je .Lmemset_fill_rest 54 - .Lmemset_fill_loop: 55 - mvc 1(256,%r1),0(%r1) 56 - la %r1,256(%r1) 57 - brct %r3,.Lmemset_fill_loop 58 - .Lmemset_fill_rest: 59 - ex %r4,.Lmemset_mvc-.Lmemset_base(%r5) 60 - br %r14 61 - .Lmemset_xc: 62 - xc 0(1,%r1),0(%r1) 63 - .Lmemset_mvc: 64 - mvc 1(1,%r1),0(%r1) 65 - 66 - /* 67 - * memcpy implementation 68 - * 69 - * void *memcpy(void *dest, const void *src, size_t n) 70 - */ 71 - ENTRY(memcpy) 72 - basr %r5,%r0 73 - .Lmemcpy_base: 74 - ltr %r4,%r4 75 - bzr %r14 76 - ahi %r4,-1 77 - lr %r0,%r4 78 - srl %r0,8 79 - ltr %r0,%r0 80 - lr %r1,%r2 81 - jnz .Lmemcpy_loop 82 - .Lmemcpy_rest: 83 - ex %r4,.Lmemcpy_mvc-.Lmemcpy_base(%r5) 84 - br %r14 85 - .Lmemcpy_loop: 86 - mvc 0(256,%r1),0(%r3) 87 - la %r1,256(%r1) 88 - la %r3,256(%r3) 89 - brct 
%r0,.Lmemcpy_loop 90 - j .Lmemcpy_rest 91 - .Lmemcpy_mvc: 92 - mvc 0(1,%r1),0(%r3)
-78
arch/s390/lib/qrnnd.S
··· 1 - # S/390 __udiv_qrnnd 2 - 3 - #include <linux/linkage.h> 4 - 5 - # r2 : &__r 6 - # r3 : upper half of 64 bit word n 7 - # r4 : lower half of 64 bit word n 8 - # r5 : divisor d 9 - # the reminder r of the division is to be stored to &__r and 10 - # the quotient q is to be returned 11 - 12 - .text 13 - ENTRY(__udiv_qrnnd) 14 - st %r2,24(%r15) # store pointer to reminder for later 15 - lr %r0,%r3 # reload n 16 - lr %r1,%r4 17 - ltr %r2,%r5 # reload and test divisor 18 - jp 5f 19 - # divisor >= 0x80000000 20 - srdl %r0,2 # n/4 21 - srl %r2,1 # d/2 22 - slr %r1,%r2 # special case if last bit of d is set 23 - brc 3,0f # (n/4) div (n/2) can overflow by 1 24 - ahi %r0,-1 # trick: subtract n/2, then divide 25 - 0: dr %r0,%r2 # signed division 26 - ahi %r1,1 # trick part 2: add 1 to the quotient 27 - # now (n >> 2) = (d >> 1) * %r1 + %r0 28 - lhi %r3,1 29 - nr %r3,%r1 # test last bit of q 30 - jz 1f 31 - alr %r0,%r2 # add (d>>1) to r 32 - 1: srl %r1,1 # q >>= 1 33 - # now (n >> 2) = (d&-2) * %r1 + %r0 34 - lhi %r3,1 35 - nr %r3,%r5 # test last bit of d 36 - jz 2f 37 - slr %r0,%r1 # r -= q 38 - brc 3,2f # borrow ? 39 - alr %r0,%r5 # r += d 40 - ahi %r1,-1 41 - 2: # now (n >> 2) = d * %r1 + %r0 42 - alr %r1,%r1 # q <<= 1 43 - alr %r0,%r0 # r <<= 1 44 - brc 12,3f # overflow on r ? 45 - slr %r0,%r5 # r -= d 46 - ahi %r1,1 # q += 1 47 - 3: lhi %r3,2 48 - nr %r3,%r4 # test next to last bit of n 49 - jz 4f 50 - ahi %r0,1 # r += 1 51 - 4: clr %r0,%r5 # r >= d ? 52 - jl 6f 53 - slr %r0,%r5 # r -= d 54 - ahi %r1,1 # q += 1 55 - # now (n >> 1) = d * %r1 + %r0 56 - j 6f 57 - 5: # divisor < 0x80000000 58 - srdl %r0,1 59 - dr %r0,%r2 # signed division 60 - # now (n >> 1) = d * %r1 + %r0 61 - 6: alr %r1,%r1 # q <<= 1 62 - alr %r0,%r0 # r <<= 1 63 - brc 12,7f # overflow on r ? 64 - slr %r0,%r5 # r -= d 65 - ahi %r1,1 # q += 1 66 - 7: lhi %r3,1 67 - nr %r3,%r4 # isolate last bit of n 68 - alr %r0,%r3 # r += (n & 1) 69 - clr %r0,%r5 # r >= d ? 
70 - jl 8f 71 - slr %r0,%r5 # r -= d 72 - ahi %r1,1 # q += 1 73 - 8: # now n = d * %r1 + %r0 74 - l %r2,24(%r15) 75 - st %r0,0(%r2) 76 - lr %r2,%r1 77 - br %r14 78 - .end __udiv_qrnnd
+61 -75
arch/s390/lib/uaccess.c
··· 15 15 #include <asm/mmu_context.h> 16 16 #include <asm/facility.h> 17 17 18 - #ifndef CONFIG_64BIT 19 - #define AHI "ahi" 20 - #define ALR "alr" 21 - #define CLR "clr" 22 - #define LHI "lhi" 23 - #define SLR "slr" 24 - #else 25 - #define AHI "aghi" 26 - #define ALR "algr" 27 - #define CLR "clgr" 28 - #define LHI "lghi" 29 - #define SLR "slgr" 30 - #endif 31 - 32 18 static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE; 33 19 34 20 static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr, ··· 27 41 asm volatile( 28 42 "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n" 29 43 "9: jz 7f\n" 30 - "1:"ALR" %0,%3\n" 31 - " "SLR" %1,%3\n" 32 - " "SLR" %2,%3\n" 44 + "1: algr %0,%3\n" 45 + " slgr %1,%3\n" 46 + " slgr %2,%3\n" 33 47 " j 0b\n" 34 48 "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */ 35 49 " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */ 36 - " "SLR" %4,%1\n" 37 - " "CLR" %0,%4\n" /* copy crosses next page boundary? */ 50 + " slgr %4,%1\n" 51 + " clgr %0,%4\n" /* copy crosses next page boundary? 
*/ 38 52 " jnh 4f\n" 39 53 "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n" 40 - "10:"SLR" %0,%4\n" 41 - " "ALR" %2,%4\n" 42 - "4:"LHI" %4,-1\n" 43 - " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */ 54 + "10:slgr %0,%4\n" 55 + " algr %2,%4\n" 56 + "4: lghi %4,-1\n" 57 + " algr %4,%0\n" /* copy remaining size, subtract 1 */ 44 58 " bras %3,6f\n" /* memset loop */ 45 59 " xc 0(1,%2),0(%2)\n" 46 60 "5: xc 0(256,%2),0(%2)\n" 47 61 " la %2,256(%2)\n" 48 - "6:"AHI" %4,-256\n" 62 + "6: aghi %4,-256\n" 49 63 " jnm 5b\n" 50 64 " ex %4,0(%3)\n" 51 65 " j 8f\n" 52 - "7:"SLR" %0,%0\n" 66 + "7:slgr %0,%0\n" 53 67 "8:\n" 54 68 EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b) 55 69 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) ··· 68 82 " sacf 0\n" 69 83 "0: mvcp 0(%0,%2),0(%1),%3\n" 70 84 "10:jz 8f\n" 71 - "1:"ALR" %0,%3\n" 85 + "1: algr %0,%3\n" 72 86 " la %1,256(%1)\n" 73 87 " la %2,256(%2)\n" 74 88 "2: mvcp 0(%0,%2),0(%1),%3\n" 75 89 "11:jnz 1b\n" 76 90 " j 8f\n" 77 91 "3: la %4,255(%1)\n" /* %4 = ptr + 255 */ 78 - " "LHI" %3,-4096\n" 92 + " lghi %3,-4096\n" 79 93 " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ 80 - " "SLR" %4,%1\n" 81 - " "CLR" %0,%4\n" /* copy crosses next page boundary? */ 94 + " slgr %4,%1\n" 95 + " clgr %0,%4\n" /* copy crosses next page boundary? 
*/ 82 96 " jnh 5f\n" 83 97 "4: mvcp 0(%4,%2),0(%1),%3\n" 84 - "12:"SLR" %0,%4\n" 85 - " "ALR" %2,%4\n" 86 - "5:"LHI" %4,-1\n" 87 - " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */ 98 + "12:slgr %0,%4\n" 99 + " algr %2,%4\n" 100 + "5: lghi %4,-1\n" 101 + " algr %4,%0\n" /* copy remaining size, subtract 1 */ 88 102 " bras %3,7f\n" /* memset loop */ 89 103 " xc 0(1,%2),0(%2)\n" 90 104 "6: xc 0(256,%2),0(%2)\n" 91 105 " la %2,256(%2)\n" 92 - "7:"AHI" %4,-256\n" 106 + "7: aghi %4,-256\n" 93 107 " jnm 6b\n" 94 108 " ex %4,0(%3)\n" 95 109 " j 9f\n" 96 - "8:"SLR" %0,%0\n" 110 + "8:slgr %0,%0\n" 97 111 "9: sacf 768\n" 98 112 EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b) 99 113 EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b) ··· 120 134 asm volatile( 121 135 "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" 122 136 "6: jz 4f\n" 123 - "1:"ALR" %0,%3\n" 124 - " "SLR" %1,%3\n" 125 - " "SLR" %2,%3\n" 137 + "1: algr %0,%3\n" 138 + " slgr %1,%3\n" 139 + " slgr %2,%3\n" 126 140 " j 0b\n" 127 141 "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */ 128 142 " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */ 129 - " "SLR" %4,%1\n" 130 - " "CLR" %0,%4\n" /* copy crosses next page boundary? */ 143 + " slgr %4,%1\n" 144 + " clgr %0,%4\n" /* copy crosses next page boundary? 
*/ 131 145 " jnh 5f\n" 132 146 "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n" 133 - "7:"SLR" %0,%4\n" 147 + "7: slgr %0,%4\n" 134 148 " j 5f\n" 135 - "4:"SLR" %0,%0\n" 149 + "4: slgr %0,%0\n" 136 150 "5:\n" 137 151 EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b) 138 152 : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) ··· 151 165 " sacf 0\n" 152 166 "0: mvcs 0(%0,%1),0(%2),%3\n" 153 167 "7: jz 5f\n" 154 - "1:"ALR" %0,%3\n" 168 + "1: algr %0,%3\n" 155 169 " la %1,256(%1)\n" 156 170 " la %2,256(%2)\n" 157 171 "2: mvcs 0(%0,%1),0(%2),%3\n" 158 172 "8: jnz 1b\n" 159 173 " j 5f\n" 160 174 "3: la %4,255(%1)\n" /* %4 = ptr + 255 */ 161 - " "LHI" %3,-4096\n" 175 + " lghi %3,-4096\n" 162 176 " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ 163 - " "SLR" %4,%1\n" 164 - " "CLR" %0,%4\n" /* copy crosses next page boundary? */ 177 + " slgr %4,%1\n" 178 + " clgr %0,%4\n" /* copy crosses next page boundary? */ 165 179 " jnh 6f\n" 166 180 "4: mvcs 0(%4,%1),0(%2),%3\n" 167 - "9:"SLR" %0,%4\n" 181 + "9: slgr %0,%4\n" 168 182 " j 6f\n" 169 - "5:"SLR" %0,%0\n" 183 + "5: slgr %0,%0\n" 170 184 "6: sacf 768\n" 171 185 EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b) 172 186 EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b) ··· 194 208 asm volatile( 195 209 "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" 196 210 " jz 2f\n" 197 - "1:"ALR" %0,%3\n" 198 - " "SLR" %1,%3\n" 199 - " "SLR" %2,%3\n" 211 + "1: algr %0,%3\n" 212 + " slgr %1,%3\n" 213 + " slgr %2,%3\n" 200 214 " j 0b\n" 201 - "2:"SLR" %0,%0\n" 215 + "2:slgr %0,%0\n" 202 216 "3: \n" 203 217 EX_TABLE(0b,3b) 204 218 : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2) ··· 214 228 load_kernel_asce(); 215 229 asm volatile( 216 230 " sacf 256\n" 217 - " "AHI" %0,-1\n" 231 + " aghi %0,-1\n" 218 232 " jo 5f\n" 219 233 " bras %3,3f\n" 220 - "0:"AHI" %0,257\n" 234 + "0: aghi %0,257\n" 221 235 "1: mvc 0(1,%1),0(%2)\n" 222 236 " la %1,1(%1)\n" 223 237 " la %2,1(%2)\n" 224 - " "AHI" %0,-1\n" 238 + " aghi 
%0,-1\n" 225 239 " jnz 1b\n" 226 240 " j 5f\n" 227 241 "2: mvc 0(256,%1),0(%2)\n" 228 242 " la %1,256(%1)\n" 229 243 " la %2,256(%2)\n" 230 - "3:"AHI" %0,-256\n" 244 + "3: aghi %0,-256\n" 231 245 " jnm 2b\n" 232 246 "4: ex %0,1b-0b(%3)\n" 233 - "5: "SLR" %0,%0\n" 247 + "5: slgr %0,%0\n" 234 248 "6: sacf 768\n" 235 249 EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) 236 250 : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1) ··· 255 269 asm volatile( 256 270 "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n" 257 271 " jz 4f\n" 258 - "1:"ALR" %0,%2\n" 259 - " "SLR" %1,%2\n" 272 + "1: algr %0,%2\n" 273 + " slgr %1,%2\n" 260 274 " j 0b\n" 261 275 "2: la %3,4095(%1)\n"/* %4 = to + 4095 */ 262 276 " nr %3,%2\n" /* %4 = (to + 4095) & -4096 */ 263 - " "SLR" %3,%1\n" 264 - " "CLR" %0,%3\n" /* copy crosses next page boundary? */ 277 + " slgr %3,%1\n" 278 + " clgr %0,%3\n" /* copy crosses next page boundary? */ 265 279 " jnh 5f\n" 266 280 "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n" 267 - " "SLR" %0,%3\n" 281 + " slgr %0,%3\n" 268 282 " j 5f\n" 269 - "4:"SLR" %0,%0\n" 283 + "4:slgr %0,%0\n" 270 284 "5:\n" 271 285 EX_TABLE(0b,2b) EX_TABLE(3b,5b) 272 286 : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2) ··· 281 295 load_kernel_asce(); 282 296 asm volatile( 283 297 " sacf 256\n" 284 - " "AHI" %0,-1\n" 298 + " aghi %0,-1\n" 285 299 " jo 5f\n" 286 300 " bras %3,3f\n" 287 301 " xc 0(1,%1),0(%1)\n" 288 - "0:"AHI" %0,257\n" 302 + "0: aghi %0,257\n" 289 303 " la %2,255(%1)\n" /* %2 = ptr + 255 */ 290 304 " srl %2,12\n" 291 305 " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */ 292 - " "SLR" %2,%1\n" 293 - " "CLR" %0,%2\n" /* clear crosses next page boundary? */ 306 + " slgr %2,%1\n" 307 + " clgr %0,%2\n" /* clear crosses next page boundary? 
*/ 294 308 " jnh 5f\n" 295 - " "AHI" %2,-1\n" 309 + " aghi %2,-1\n" 296 310 "1: ex %2,0(%3)\n" 297 - " "AHI" %2,1\n" 298 - " "SLR" %0,%2\n" 311 + " aghi %2,1\n" 312 + " slgr %0,%2\n" 299 313 " j 5f\n" 300 314 "2: xc 0(256,%1),0(%1)\n" 301 315 " la %1,256(%1)\n" 302 - "3:"AHI" %0,-256\n" 316 + "3: aghi %0,-256\n" 303 317 " jnm 2b\n" 304 318 "4: ex %0,0(%3)\n" 305 - "5: "SLR" %0,%0\n" 319 + "5: slgr %0,%0\n" 306 320 "6: sacf 768\n" 307 321 EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) 308 322 : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2) ··· 327 341 asm volatile( 328 342 " la %2,0(%1)\n" 329 343 " la %3,0(%0,%1)\n" 330 - " "SLR" %0,%0\n" 344 + " slgr %0,%0\n" 331 345 " sacf 256\n" 332 346 "0: srst %3,%2\n" 333 347 " jo 0b\n" 334 348 " la %0,1(%3)\n" /* strnlen_user results includes \0 */ 335 - " "SLR" %0,%1\n" 349 + " slgr %0,%1\n" 336 350 "1: sacf 768\n" 337 351 EX_TABLE(0b,1b) 338 352 : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2) ··· 385 399 386 400 static int __init uaccess_init(void) 387 401 { 388 - if (IS_ENABLED(CONFIG_64BIT) && !uaccess_primary && test_facility(27)) 402 + if (!uaccess_primary && test_facility(27)) 389 403 static_key_slow_inc(&have_mvcos); 390 404 return 0; 391 405 }
-26
arch/s390/lib/ucmpdi2.c
··· 1 - #include <linux/module.h> 2 - 3 - union ull_union { 4 - unsigned long long ull; 5 - struct { 6 - unsigned int high; 7 - unsigned int low; 8 - } ui; 9 - }; 10 - 11 - int __ucmpdi2(unsigned long long a, unsigned long long b) 12 - { 13 - union ull_union au = {.ull = a}; 14 - union ull_union bu = {.ull = b}; 15 - 16 - if (au.ui.high < bu.ui.high) 17 - return 0; 18 - else if (au.ui.high > bu.ui.high) 19 - return 2; 20 - if (au.ui.low < bu.ui.low) 21 - return 0; 22 - else if (au.ui.low > bu.ui.low) 23 - return 2; 24 - return 1; 25 - } 26 - EXPORT_SYMBOL(__ucmpdi2);
-7
arch/s390/math-emu/Makefile
··· 1 - # 2 - # Makefile for the FPU instruction emulation. 3 - # 4 - 5 - obj-$(CONFIG_MATHEMU) := math.o 6 - 7 - ccflags-y := -I$(src) -Iinclude/math-emu -w
-2255
arch/s390/math-emu/math.c
··· 1 - /* 2 - * S390 version 3 - * Copyright IBM Corp. 1999, 2001 4 - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 5 - * 6 - * 'math.c' emulates IEEE instructions on a S390 processor 7 - * that does not have the IEEE fpu (all processors before G5). 8 - */ 9 - 10 - #include <linux/types.h> 11 - #include <linux/sched.h> 12 - #include <linux/mm.h> 13 - #include <asm/uaccess.h> 14 - #include <asm/lowcore.h> 15 - 16 - #include <asm/sfp-util.h> 17 - #include <math-emu/soft-fp.h> 18 - #include <math-emu/single.h> 19 - #include <math-emu/double.h> 20 - #include <math-emu/quad.h> 21 - 22 - #define FPC_VALID_MASK 0xF8F8FF03 23 - 24 - /* 25 - * I miss a macro to round a floating point number to the 26 - * nearest integer in the same floating point format. 27 - */ 28 - #define _FP_TO_FPINT_ROUND(fs, wc, X) \ 29 - do { \ 30 - switch (X##_c) \ 31 - { \ 32 - case FP_CLS_NORMAL: \ 33 - if (X##_e > _FP_FRACBITS_##fs + _FP_EXPBIAS_##fs) \ 34 - { /* floating point number has no bits after the dot. */ \ 35 - } \ 36 - else if (X##_e <= _FP_FRACBITS_##fs + _FP_EXPBIAS_##fs && \ 37 - X##_e > _FP_EXPBIAS_##fs) \ 38 - { /* some bits before the dot, some after it. */ \ 39 - _FP_FRAC_SRS_##wc(X, _FP_WFRACBITS_##fs, \ 40 - X##_e - _FP_EXPBIAS_##fs \ 41 - + _FP_FRACBITS_##fs); \ 42 - _FP_ROUND(wc, X); \ 43 - _FP_FRAC_SLL_##wc(X, X##_e - _FP_EXPBIAS_##fs \ 44 - + _FP_FRACBITS_##fs); \ 45 - } \ 46 - else \ 47 - { /* all bits after the dot. 
*/ \ 48 - FP_SET_EXCEPTION(FP_EX_INEXACT); \ 49 - X##_c = FP_CLS_ZERO; \ 50 - } \ 51 - break; \ 52 - case FP_CLS_NAN: \ 53 - case FP_CLS_INF: \ 54 - case FP_CLS_ZERO: \ 55 - break; \ 56 - } \ 57 - } while (0) 58 - 59 - #define FP_TO_FPINT_ROUND_S(X) _FP_TO_FPINT_ROUND(S,1,X) 60 - #define FP_TO_FPINT_ROUND_D(X) _FP_TO_FPINT_ROUND(D,2,X) 61 - #define FP_TO_FPINT_ROUND_Q(X) _FP_TO_FPINT_ROUND(Q,4,X) 62 - 63 - typedef union { 64 - long double ld; 65 - struct { 66 - __u64 high; 67 - __u64 low; 68 - } w; 69 - } mathemu_ldcv; 70 - 71 - #ifdef CONFIG_SYSCTL 72 - int sysctl_ieee_emulation_warnings=1; 73 - #endif 74 - 75 - #define mathemu_put_user(x, p) \ 76 - do { \ 77 - if (put_user((x),(p))) \ 78 - return SIGSEGV; \ 79 - } while (0) 80 - 81 - #define mathemu_get_user(x, p) \ 82 - do { \ 83 - if (get_user((x),(p))) \ 84 - return SIGSEGV; \ 85 - } while (0) 86 - 87 - #define mathemu_copy_from_user(d, s, n)\ 88 - do { \ 89 - if (copy_from_user((d),(s),(n)) != 0) \ 90 - return SIGSEGV; \ 91 - } while (0) 92 - 93 - #define mathemu_copy_to_user(d, s, n) \ 94 - do { \ 95 - if (copy_to_user((d),(s),(n)) != 0) \ 96 - return SIGSEGV; \ 97 - } while (0) 98 - 99 - static void display_emulation_not_implemented(struct pt_regs *regs, char *instr) 100 - { 101 - __u16 *location; 102 - 103 - #ifdef CONFIG_SYSCTL 104 - if(sysctl_ieee_emulation_warnings) 105 - #endif 106 - { 107 - location = (__u16 *)(regs->psw.addr-S390_lowcore.pgm_ilc); 108 - printk("%s ieee fpu instruction not emulated " 109 - "process name: %s pid: %d \n", 110 - instr, current->comm, current->pid); 111 - printk("%s's PSW: %08lx %08lx\n", instr, 112 - (unsigned long) regs->psw.mask, 113 - (unsigned long) location); 114 - } 115 - } 116 - 117 - static inline void emu_set_CC (struct pt_regs *regs, int cc) 118 - { 119 - regs->psw.mask = (regs->psw.mask & 0xFFFFCFFF) | ((cc&3) << 12); 120 - } 121 - 122 - /* 123 - * Set the condition code in the user psw. 
124 - * 0 : Result is zero 125 - * 1 : Result is less than zero 126 - * 2 : Result is greater than zero 127 - * 3 : Result is NaN or INF 128 - */ 129 - static inline void emu_set_CC_cs(struct pt_regs *regs, int class, int sign) 130 - { 131 - switch (class) { 132 - case FP_CLS_NORMAL: 133 - case FP_CLS_INF: 134 - emu_set_CC(regs, sign ? 1 : 2); 135 - break; 136 - case FP_CLS_ZERO: 137 - emu_set_CC(regs, 0); 138 - break; 139 - case FP_CLS_NAN: 140 - emu_set_CC(regs, 3); 141 - break; 142 - } 143 - } 144 - 145 - /* Add long double */ 146 - static int emu_axbr (struct pt_regs *regs, int rx, int ry) { 147 - FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR); 148 - FP_DECL_EX; 149 - mathemu_ldcv cvt; 150 - int mode; 151 - 152 - mode = current->thread.fp_regs.fpc & 3; 153 - cvt.w.high = current->thread.fp_regs.fprs[rx].ui; 154 - cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui; 155 - FP_UNPACK_QP(QA, &cvt.ld); 156 - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; 157 - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; 158 - FP_UNPACK_QP(QB, &cvt.ld); 159 - FP_ADD_Q(QR, QA, QB); 160 - FP_PACK_QP(&cvt.ld, QR); 161 - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; 162 - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; 163 - emu_set_CC_cs(regs, QR_c, QR_s); 164 - return _fex; 165 - } 166 - 167 - /* Add double */ 168 - static int emu_adbr (struct pt_regs *regs, int rx, int ry) { 169 - FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); 170 - FP_DECL_EX; 171 - int mode; 172 - 173 - mode = current->thread.fp_regs.fpc & 3; 174 - FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); 175 - FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d); 176 - FP_ADD_D(DR, DA, DB); 177 - FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); 178 - emu_set_CC_cs(regs, DR_c, DR_s); 179 - return _fex; 180 - } 181 - 182 - /* Add double */ 183 - static int emu_adb (struct pt_regs *regs, int rx, double *val) { 184 - FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); 185 - FP_DECL_EX; 186 - int mode; 187 - 188 - 
mode = current->thread.fp_regs.fpc & 3; 189 - FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); 190 - FP_UNPACK_DP(DB, val); 191 - FP_ADD_D(DR, DA, DB); 192 - FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); 193 - emu_set_CC_cs(regs, DR_c, DR_s); 194 - return _fex; 195 - } 196 - 197 - /* Add float */ 198 - static int emu_aebr (struct pt_regs *regs, int rx, int ry) { 199 - FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); 200 - FP_DECL_EX; 201 - int mode; 202 - 203 - mode = current->thread.fp_regs.fpc & 3; 204 - FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); 205 - FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f); 206 - FP_ADD_S(SR, SA, SB); 207 - FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); 208 - emu_set_CC_cs(regs, SR_c, SR_s); 209 - return _fex; 210 - } 211 - 212 - /* Add float */ 213 - static int emu_aeb (struct pt_regs *regs, int rx, float *val) { 214 - FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); 215 - FP_DECL_EX; 216 - int mode; 217 - 218 - mode = current->thread.fp_regs.fpc & 3; 219 - FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); 220 - FP_UNPACK_SP(SB, val); 221 - FP_ADD_S(SR, SA, SB); 222 - FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); 223 - emu_set_CC_cs(regs, SR_c, SR_s); 224 - return _fex; 225 - } 226 - 227 - /* Compare long double */ 228 - static int emu_cxbr (struct pt_regs *regs, int rx, int ry) { 229 - FP_DECL_Q(QA); FP_DECL_Q(QB); 230 - mathemu_ldcv cvt; 231 - int IR; 232 - 233 - cvt.w.high = current->thread.fp_regs.fprs[rx].ui; 234 - cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui; 235 - FP_UNPACK_RAW_QP(QA, &cvt.ld); 236 - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; 237 - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; 238 - FP_UNPACK_RAW_QP(QB, &cvt.ld); 239 - FP_CMP_Q(IR, QA, QB, 3); 240 - /* 241 - * IR == -1 if DA < DB, IR == 0 if DA == DB, 242 - * IR == 1 if DA > DB and IR == 3 if unorderded 243 - */ 244 - emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 
2 : IR); 245 - return 0; 246 - } 247 - 248 - /* Compare double */ 249 - static int emu_cdbr (struct pt_regs *regs, int rx, int ry) { 250 - FP_DECL_D(DA); FP_DECL_D(DB); 251 - int IR; 252 - 253 - FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d); 254 - FP_UNPACK_RAW_DP(DB, &current->thread.fp_regs.fprs[ry].d); 255 - FP_CMP_D(IR, DA, DB, 3); 256 - /* 257 - * IR == -1 if DA < DB, IR == 0 if DA == DB, 258 - * IR == 1 if DA > DB and IR == 3 if unorderded 259 - */ 260 - emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); 261 - return 0; 262 - } 263 - 264 - /* Compare double */ 265 - static int emu_cdb (struct pt_regs *regs, int rx, double *val) { 266 - FP_DECL_D(DA); FP_DECL_D(DB); 267 - int IR; 268 - 269 - FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d); 270 - FP_UNPACK_RAW_DP(DB, val); 271 - FP_CMP_D(IR, DA, DB, 3); 272 - /* 273 - * IR == -1 if DA < DB, IR == 0 if DA == DB, 274 - * IR == 1 if DA > DB and IR == 3 if unorderded 275 - */ 276 - emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); 277 - return 0; 278 - } 279 - 280 - /* Compare float */ 281 - static int emu_cebr (struct pt_regs *regs, int rx, int ry) { 282 - FP_DECL_S(SA); FP_DECL_S(SB); 283 - int IR; 284 - 285 - FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f); 286 - FP_UNPACK_RAW_SP(SB, &current->thread.fp_regs.fprs[ry].f); 287 - FP_CMP_S(IR, SA, SB, 3); 288 - /* 289 - * IR == -1 if DA < DB, IR == 0 if DA == DB, 290 - * IR == 1 if DA > DB and IR == 3 if unorderded 291 - */ 292 - emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 
2 : IR); 293 - return 0; 294 - } 295 - 296 - /* Compare float */ 297 - static int emu_ceb (struct pt_regs *regs, int rx, float *val) { 298 - FP_DECL_S(SA); FP_DECL_S(SB); 299 - int IR; 300 - 301 - FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f); 302 - FP_UNPACK_RAW_SP(SB, val); 303 - FP_CMP_S(IR, SA, SB, 3); 304 - /* 305 - * IR == -1 if DA < DB, IR == 0 if DA == DB, 306 - * IR == 1 if DA > DB and IR == 3 if unorderded 307 - */ 308 - emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); 309 - return 0; 310 - } 311 - 312 - /* Compare and signal long double */ 313 - static int emu_kxbr (struct pt_regs *regs, int rx, int ry) { 314 - FP_DECL_Q(QA); FP_DECL_Q(QB); 315 - FP_DECL_EX; 316 - mathemu_ldcv cvt; 317 - int IR; 318 - 319 - cvt.w.high = current->thread.fp_regs.fprs[rx].ui; 320 - cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui; 321 - FP_UNPACK_RAW_QP(QA, &cvt.ld); 322 - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; 323 - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; 324 - FP_UNPACK_QP(QB, &cvt.ld); 325 - FP_CMP_Q(IR, QA, QB, 3); 326 - /* 327 - * IR == -1 if DA < DB, IR == 0 if DA == DB, 328 - * IR == 1 if DA > DB and IR == 3 if unorderded 329 - */ 330 - emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); 331 - if (IR == 3) 332 - FP_SET_EXCEPTION (FP_EX_INVALID); 333 - return _fex; 334 - } 335 - 336 - /* Compare and signal double */ 337 - static int emu_kdbr (struct pt_regs *regs, int rx, int ry) { 338 - FP_DECL_D(DA); FP_DECL_D(DB); 339 - FP_DECL_EX; 340 - int IR; 341 - 342 - FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d); 343 - FP_UNPACK_RAW_DP(DB, &current->thread.fp_regs.fprs[ry].d); 344 - FP_CMP_D(IR, DA, DB, 3); 345 - /* 346 - * IR == -1 if DA < DB, IR == 0 if DA == DB, 347 - * IR == 1 if DA > DB and IR == 3 if unorderded 348 - */ 349 - emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 
2 : IR); 350 - if (IR == 3) 351 - FP_SET_EXCEPTION (FP_EX_INVALID); 352 - return _fex; 353 - } 354 - 355 - /* Compare and signal double */ 356 - static int emu_kdb (struct pt_regs *regs, int rx, double *val) { 357 - FP_DECL_D(DA); FP_DECL_D(DB); 358 - FP_DECL_EX; 359 - int IR; 360 - 361 - FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d); 362 - FP_UNPACK_RAW_DP(DB, val); 363 - FP_CMP_D(IR, DA, DB, 3); 364 - /* 365 - * IR == -1 if DA < DB, IR == 0 if DA == DB, 366 - * IR == 1 if DA > DB and IR == 3 if unorderded 367 - */ 368 - emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); 369 - if (IR == 3) 370 - FP_SET_EXCEPTION (FP_EX_INVALID); 371 - return _fex; 372 - } 373 - 374 - /* Compare and signal float */ 375 - static int emu_kebr (struct pt_regs *regs, int rx, int ry) { 376 - FP_DECL_S(SA); FP_DECL_S(SB); 377 - FP_DECL_EX; 378 - int IR; 379 - 380 - FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f); 381 - FP_UNPACK_RAW_SP(SB, &current->thread.fp_regs.fprs[ry].f); 382 - FP_CMP_S(IR, SA, SB, 3); 383 - /* 384 - * IR == -1 if DA < DB, IR == 0 if DA == DB, 385 - * IR == 1 if DA > DB and IR == 3 if unorderded 386 - */ 387 - emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 2 : IR); 388 - if (IR == 3) 389 - FP_SET_EXCEPTION (FP_EX_INVALID); 390 - return _fex; 391 - } 392 - 393 - /* Compare and signal float */ 394 - static int emu_keb (struct pt_regs *regs, int rx, float *val) { 395 - FP_DECL_S(SA); FP_DECL_S(SB); 396 - FP_DECL_EX; 397 - int IR; 398 - 399 - FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f); 400 - FP_UNPACK_RAW_SP(SB, val); 401 - FP_CMP_S(IR, SA, SB, 3); 402 - /* 403 - * IR == -1 if DA < DB, IR == 0 if DA == DB, 404 - * IR == 1 if DA > DB and IR == 3 if unorderded 405 - */ 406 - emu_set_CC(regs, (IR == -1) ? 1 : (IR == 1) ? 
2 : IR); 407 - if (IR == 3) 408 - FP_SET_EXCEPTION (FP_EX_INVALID); 409 - return _fex; 410 - } 411 - 412 - /* Convert from fixed long double */ 413 - static int emu_cxfbr (struct pt_regs *regs, int rx, int ry) { 414 - FP_DECL_Q(QR); 415 - FP_DECL_EX; 416 - mathemu_ldcv cvt; 417 - __s32 si; 418 - int mode; 419 - 420 - mode = current->thread.fp_regs.fpc & 3; 421 - si = regs->gprs[ry]; 422 - FP_FROM_INT_Q(QR, si, 32, int); 423 - FP_PACK_QP(&cvt.ld, QR); 424 - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; 425 - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; 426 - return _fex; 427 - } 428 - 429 - /* Convert from fixed double */ 430 - static int emu_cdfbr (struct pt_regs *regs, int rx, int ry) { 431 - FP_DECL_D(DR); 432 - FP_DECL_EX; 433 - __s32 si; 434 - int mode; 435 - 436 - mode = current->thread.fp_regs.fpc & 3; 437 - si = regs->gprs[ry]; 438 - FP_FROM_INT_D(DR, si, 32, int); 439 - FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); 440 - return _fex; 441 - } 442 - 443 - /* Convert from fixed float */ 444 - static int emu_cefbr (struct pt_regs *regs, int rx, int ry) { 445 - FP_DECL_S(SR); 446 - FP_DECL_EX; 447 - __s32 si; 448 - int mode; 449 - 450 - mode = current->thread.fp_regs.fpc & 3; 451 - si = regs->gprs[ry]; 452 - FP_FROM_INT_S(SR, si, 32, int); 453 - FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); 454 - return _fex; 455 - } 456 - 457 - /* Convert to fixed long double */ 458 - static int emu_cfxbr (struct pt_regs *regs, int rx, int ry, int mask) { 459 - FP_DECL_Q(QA); 460 - FP_DECL_EX; 461 - mathemu_ldcv cvt; 462 - __s32 si; 463 - int mode; 464 - 465 - if (mask == 0) 466 - mode = current->thread.fp_regs.fpc & 3; 467 - else if (mask == 1) 468 - mode = FP_RND_NEAREST; 469 - else 470 - mode = mask - 4; 471 - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; 472 - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; 473 - FP_UNPACK_QP(QA, &cvt.ld); 474 - FP_TO_INT_ROUND_Q(si, QA, 32, 1); 475 - regs->gprs[rx] = si; 476 - emu_set_CC_cs(regs, QA_c, QA_s); 
477 - return _fex; 478 - } 479 - 480 - /* Convert to fixed double */ 481 - static int emu_cfdbr (struct pt_regs *regs, int rx, int ry, int mask) { 482 - FP_DECL_D(DA); 483 - FP_DECL_EX; 484 - __s32 si; 485 - int mode; 486 - 487 - if (mask == 0) 488 - mode = current->thread.fp_regs.fpc & 3; 489 - else if (mask == 1) 490 - mode = FP_RND_NEAREST; 491 - else 492 - mode = mask - 4; 493 - FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d); 494 - FP_TO_INT_ROUND_D(si, DA, 32, 1); 495 - regs->gprs[rx] = si; 496 - emu_set_CC_cs(regs, DA_c, DA_s); 497 - return _fex; 498 - } 499 - 500 - /* Convert to fixed float */ 501 - static int emu_cfebr (struct pt_regs *regs, int rx, int ry, int mask) { 502 - FP_DECL_S(SA); 503 - FP_DECL_EX; 504 - __s32 si; 505 - int mode; 506 - 507 - if (mask == 0) 508 - mode = current->thread.fp_regs.fpc & 3; 509 - else if (mask == 1) 510 - mode = FP_RND_NEAREST; 511 - else 512 - mode = mask - 4; 513 - FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f); 514 - FP_TO_INT_ROUND_S(si, SA, 32, 1); 515 - regs->gprs[rx] = si; 516 - emu_set_CC_cs(regs, SA_c, SA_s); 517 - return _fex; 518 - } 519 - 520 - /* Divide long double */ 521 - static int emu_dxbr (struct pt_regs *regs, int rx, int ry) { 522 - FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR); 523 - FP_DECL_EX; 524 - mathemu_ldcv cvt; 525 - int mode; 526 - 527 - mode = current->thread.fp_regs.fpc & 3; 528 - cvt.w.high = current->thread.fp_regs.fprs[rx].ui; 529 - cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui; 530 - FP_UNPACK_QP(QA, &cvt.ld); 531 - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; 532 - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; 533 - FP_UNPACK_QP(QB, &cvt.ld); 534 - FP_DIV_Q(QR, QA, QB); 535 - FP_PACK_QP(&cvt.ld, QR); 536 - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; 537 - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; 538 - return _fex; 539 - } 540 - 541 - /* Divide double */ 542 - static int emu_ddbr (struct pt_regs *regs, int rx, int ry) { 543 - FP_DECL_D(DA); 
FP_DECL_D(DB); FP_DECL_D(DR); 544 - FP_DECL_EX; 545 - int mode; 546 - 547 - mode = current->thread.fp_regs.fpc & 3; 548 - FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); 549 - FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d); 550 - FP_DIV_D(DR, DA, DB); 551 - FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); 552 - return _fex; 553 - } 554 - 555 - /* Divide double */ 556 - static int emu_ddb (struct pt_regs *regs, int rx, double *val) { 557 - FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); 558 - FP_DECL_EX; 559 - int mode; 560 - 561 - mode = current->thread.fp_regs.fpc & 3; 562 - FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); 563 - FP_UNPACK_DP(DB, val); 564 - FP_DIV_D(DR, DA, DB); 565 - FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); 566 - return _fex; 567 - } 568 - 569 - /* Divide float */ 570 - static int emu_debr (struct pt_regs *regs, int rx, int ry) { 571 - FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); 572 - FP_DECL_EX; 573 - int mode; 574 - 575 - mode = current->thread.fp_regs.fpc & 3; 576 - FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); 577 - FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f); 578 - FP_DIV_S(SR, SA, SB); 579 - FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); 580 - return _fex; 581 - } 582 - 583 - /* Divide float */ 584 - static int emu_deb (struct pt_regs *regs, int rx, float *val) { 585 - FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); 586 - FP_DECL_EX; 587 - int mode; 588 - 589 - mode = current->thread.fp_regs.fpc & 3; 590 - FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); 591 - FP_UNPACK_SP(SB, val); 592 - FP_DIV_S(SR, SA, SB); 593 - FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); 594 - return _fex; 595 - } 596 - 597 - /* Divide to integer double */ 598 - static int emu_didbr (struct pt_regs *regs, int rx, int ry, int mask) { 599 - display_emulation_not_implemented(regs, "didbr"); 600 - return 0; 601 - } 602 - 603 - /* Divide to integer float */ 604 - static int emu_diebr (struct pt_regs 
*regs, int rx, int ry, int mask) { 605 - display_emulation_not_implemented(regs, "diebr"); 606 - return 0; 607 - } 608 - 609 - /* Extract fpc */ 610 - static int emu_efpc (struct pt_regs *regs, int rx, int ry) { 611 - regs->gprs[rx] = current->thread.fp_regs.fpc; 612 - return 0; 613 - } 614 - 615 - /* Load and test long double */ 616 - static int emu_ltxbr (struct pt_regs *regs, int rx, int ry) { 617 - s390_fp_regs *fp_regs = &current->thread.fp_regs; 618 - mathemu_ldcv cvt; 619 - FP_DECL_Q(QA); 620 - FP_DECL_EX; 621 - 622 - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; 623 - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; 624 - FP_UNPACK_QP(QA, &cvt.ld); 625 - fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui; 626 - fp_regs->fprs[rx+2].ui = fp_regs->fprs[ry+2].ui; 627 - emu_set_CC_cs(regs, QA_c, QA_s); 628 - return _fex; 629 - } 630 - 631 - /* Load and test double */ 632 - static int emu_ltdbr (struct pt_regs *regs, int rx, int ry) { 633 - s390_fp_regs *fp_regs = &current->thread.fp_regs; 634 - FP_DECL_D(DA); 635 - FP_DECL_EX; 636 - 637 - FP_UNPACK_DP(DA, &fp_regs->fprs[ry].d); 638 - fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui; 639 - emu_set_CC_cs(regs, DA_c, DA_s); 640 - return _fex; 641 - } 642 - 643 - /* Load and test double */ 644 - static int emu_ltebr (struct pt_regs *regs, int rx, int ry) { 645 - s390_fp_regs *fp_regs = &current->thread.fp_regs; 646 - FP_DECL_S(SA); 647 - FP_DECL_EX; 648 - 649 - FP_UNPACK_SP(SA, &fp_regs->fprs[ry].f); 650 - fp_regs->fprs[rx].ui = fp_regs->fprs[ry].ui; 651 - emu_set_CC_cs(regs, SA_c, SA_s); 652 - return _fex; 653 - } 654 - 655 - /* Load complement long double */ 656 - static int emu_lcxbr (struct pt_regs *regs, int rx, int ry) { 657 - FP_DECL_Q(QA); FP_DECL_Q(QR); 658 - FP_DECL_EX; 659 - mathemu_ldcv cvt; 660 - int mode; 661 - 662 - mode = current->thread.fp_regs.fpc & 3; 663 - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; 664 - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; 665 - FP_UNPACK_QP(QA, &cvt.ld); 666 - 
FP_NEG_Q(QR, QA); 667 - FP_PACK_QP(&cvt.ld, QR); 668 - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; 669 - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; 670 - emu_set_CC_cs(regs, QR_c, QR_s); 671 - return _fex; 672 - } 673 - 674 - /* Load complement double */ 675 - static int emu_lcdbr (struct pt_regs *regs, int rx, int ry) { 676 - FP_DECL_D(DA); FP_DECL_D(DR); 677 - FP_DECL_EX; 678 - int mode; 679 - 680 - mode = current->thread.fp_regs.fpc & 3; 681 - FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d); 682 - FP_NEG_D(DR, DA); 683 - FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); 684 - emu_set_CC_cs(regs, DR_c, DR_s); 685 - return _fex; 686 - } 687 - 688 - /* Load complement float */ 689 - static int emu_lcebr (struct pt_regs *regs, int rx, int ry) { 690 - FP_DECL_S(SA); FP_DECL_S(SR); 691 - FP_DECL_EX; 692 - int mode; 693 - 694 - mode = current->thread.fp_regs.fpc & 3; 695 - FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f); 696 - FP_NEG_S(SR, SA); 697 - FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); 698 - emu_set_CC_cs(regs, SR_c, SR_s); 699 - return _fex; 700 - } 701 - 702 - /* Load floating point integer long double */ 703 - static int emu_fixbr (struct pt_regs *regs, int rx, int ry, int mask) { 704 - s390_fp_regs *fp_regs = &current->thread.fp_regs; 705 - FP_DECL_Q(QA); 706 - FP_DECL_EX; 707 - mathemu_ldcv cvt; 708 - __s32 si; 709 - int mode; 710 - 711 - if (mask == 0) 712 - mode = fp_regs->fpc & 3; 713 - else if (mask == 1) 714 - mode = FP_RND_NEAREST; 715 - else 716 - mode = mask - 4; 717 - cvt.w.high = fp_regs->fprs[ry].ui; 718 - cvt.w.low = fp_regs->fprs[ry+2].ui; 719 - FP_UNPACK_QP(QA, &cvt.ld); 720 - FP_TO_FPINT_ROUND_Q(QA); 721 - FP_PACK_QP(&cvt.ld, QA); 722 - fp_regs->fprs[rx].ui = cvt.w.high; 723 - fp_regs->fprs[rx+2].ui = cvt.w.low; 724 - return _fex; 725 - } 726 - 727 - /* Load floating point integer double */ 728 - static int emu_fidbr (struct pt_regs *regs, int rx, int ry, int mask) { 729 - /* FIXME: rounding mode !! 
*/ 730 - s390_fp_regs *fp_regs = &current->thread.fp_regs; 731 - FP_DECL_D(DA); 732 - FP_DECL_EX; 733 - __s32 si; 734 - int mode; 735 - 736 - if (mask == 0) 737 - mode = fp_regs->fpc & 3; 738 - else if (mask == 1) 739 - mode = FP_RND_NEAREST; 740 - else 741 - mode = mask - 4; 742 - FP_UNPACK_DP(DA, &fp_regs->fprs[ry].d); 743 - FP_TO_FPINT_ROUND_D(DA); 744 - FP_PACK_DP(&fp_regs->fprs[rx].d, DA); 745 - return _fex; 746 - } 747 - 748 - /* Load floating point integer float */ 749 - static int emu_fiebr (struct pt_regs *regs, int rx, int ry, int mask) { 750 - s390_fp_regs *fp_regs = &current->thread.fp_regs; 751 - FP_DECL_S(SA); 752 - FP_DECL_EX; 753 - __s32 si; 754 - int mode; 755 - 756 - if (mask == 0) 757 - mode = fp_regs->fpc & 3; 758 - else if (mask == 1) 759 - mode = FP_RND_NEAREST; 760 - else 761 - mode = mask - 4; 762 - FP_UNPACK_SP(SA, &fp_regs->fprs[ry].f); 763 - FP_TO_FPINT_ROUND_S(SA); 764 - FP_PACK_SP(&fp_regs->fprs[rx].f, SA); 765 - return _fex; 766 - } 767 - 768 - /* Load lengthened double to long double */ 769 - static int emu_lxdbr (struct pt_regs *regs, int rx, int ry) { 770 - FP_DECL_D(DA); FP_DECL_Q(QR); 771 - FP_DECL_EX; 772 - mathemu_ldcv cvt; 773 - int mode; 774 - 775 - mode = current->thread.fp_regs.fpc & 3; 776 - FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d); 777 - FP_CONV (Q, D, 4, 2, QR, DA); 778 - FP_PACK_QP(&cvt.ld, QR); 779 - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; 780 - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; 781 - return _fex; 782 - } 783 - 784 - /* Load lengthened double to long double */ 785 - static int emu_lxdb (struct pt_regs *regs, int rx, double *val) { 786 - FP_DECL_D(DA); FP_DECL_Q(QR); 787 - FP_DECL_EX; 788 - mathemu_ldcv cvt; 789 - int mode; 790 - 791 - mode = current->thread.fp_regs.fpc & 3; 792 - FP_UNPACK_DP(DA, val); 793 - FP_CONV (Q, D, 4, 2, QR, DA); 794 - FP_PACK_QP(&cvt.ld, QR); 795 - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; 796 - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; 
797 - return _fex; 798 - } 799 - 800 - /* Load lengthened float to long double */ 801 - static int emu_lxebr (struct pt_regs *regs, int rx, int ry) { 802 - FP_DECL_S(SA); FP_DECL_Q(QR); 803 - FP_DECL_EX; 804 - mathemu_ldcv cvt; 805 - int mode; 806 - 807 - mode = current->thread.fp_regs.fpc & 3; 808 - FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f); 809 - FP_CONV (Q, S, 4, 1, QR, SA); 810 - FP_PACK_QP(&cvt.ld, QR); 811 - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; 812 - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; 813 - return _fex; 814 - } 815 - 816 - /* Load lengthened float to long double */ 817 - static int emu_lxeb (struct pt_regs *regs, int rx, float *val) { 818 - FP_DECL_S(SA); FP_DECL_Q(QR); 819 - FP_DECL_EX; 820 - mathemu_ldcv cvt; 821 - int mode; 822 - 823 - mode = current->thread.fp_regs.fpc & 3; 824 - FP_UNPACK_SP(SA, val); 825 - FP_CONV (Q, S, 4, 1, QR, SA); 826 - FP_PACK_QP(&cvt.ld, QR); 827 - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; 828 - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; 829 - return _fex; 830 - } 831 - 832 - /* Load lengthened float to double */ 833 - static int emu_ldebr (struct pt_regs *regs, int rx, int ry) { 834 - FP_DECL_S(SA); FP_DECL_D(DR); 835 - FP_DECL_EX; 836 - int mode; 837 - 838 - mode = current->thread.fp_regs.fpc & 3; 839 - FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f); 840 - FP_CONV (D, S, 2, 1, DR, SA); 841 - FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); 842 - return _fex; 843 - } 844 - 845 - /* Load lengthened float to double */ 846 - static int emu_ldeb (struct pt_regs *regs, int rx, float *val) { 847 - FP_DECL_S(SA); FP_DECL_D(DR); 848 - FP_DECL_EX; 849 - int mode; 850 - 851 - mode = current->thread.fp_regs.fpc & 3; 852 - FP_UNPACK_SP(SA, val); 853 - FP_CONV (D, S, 2, 1, DR, SA); 854 - FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); 855 - return _fex; 856 - } 857 - 858 - /* Load negative long double */ 859 - static int emu_lnxbr (struct pt_regs *regs, int rx, int ry) 
{ 860 - FP_DECL_Q(QA); FP_DECL_Q(QR); 861 - FP_DECL_EX; 862 - mathemu_ldcv cvt; 863 - int mode; 864 - 865 - mode = current->thread.fp_regs.fpc & 3; 866 - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; 867 - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; 868 - FP_UNPACK_QP(QA, &cvt.ld); 869 - if (QA_s == 0) { 870 - FP_NEG_Q(QR, QA); 871 - FP_PACK_QP(&cvt.ld, QR); 872 - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; 873 - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; 874 - } else { 875 - current->thread.fp_regs.fprs[rx].ui = 876 - current->thread.fp_regs.fprs[ry].ui; 877 - current->thread.fp_regs.fprs[rx+2].ui = 878 - current->thread.fp_regs.fprs[ry+2].ui; 879 - } 880 - emu_set_CC_cs(regs, QR_c, QR_s); 881 - return _fex; 882 - } 883 - 884 - /* Load negative double */ 885 - static int emu_lndbr (struct pt_regs *regs, int rx, int ry) { 886 - FP_DECL_D(DA); FP_DECL_D(DR); 887 - FP_DECL_EX; 888 - int mode; 889 - 890 - mode = current->thread.fp_regs.fpc & 3; 891 - FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d); 892 - if (DA_s == 0) { 893 - FP_NEG_D(DR, DA); 894 - FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); 895 - } else 896 - current->thread.fp_regs.fprs[rx].ui = 897 - current->thread.fp_regs.fprs[ry].ui; 898 - emu_set_CC_cs(regs, DR_c, DR_s); 899 - return _fex; 900 - } 901 - 902 - /* Load negative float */ 903 - static int emu_lnebr (struct pt_regs *regs, int rx, int ry) { 904 - FP_DECL_S(SA); FP_DECL_S(SR); 905 - FP_DECL_EX; 906 - int mode; 907 - 908 - mode = current->thread.fp_regs.fpc & 3; 909 - FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f); 910 - if (SA_s == 0) { 911 - FP_NEG_S(SR, SA); 912 - FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); 913 - } else 914 - current->thread.fp_regs.fprs[rx].ui = 915 - current->thread.fp_regs.fprs[ry].ui; 916 - emu_set_CC_cs(regs, SR_c, SR_s); 917 - return _fex; 918 - } 919 - 920 - /* Load positive long double */ 921 - static int emu_lpxbr (struct pt_regs *regs, int rx, int ry) { 922 - 
FP_DECL_Q(QA); FP_DECL_Q(QR); 923 - FP_DECL_EX; 924 - mathemu_ldcv cvt; 925 - int mode; 926 - 927 - mode = current->thread.fp_regs.fpc & 3; 928 - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; 929 - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; 930 - FP_UNPACK_QP(QA, &cvt.ld); 931 - if (QA_s != 0) { 932 - FP_NEG_Q(QR, QA); 933 - FP_PACK_QP(&cvt.ld, QR); 934 - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; 935 - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; 936 - } else{ 937 - current->thread.fp_regs.fprs[rx].ui = 938 - current->thread.fp_regs.fprs[ry].ui; 939 - current->thread.fp_regs.fprs[rx+2].ui = 940 - current->thread.fp_regs.fprs[ry+2].ui; 941 - } 942 - emu_set_CC_cs(regs, QR_c, QR_s); 943 - return _fex; 944 - } 945 - 946 - /* Load positive double */ 947 - static int emu_lpdbr (struct pt_regs *regs, int rx, int ry) { 948 - FP_DECL_D(DA); FP_DECL_D(DR); 949 - FP_DECL_EX; 950 - int mode; 951 - 952 - mode = current->thread.fp_regs.fpc & 3; 953 - FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d); 954 - if (DA_s != 0) { 955 - FP_NEG_D(DR, DA); 956 - FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); 957 - } else 958 - current->thread.fp_regs.fprs[rx].ui = 959 - current->thread.fp_regs.fprs[ry].ui; 960 - emu_set_CC_cs(regs, DR_c, DR_s); 961 - return _fex; 962 - } 963 - 964 - /* Load positive float */ 965 - static int emu_lpebr (struct pt_regs *regs, int rx, int ry) { 966 - FP_DECL_S(SA); FP_DECL_S(SR); 967 - FP_DECL_EX; 968 - int mode; 969 - 970 - mode = current->thread.fp_regs.fpc & 3; 971 - FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f); 972 - if (SA_s != 0) { 973 - FP_NEG_S(SR, SA); 974 - FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); 975 - } else 976 - current->thread.fp_regs.fprs[rx].ui = 977 - current->thread.fp_regs.fprs[ry].ui; 978 - emu_set_CC_cs(regs, SR_c, SR_s); 979 - return _fex; 980 - } 981 - 982 - /* Load rounded long double to double */ 983 - static int emu_ldxbr (struct pt_regs *regs, int rx, int ry) { 984 - 
FP_DECL_Q(QA); FP_DECL_D(DR); 985 - FP_DECL_EX; 986 - mathemu_ldcv cvt; 987 - int mode; 988 - 989 - mode = current->thread.fp_regs.fpc & 3; 990 - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; 991 - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; 992 - FP_UNPACK_QP(QA, &cvt.ld); 993 - FP_CONV (D, Q, 2, 4, DR, QA); 994 - FP_PACK_DP(&current->thread.fp_regs.fprs[rx].f, DR); 995 - return _fex; 996 - } 997 - 998 - /* Load rounded long double to float */ 999 - static int emu_lexbr (struct pt_regs *regs, int rx, int ry) { 1000 - FP_DECL_Q(QA); FP_DECL_S(SR); 1001 - FP_DECL_EX; 1002 - mathemu_ldcv cvt; 1003 - int mode; 1004 - 1005 - mode = current->thread.fp_regs.fpc & 3; 1006 - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; 1007 - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; 1008 - FP_UNPACK_QP(QA, &cvt.ld); 1009 - FP_CONV (S, Q, 1, 4, SR, QA); 1010 - FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); 1011 - return _fex; 1012 - } 1013 - 1014 - /* Load rounded double to float */ 1015 - static int emu_ledbr (struct pt_regs *regs, int rx, int ry) { 1016 - FP_DECL_D(DA); FP_DECL_S(SR); 1017 - FP_DECL_EX; 1018 - int mode; 1019 - 1020 - mode = current->thread.fp_regs.fpc & 3; 1021 - FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d); 1022 - FP_CONV (S, D, 1, 2, SR, DA); 1023 - FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); 1024 - return _fex; 1025 - } 1026 - 1027 - /* Multiply long double */ 1028 - static int emu_mxbr (struct pt_regs *regs, int rx, int ry) { 1029 - FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR); 1030 - FP_DECL_EX; 1031 - mathemu_ldcv cvt; 1032 - int mode; 1033 - 1034 - mode = current->thread.fp_regs.fpc & 3; 1035 - cvt.w.high = current->thread.fp_regs.fprs[rx].ui; 1036 - cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui; 1037 - FP_UNPACK_QP(QA, &cvt.ld); 1038 - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; 1039 - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; 1040 - FP_UNPACK_QP(QB, &cvt.ld); 1041 - FP_MUL_Q(QR, QA, QB); 1042 - 
FP_PACK_QP(&cvt.ld, QR); 1043 - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; 1044 - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; 1045 - return _fex; 1046 - } 1047 - 1048 - /* Multiply double */ 1049 - static int emu_mdbr (struct pt_regs *regs, int rx, int ry) { 1050 - FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); 1051 - FP_DECL_EX; 1052 - int mode; 1053 - 1054 - mode = current->thread.fp_regs.fpc & 3; 1055 - FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); 1056 - FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d); 1057 - FP_MUL_D(DR, DA, DB); 1058 - FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); 1059 - return _fex; 1060 - } 1061 - 1062 - /* Multiply double */ 1063 - static int emu_mdb (struct pt_regs *regs, int rx, double *val) { 1064 - FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); 1065 - FP_DECL_EX; 1066 - int mode; 1067 - 1068 - mode = current->thread.fp_regs.fpc & 3; 1069 - FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); 1070 - FP_UNPACK_DP(DB, val); 1071 - FP_MUL_D(DR, DA, DB); 1072 - FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); 1073 - return _fex; 1074 - } 1075 - 1076 - /* Multiply double to long double */ 1077 - static int emu_mxdbr (struct pt_regs *regs, int rx, int ry) { 1078 - FP_DECL_D(DA); FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR); 1079 - FP_DECL_EX; 1080 - mathemu_ldcv cvt; 1081 - int mode; 1082 - 1083 - mode = current->thread.fp_regs.fpc & 3; 1084 - FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); 1085 - FP_CONV (Q, D, 4, 2, QA, DA); 1086 - FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d); 1087 - FP_CONV (Q, D, 4, 2, QB, DA); 1088 - FP_MUL_Q(QR, QA, QB); 1089 - FP_PACK_QP(&cvt.ld, QR); 1090 - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; 1091 - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; 1092 - return _fex; 1093 - } 1094 - 1095 - /* Multiply double to long double */ 1096 - static int emu_mxdb (struct pt_regs *regs, int rx, long double *val) { 1097 - FP_DECL_Q(QA); FP_DECL_Q(QB); 
FP_DECL_Q(QR); 1098 - FP_DECL_EX; 1099 - mathemu_ldcv cvt; 1100 - int mode; 1101 - 1102 - mode = current->thread.fp_regs.fpc & 3; 1103 - cvt.w.high = current->thread.fp_regs.fprs[rx].ui; 1104 - cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui; 1105 - FP_UNPACK_QP(QA, &cvt.ld); 1106 - FP_UNPACK_QP(QB, val); 1107 - FP_MUL_Q(QR, QA, QB); 1108 - FP_PACK_QP(&cvt.ld, QR); 1109 - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; 1110 - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; 1111 - return _fex; 1112 - } 1113 - 1114 - /* Multiply float */ 1115 - static int emu_meebr (struct pt_regs *regs, int rx, int ry) { 1116 - FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); 1117 - FP_DECL_EX; 1118 - int mode; 1119 - 1120 - mode = current->thread.fp_regs.fpc & 3; 1121 - FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); 1122 - FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f); 1123 - FP_MUL_S(SR, SA, SB); 1124 - FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); 1125 - return _fex; 1126 - } 1127 - 1128 - /* Multiply float */ 1129 - static int emu_meeb (struct pt_regs *regs, int rx, float *val) { 1130 - FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); 1131 - FP_DECL_EX; 1132 - int mode; 1133 - 1134 - mode = current->thread.fp_regs.fpc & 3; 1135 - FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); 1136 - FP_UNPACK_SP(SB, val); 1137 - FP_MUL_S(SR, SA, SB); 1138 - FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); 1139 - return _fex; 1140 - } 1141 - 1142 - /* Multiply float to double */ 1143 - static int emu_mdebr (struct pt_regs *regs, int rx, int ry) { 1144 - FP_DECL_S(SA); FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); 1145 - FP_DECL_EX; 1146 - int mode; 1147 - 1148 - mode = current->thread.fp_regs.fpc & 3; 1149 - FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); 1150 - FP_CONV (D, S, 2, 1, DA, SA); 1151 - FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f); 1152 - FP_CONV (D, S, 2, 1, DB, SA); 1153 - FP_MUL_D(DR, DA, DB); 1154 - 
FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); 1155 - return _fex; 1156 - } 1157 - 1158 - /* Multiply float to double */ 1159 - static int emu_mdeb (struct pt_regs *regs, int rx, float *val) { 1160 - FP_DECL_S(SA); FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); 1161 - FP_DECL_EX; 1162 - int mode; 1163 - 1164 - mode = current->thread.fp_regs.fpc & 3; 1165 - FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); 1166 - FP_CONV (D, S, 2, 1, DA, SA); 1167 - FP_UNPACK_SP(SA, val); 1168 - FP_CONV (D, S, 2, 1, DB, SA); 1169 - FP_MUL_D(DR, DA, DB); 1170 - FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); 1171 - return _fex; 1172 - } 1173 - 1174 - /* Multiply and add double */ 1175 - static int emu_madbr (struct pt_regs *regs, int rx, int ry, int rz) { 1176 - FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR); 1177 - FP_DECL_EX; 1178 - int mode; 1179 - 1180 - mode = current->thread.fp_regs.fpc & 3; 1181 - FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); 1182 - FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d); 1183 - FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d); 1184 - FP_MUL_D(DR, DA, DB); 1185 - FP_ADD_D(DR, DR, DC); 1186 - FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR); 1187 - return _fex; 1188 - } 1189 - 1190 - /* Multiply and add double */ 1191 - static int emu_madb (struct pt_regs *regs, int rx, double *val, int rz) { 1192 - FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR); 1193 - FP_DECL_EX; 1194 - int mode; 1195 - 1196 - mode = current->thread.fp_regs.fpc & 3; 1197 - FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); 1198 - FP_UNPACK_DP(DB, val); 1199 - FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d); 1200 - FP_MUL_D(DR, DA, DB); 1201 - FP_ADD_D(DR, DR, DC); 1202 - FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR); 1203 - return _fex; 1204 - } 1205 - 1206 - /* Multiply and add float */ 1207 - static int emu_maebr (struct pt_regs *regs, int rx, int ry, int rz) { 1208 - FP_DECL_S(SA); FP_DECL_S(SB); 
FP_DECL_S(SC); FP_DECL_S(SR); 1209 - FP_DECL_EX; 1210 - int mode; 1211 - 1212 - mode = current->thread.fp_regs.fpc & 3; 1213 - FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); 1214 - FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f); 1215 - FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f); 1216 - FP_MUL_S(SR, SA, SB); 1217 - FP_ADD_S(SR, SR, SC); 1218 - FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR); 1219 - return _fex; 1220 - } 1221 - 1222 - /* Multiply and add float */ 1223 - static int emu_maeb (struct pt_regs *regs, int rx, float *val, int rz) { 1224 - FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR); 1225 - FP_DECL_EX; 1226 - int mode; 1227 - 1228 - mode = current->thread.fp_regs.fpc & 3; 1229 - FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); 1230 - FP_UNPACK_SP(SB, val); 1231 - FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f); 1232 - FP_MUL_S(SR, SA, SB); 1233 - FP_ADD_S(SR, SR, SC); 1234 - FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR); 1235 - return _fex; 1236 - } 1237 - 1238 - /* Multiply and subtract double */ 1239 - static int emu_msdbr (struct pt_regs *regs, int rx, int ry, int rz) { 1240 - FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR); 1241 - FP_DECL_EX; 1242 - int mode; 1243 - 1244 - mode = current->thread.fp_regs.fpc & 3; 1245 - FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); 1246 - FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d); 1247 - FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d); 1248 - FP_MUL_D(DR, DA, DB); 1249 - FP_SUB_D(DR, DR, DC); 1250 - FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR); 1251 - return _fex; 1252 - } 1253 - 1254 - /* Multiply and subtract double */ 1255 - static int emu_msdb (struct pt_regs *regs, int rx, double *val, int rz) { 1256 - FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(DR); 1257 - FP_DECL_EX; 1258 - int mode; 1259 - 1260 - mode = current->thread.fp_regs.fpc & 3; 1261 - FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); 
1262 - FP_UNPACK_DP(DB, val); 1263 - FP_UNPACK_DP(DC, &current->thread.fp_regs.fprs[rz].d); 1264 - FP_MUL_D(DR, DA, DB); 1265 - FP_SUB_D(DR, DR, DC); 1266 - FP_PACK_DP(&current->thread.fp_regs.fprs[rz].d, DR); 1267 - return _fex; 1268 - } 1269 - 1270 - /* Multiply and subtract float */ 1271 - static int emu_msebr (struct pt_regs *regs, int rx, int ry, int rz) { 1272 - FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR); 1273 - FP_DECL_EX; 1274 - int mode; 1275 - 1276 - mode = current->thread.fp_regs.fpc & 3; 1277 - FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); 1278 - FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f); 1279 - FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f); 1280 - FP_MUL_S(SR, SA, SB); 1281 - FP_SUB_S(SR, SR, SC); 1282 - FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR); 1283 - return _fex; 1284 - } 1285 - 1286 - /* Multiply and subtract float */ 1287 - static int emu_mseb (struct pt_regs *regs, int rx, float *val, int rz) { 1288 - FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(SR); 1289 - FP_DECL_EX; 1290 - int mode; 1291 - 1292 - mode = current->thread.fp_regs.fpc & 3; 1293 - FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); 1294 - FP_UNPACK_SP(SB, val); 1295 - FP_UNPACK_SP(SC, &current->thread.fp_regs.fprs[rz].f); 1296 - FP_MUL_S(SR, SA, SB); 1297 - FP_SUB_S(SR, SR, SC); 1298 - FP_PACK_SP(&current->thread.fp_regs.fprs[rz].f, SR); 1299 - return _fex; 1300 - } 1301 - 1302 - /* Set floating point control word */ 1303 - static int emu_sfpc (struct pt_regs *regs, int rx, int ry) { 1304 - __u32 temp; 1305 - 1306 - temp = regs->gprs[rx]; 1307 - if ((temp & ~FPC_VALID_MASK) != 0) 1308 - return SIGILL; 1309 - current->thread.fp_regs.fpc = temp; 1310 - return 0; 1311 - } 1312 - 1313 - /* Square root long double */ 1314 - static int emu_sqxbr (struct pt_regs *regs, int rx, int ry) { 1315 - FP_DECL_Q(QA); FP_DECL_Q(QR); 1316 - FP_DECL_EX; 1317 - mathemu_ldcv cvt; 1318 - int mode; 1319 - 1320 - mode = 
current->thread.fp_regs.fpc & 3; 1321 - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; 1322 - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; 1323 - FP_UNPACK_QP(QA, &cvt.ld); 1324 - FP_SQRT_Q(QR, QA); 1325 - FP_PACK_QP(&cvt.ld, QR); 1326 - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; 1327 - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; 1328 - emu_set_CC_cs(regs, QR_c, QR_s); 1329 - return _fex; 1330 - } 1331 - 1332 - /* Square root double */ 1333 - static int emu_sqdbr (struct pt_regs *regs, int rx, int ry) { 1334 - FP_DECL_D(DA); FP_DECL_D(DR); 1335 - FP_DECL_EX; 1336 - int mode; 1337 - 1338 - mode = current->thread.fp_regs.fpc & 3; 1339 - FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[ry].d); 1340 - FP_SQRT_D(DR, DA); 1341 - FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); 1342 - emu_set_CC_cs(regs, DR_c, DR_s); 1343 - return _fex; 1344 - } 1345 - 1346 - /* Square root double */ 1347 - static int emu_sqdb (struct pt_regs *regs, int rx, double *val) { 1348 - FP_DECL_D(DA); FP_DECL_D(DR); 1349 - FP_DECL_EX; 1350 - int mode; 1351 - 1352 - mode = current->thread.fp_regs.fpc & 3; 1353 - FP_UNPACK_DP(DA, val); 1354 - FP_SQRT_D(DR, DA); 1355 - FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); 1356 - emu_set_CC_cs(regs, DR_c, DR_s); 1357 - return _fex; 1358 - } 1359 - 1360 - /* Square root float */ 1361 - static int emu_sqebr (struct pt_regs *regs, int rx, int ry) { 1362 - FP_DECL_S(SA); FP_DECL_S(SR); 1363 - FP_DECL_EX; 1364 - int mode; 1365 - 1366 - mode = current->thread.fp_regs.fpc & 3; 1367 - FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[ry].f); 1368 - FP_SQRT_S(SR, SA); 1369 - FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); 1370 - emu_set_CC_cs(regs, SR_c, SR_s); 1371 - return _fex; 1372 - } 1373 - 1374 - /* Square root float */ 1375 - static int emu_sqeb (struct pt_regs *regs, int rx, float *val) { 1376 - FP_DECL_S(SA); FP_DECL_S(SR); 1377 - FP_DECL_EX; 1378 - int mode; 1379 - 1380 - mode = current->thread.fp_regs.fpc & 3; 1381 - 
FP_UNPACK_SP(SA, val); 1382 - FP_SQRT_S(SR, SA); 1383 - FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); 1384 - emu_set_CC_cs(regs, SR_c, SR_s); 1385 - return _fex; 1386 - } 1387 - 1388 - /* Subtract long double */ 1389 - static int emu_sxbr (struct pt_regs *regs, int rx, int ry) { 1390 - FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR); 1391 - FP_DECL_EX; 1392 - mathemu_ldcv cvt; 1393 - int mode; 1394 - 1395 - mode = current->thread.fp_regs.fpc & 3; 1396 - cvt.w.high = current->thread.fp_regs.fprs[rx].ui; 1397 - cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui; 1398 - FP_UNPACK_QP(QA, &cvt.ld); 1399 - cvt.w.high = current->thread.fp_regs.fprs[ry].ui; 1400 - cvt.w.low = current->thread.fp_regs.fprs[ry+2].ui; 1401 - FP_UNPACK_QP(QB, &cvt.ld); 1402 - FP_SUB_Q(QR, QA, QB); 1403 - FP_PACK_QP(&cvt.ld, QR); 1404 - current->thread.fp_regs.fprs[rx].ui = cvt.w.high; 1405 - current->thread.fp_regs.fprs[rx+2].ui = cvt.w.low; 1406 - emu_set_CC_cs(regs, QR_c, QR_s); 1407 - return _fex; 1408 - } 1409 - 1410 - /* Subtract double */ 1411 - static int emu_sdbr (struct pt_regs *regs, int rx, int ry) { 1412 - FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); 1413 - FP_DECL_EX; 1414 - int mode; 1415 - 1416 - mode = current->thread.fp_regs.fpc & 3; 1417 - FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); 1418 - FP_UNPACK_DP(DB, &current->thread.fp_regs.fprs[ry].d); 1419 - FP_SUB_D(DR, DA, DB); 1420 - FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); 1421 - emu_set_CC_cs(regs, DR_c, DR_s); 1422 - return _fex; 1423 - } 1424 - 1425 - /* Subtract double */ 1426 - static int emu_sdb (struct pt_regs *regs, int rx, double *val) { 1427 - FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); 1428 - FP_DECL_EX; 1429 - int mode; 1430 - 1431 - mode = current->thread.fp_regs.fpc & 3; 1432 - FP_UNPACK_DP(DA, &current->thread.fp_regs.fprs[rx].d); 1433 - FP_UNPACK_DP(DB, val); 1434 - FP_SUB_D(DR, DA, DB); 1435 - FP_PACK_DP(&current->thread.fp_regs.fprs[rx].d, DR); 1436 - emu_set_CC_cs(regs, DR_c, DR_s); 
1437 - return _fex; 1438 - } 1439 - 1440 - /* Subtract float */ 1441 - static int emu_sebr (struct pt_regs *regs, int rx, int ry) { 1442 - FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); 1443 - FP_DECL_EX; 1444 - int mode; 1445 - 1446 - mode = current->thread.fp_regs.fpc & 3; 1447 - FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); 1448 - FP_UNPACK_SP(SB, &current->thread.fp_regs.fprs[ry].f); 1449 - FP_SUB_S(SR, SA, SB); 1450 - FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); 1451 - emu_set_CC_cs(regs, SR_c, SR_s); 1452 - return _fex; 1453 - } 1454 - 1455 - /* Subtract float */ 1456 - static int emu_seb (struct pt_regs *regs, int rx, float *val) { 1457 - FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); 1458 - FP_DECL_EX; 1459 - int mode; 1460 - 1461 - mode = current->thread.fp_regs.fpc & 3; 1462 - FP_UNPACK_SP(SA, &current->thread.fp_regs.fprs[rx].f); 1463 - FP_UNPACK_SP(SB, val); 1464 - FP_SUB_S(SR, SA, SB); 1465 - FP_PACK_SP(&current->thread.fp_regs.fprs[rx].f, SR); 1466 - emu_set_CC_cs(regs, SR_c, SR_s); 1467 - return _fex; 1468 - } 1469 - 1470 - /* Test data class long double */ 1471 - static int emu_tcxb (struct pt_regs *regs, int rx, long val) { 1472 - FP_DECL_Q(QA); 1473 - mathemu_ldcv cvt; 1474 - int bit; 1475 - 1476 - cvt.w.high = current->thread.fp_regs.fprs[rx].ui; 1477 - cvt.w.low = current->thread.fp_regs.fprs[rx+2].ui; 1478 - FP_UNPACK_RAW_QP(QA, &cvt.ld); 1479 - switch (QA_e) { 1480 - default: 1481 - bit = 8; /* normalized number */ 1482 - break; 1483 - case 0: 1484 - if (_FP_FRAC_ZEROP_4(QA)) 1485 - bit = 10; /* zero */ 1486 - else 1487 - bit = 6; /* denormalized number */ 1488 - break; 1489 - case _FP_EXPMAX_Q: 1490 - if (_FP_FRAC_ZEROP_4(QA)) 1491 - bit = 4; /* infinity */ 1492 - else if (_FP_FRAC_HIGH_RAW_Q(QA) & _FP_QNANBIT_Q) 1493 - bit = 2; /* quiet NAN */ 1494 - else 1495 - bit = 0; /* signaling NAN */ 1496 - break; 1497 - } 1498 - if (!QA_s) 1499 - bit++; 1500 - emu_set_CC(regs, ((__u32) val >> bit) & 1); 1501 - return 0; 1502 - } 1503 - 
1504 - /* Test data class double */ 1505 - static int emu_tcdb (struct pt_regs *regs, int rx, long val) { 1506 - FP_DECL_D(DA); 1507 - int bit; 1508 - 1509 - FP_UNPACK_RAW_DP(DA, &current->thread.fp_regs.fprs[rx].d); 1510 - switch (DA_e) { 1511 - default: 1512 - bit = 8; /* normalized number */ 1513 - break; 1514 - case 0: 1515 - if (_FP_FRAC_ZEROP_2(DA)) 1516 - bit = 10; /* zero */ 1517 - else 1518 - bit = 6; /* denormalized number */ 1519 - break; 1520 - case _FP_EXPMAX_D: 1521 - if (_FP_FRAC_ZEROP_2(DA)) 1522 - bit = 4; /* infinity */ 1523 - else if (_FP_FRAC_HIGH_RAW_D(DA) & _FP_QNANBIT_D) 1524 - bit = 2; /* quiet NAN */ 1525 - else 1526 - bit = 0; /* signaling NAN */ 1527 - break; 1528 - } 1529 - if (!DA_s) 1530 - bit++; 1531 - emu_set_CC(regs, ((__u32) val >> bit) & 1); 1532 - return 0; 1533 - } 1534 - 1535 - /* Test data class float */ 1536 - static int emu_tceb (struct pt_regs *regs, int rx, long val) { 1537 - FP_DECL_S(SA); 1538 - int bit; 1539 - 1540 - FP_UNPACK_RAW_SP(SA, &current->thread.fp_regs.fprs[rx].f); 1541 - switch (SA_e) { 1542 - default: 1543 - bit = 8; /* normalized number */ 1544 - break; 1545 - case 0: 1546 - if (_FP_FRAC_ZEROP_1(SA)) 1547 - bit = 10; /* zero */ 1548 - else 1549 - bit = 6; /* denormalized number */ 1550 - break; 1551 - case _FP_EXPMAX_S: 1552 - if (_FP_FRAC_ZEROP_1(SA)) 1553 - bit = 4; /* infinity */ 1554 - else if (_FP_FRAC_HIGH_RAW_S(SA) & _FP_QNANBIT_S) 1555 - bit = 2; /* quiet NAN */ 1556 - else 1557 - bit = 0; /* signaling NAN */ 1558 - break; 1559 - } 1560 - if (!SA_s) 1561 - bit++; 1562 - emu_set_CC(regs, ((__u32) val >> bit) & 1); 1563 - return 0; 1564 - } 1565 - 1566 - static inline void emu_load_regd(int reg) { 1567 - if ((reg&9) != 0) /* test if reg in {0,2,4,6} */ 1568 - return; 1569 - asm volatile( /* load reg from fp_regs.fprs[reg] */ 1570 - " bras 1,0f\n" 1571 - " ld 0,0(%1)\n" 1572 - "0: ex %0,0(1)" 1573 - : /* no output */ 1574 - : "a" (reg<<4),"a" (&current->thread.fp_regs.fprs[reg].d) 1575 - : "1"); 1576 - 
} 1577 - 1578 - static inline void emu_load_rege(int reg) { 1579 - if ((reg&9) != 0) /* test if reg in {0,2,4,6} */ 1580 - return; 1581 - asm volatile( /* load reg from fp_regs.fprs[reg] */ 1582 - " bras 1,0f\n" 1583 - " le 0,0(%1)\n" 1584 - "0: ex %0,0(1)" 1585 - : /* no output */ 1586 - : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f) 1587 - : "1"); 1588 - } 1589 - 1590 - static inline void emu_store_regd(int reg) { 1591 - if ((reg&9) != 0) /* test if reg in {0,2,4,6} */ 1592 - return; 1593 - asm volatile( /* store reg to fp_regs.fprs[reg] */ 1594 - " bras 1,0f\n" 1595 - " std 0,0(%1)\n" 1596 - "0: ex %0,0(1)" 1597 - : /* no output */ 1598 - : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].d) 1599 - : "1"); 1600 - } 1601 - 1602 - 1603 - static inline void emu_store_rege(int reg) { 1604 - if ((reg&9) != 0) /* test if reg in {0,2,4,6} */ 1605 - return; 1606 - asm volatile( /* store reg to fp_regs.fprs[reg] */ 1607 - " bras 1,0f\n" 1608 - " ste 0,0(%1)\n" 1609 - "0: ex %0,0(1)" 1610 - : /* no output */ 1611 - : "a" (reg<<4), "a" (&current->thread.fp_regs.fprs[reg].f) 1612 - : "1"); 1613 - } 1614 - 1615 - int math_emu_b3(__u8 *opcode, struct pt_regs * regs) { 1616 - int _fex = 0; 1617 - static const __u8 format_table[256] = { 1618 - [0x00] = 0x03,[0x01] = 0x03,[0x02] = 0x03,[0x03] = 0x03, 1619 - [0x04] = 0x0f,[0x05] = 0x0d,[0x06] = 0x0e,[0x07] = 0x0d, 1620 - [0x08] = 0x03,[0x09] = 0x03,[0x0a] = 0x03,[0x0b] = 0x03, 1621 - [0x0c] = 0x0f,[0x0d] = 0x03,[0x0e] = 0x06,[0x0f] = 0x06, 1622 - [0x10] = 0x02,[0x11] = 0x02,[0x12] = 0x02,[0x13] = 0x02, 1623 - [0x14] = 0x03,[0x15] = 0x02,[0x16] = 0x01,[0x17] = 0x03, 1624 - [0x18] = 0x02,[0x19] = 0x02,[0x1a] = 0x02,[0x1b] = 0x02, 1625 - [0x1c] = 0x02,[0x1d] = 0x02,[0x1e] = 0x05,[0x1f] = 0x05, 1626 - [0x40] = 0x01,[0x41] = 0x01,[0x42] = 0x01,[0x43] = 0x01, 1627 - [0x44] = 0x12,[0x45] = 0x0d,[0x46] = 0x11,[0x47] = 0x04, 1628 - [0x48] = 0x01,[0x49] = 0x01,[0x4a] = 0x01,[0x4b] = 0x01, 1629 - [0x4c] = 0x01,[0x4d] = 
0x01,[0x53] = 0x06,[0x57] = 0x06, 1630 - [0x5b] = 0x05,[0x5f] = 0x05,[0x84] = 0x13,[0x8c] = 0x13, 1631 - [0x94] = 0x09,[0x95] = 0x08,[0x96] = 0x07,[0x98] = 0x0c, 1632 - [0x99] = 0x0b,[0x9a] = 0x0a 1633 - }; 1634 - static const void *jump_table[256]= { 1635 - [0x00] = emu_lpebr,[0x01] = emu_lnebr,[0x02] = emu_ltebr, 1636 - [0x03] = emu_lcebr,[0x04] = emu_ldebr,[0x05] = emu_lxdbr, 1637 - [0x06] = emu_lxebr,[0x07] = emu_mxdbr,[0x08] = emu_kebr, 1638 - [0x09] = emu_cebr, [0x0a] = emu_aebr, [0x0b] = emu_sebr, 1639 - [0x0c] = emu_mdebr,[0x0d] = emu_debr, [0x0e] = emu_maebr, 1640 - [0x0f] = emu_msebr,[0x10] = emu_lpdbr,[0x11] = emu_lndbr, 1641 - [0x12] = emu_ltdbr,[0x13] = emu_lcdbr,[0x14] = emu_sqebr, 1642 - [0x15] = emu_sqdbr,[0x16] = emu_sqxbr,[0x17] = emu_meebr, 1643 - [0x18] = emu_kdbr, [0x19] = emu_cdbr, [0x1a] = emu_adbr, 1644 - [0x1b] = emu_sdbr, [0x1c] = emu_mdbr, [0x1d] = emu_ddbr, 1645 - [0x1e] = emu_madbr,[0x1f] = emu_msdbr,[0x40] = emu_lpxbr, 1646 - [0x41] = emu_lnxbr,[0x42] = emu_ltxbr,[0x43] = emu_lcxbr, 1647 - [0x44] = emu_ledbr,[0x45] = emu_ldxbr,[0x46] = emu_lexbr, 1648 - [0x47] = emu_fixbr,[0x48] = emu_kxbr, [0x49] = emu_cxbr, 1649 - [0x4a] = emu_axbr, [0x4b] = emu_sxbr, [0x4c] = emu_mxbr, 1650 - [0x4d] = emu_dxbr, [0x53] = emu_diebr,[0x57] = emu_fiebr, 1651 - [0x5b] = emu_didbr,[0x5f] = emu_fidbr,[0x84] = emu_sfpc, 1652 - [0x8c] = emu_efpc, [0x94] = emu_cefbr,[0x95] = emu_cdfbr, 1653 - [0x96] = emu_cxfbr,[0x98] = emu_cfebr,[0x99] = emu_cfdbr, 1654 - [0x9a] = emu_cfxbr 1655 - }; 1656 - 1657 - switch (format_table[opcode[1]]) { 1658 - case 1: /* RRE format, long double operation */ 1659 - if (opcode[3] & 0x22) 1660 - return SIGILL; 1661 - emu_store_regd((opcode[3] >> 4) & 15); 1662 - emu_store_regd(((opcode[3] >> 4) & 15) + 2); 1663 - emu_store_regd(opcode[3] & 15); 1664 - emu_store_regd((opcode[3] & 15) + 2); 1665 - /* call the emulation function */ 1666 - _fex = ((int (*)(struct pt_regs *,int, int)) 1667 - jump_table[opcode[1]]) 1668 - (regs, opcode[3] 
>> 4, opcode[3] & 15); 1669 - emu_load_regd((opcode[3] >> 4) & 15); 1670 - emu_load_regd(((opcode[3] >> 4) & 15) + 2); 1671 - emu_load_regd(opcode[3] & 15); 1672 - emu_load_regd((opcode[3] & 15) + 2); 1673 - break; 1674 - case 2: /* RRE format, double operation */ 1675 - emu_store_regd((opcode[3] >> 4) & 15); 1676 - emu_store_regd(opcode[3] & 15); 1677 - /* call the emulation function */ 1678 - _fex = ((int (*)(struct pt_regs *, int, int)) 1679 - jump_table[opcode[1]]) 1680 - (regs, opcode[3] >> 4, opcode[3] & 15); 1681 - emu_load_regd((opcode[3] >> 4) & 15); 1682 - emu_load_regd(opcode[3] & 15); 1683 - break; 1684 - case 3: /* RRE format, float operation */ 1685 - emu_store_rege((opcode[3] >> 4) & 15); 1686 - emu_store_rege(opcode[3] & 15); 1687 - /* call the emulation function */ 1688 - _fex = ((int (*)(struct pt_regs *, int, int)) 1689 - jump_table[opcode[1]]) 1690 - (regs, opcode[3] >> 4, opcode[3] & 15); 1691 - emu_load_rege((opcode[3] >> 4) & 15); 1692 - emu_load_rege(opcode[3] & 15); 1693 - break; 1694 - case 4: /* RRF format, long double operation */ 1695 - if (opcode[3] & 0x22) 1696 - return SIGILL; 1697 - emu_store_regd((opcode[3] >> 4) & 15); 1698 - emu_store_regd(((opcode[3] >> 4) & 15) + 2); 1699 - emu_store_regd(opcode[3] & 15); 1700 - emu_store_regd((opcode[3] & 15) + 2); 1701 - /* call the emulation function */ 1702 - _fex = ((int (*)(struct pt_regs *, int, int, int)) 1703 - jump_table[opcode[1]]) 1704 - (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4); 1705 - emu_load_regd((opcode[3] >> 4) & 15); 1706 - emu_load_regd(((opcode[3] >> 4) & 15) + 2); 1707 - emu_load_regd(opcode[3] & 15); 1708 - emu_load_regd((opcode[3] & 15) + 2); 1709 - break; 1710 - case 5: /* RRF format, double operation */ 1711 - emu_store_regd((opcode[2] >> 4) & 15); 1712 - emu_store_regd((opcode[3] >> 4) & 15); 1713 - emu_store_regd(opcode[3] & 15); 1714 - /* call the emulation function */ 1715 - _fex = ((int (*)(struct pt_regs *, int, int, int)) 1716 - 
jump_table[opcode[1]]) 1717 - (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4); 1718 - emu_load_regd((opcode[2] >> 4) & 15); 1719 - emu_load_regd((opcode[3] >> 4) & 15); 1720 - emu_load_regd(opcode[3] & 15); 1721 - break; 1722 - case 6: /* RRF format, float operation */ 1723 - emu_store_rege((opcode[2] >> 4) & 15); 1724 - emu_store_rege((opcode[3] >> 4) & 15); 1725 - emu_store_rege(opcode[3] & 15); 1726 - /* call the emulation function */ 1727 - _fex = ((int (*)(struct pt_regs *, int, int, int)) 1728 - jump_table[opcode[1]]) 1729 - (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4); 1730 - emu_load_rege((opcode[2] >> 4) & 15); 1731 - emu_load_rege((opcode[3] >> 4) & 15); 1732 - emu_load_rege(opcode[3] & 15); 1733 - break; 1734 - case 7: /* RRE format, cxfbr instruction */ 1735 - /* call the emulation function */ 1736 - if (opcode[3] & 0x20) 1737 - return SIGILL; 1738 - _fex = ((int (*)(struct pt_regs *, int, int)) 1739 - jump_table[opcode[1]]) 1740 - (regs, opcode[3] >> 4, opcode[3] & 15); 1741 - emu_load_regd((opcode[3] >> 4) & 15); 1742 - emu_load_regd(((opcode[3] >> 4) & 15) + 2); 1743 - break; 1744 - case 8: /* RRE format, cdfbr instruction */ 1745 - /* call the emulation function */ 1746 - _fex = ((int (*)(struct pt_regs *, int, int)) 1747 - jump_table[opcode[1]]) 1748 - (regs, opcode[3] >> 4, opcode[3] & 15); 1749 - emu_load_regd((opcode[3] >> 4) & 15); 1750 - break; 1751 - case 9: /* RRE format, cefbr instruction */ 1752 - /* call the emulation function */ 1753 - _fex = ((int (*)(struct pt_regs *, int, int)) 1754 - jump_table[opcode[1]]) 1755 - (regs, opcode[3] >> 4, opcode[3] & 15); 1756 - emu_load_rege((opcode[3] >> 4) & 15); 1757 - break; 1758 - case 10: /* RRF format, cfxbr instruction */ 1759 - if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32) 1760 - /* mask of { 2,3,8-15 } is invalid */ 1761 - return SIGILL; 1762 - if (opcode[3] & 2) 1763 - return SIGILL; 1764 - emu_store_regd(opcode[3] & 15); 1765 - emu_store_regd((opcode[3] & 15) + 
2); 1766 - /* call the emulation function */ 1767 - _fex = ((int (*)(struct pt_regs *, int, int, int)) 1768 - jump_table[opcode[1]]) 1769 - (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4); 1770 - break; 1771 - case 11: /* RRF format, cfdbr instruction */ 1772 - if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32) 1773 - /* mask of { 2,3,8-15 } is invalid */ 1774 - return SIGILL; 1775 - emu_store_regd(opcode[3] & 15); 1776 - /* call the emulation function */ 1777 - _fex = ((int (*)(struct pt_regs *, int, int, int)) 1778 - jump_table[opcode[1]]) 1779 - (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4); 1780 - break; 1781 - case 12: /* RRF format, cfebr instruction */ 1782 - if ((opcode[2] & 128) == 128 || (opcode[2] & 96) == 32) 1783 - /* mask of { 2,3,8-15 } is invalid */ 1784 - return SIGILL; 1785 - emu_store_rege(opcode[3] & 15); 1786 - /* call the emulation function */ 1787 - _fex = ((int (*)(struct pt_regs *, int, int, int)) 1788 - jump_table[opcode[1]]) 1789 - (regs, opcode[3] >> 4, opcode[3] & 15, opcode[2] >> 4); 1790 - break; 1791 - case 13: /* RRE format, ldxbr & mdxbr instruction */ 1792 - /* double store but long double load */ 1793 - if (opcode[3] & 0x20) 1794 - return SIGILL; 1795 - emu_store_regd((opcode[3] >> 4) & 15); 1796 - emu_store_regd(opcode[3] & 15); 1797 - /* call the emulation function */ 1798 - _fex = ((int (*)(struct pt_regs *, int, int)) 1799 - jump_table[opcode[1]]) 1800 - (regs, opcode[3] >> 4, opcode[3] & 15); 1801 - emu_load_regd((opcode[3] >> 4) & 15); 1802 - emu_load_regd(((opcode[3] >> 4) & 15) + 2); 1803 - break; 1804 - case 14: /* RRE format, ldxbr & mdxbr instruction */ 1805 - /* float store but long double load */ 1806 - if (opcode[3] & 0x20) 1807 - return SIGILL; 1808 - emu_store_rege((opcode[3] >> 4) & 15); 1809 - emu_store_rege(opcode[3] & 15); 1810 - /* call the emulation function */ 1811 - _fex = ((int (*)(struct pt_regs *, int, int)) 1812 - jump_table[opcode[1]]) 1813 - (regs, opcode[3] >> 4, opcode[3] & 
15); 1814 - emu_load_regd((opcode[3] >> 4) & 15); 1815 - emu_load_regd(((opcode[3] >> 4) & 15) + 2); 1816 - break; 1817 - case 15: /* RRE format, ldebr & mdebr instruction */ 1818 - /* float store but double load */ 1819 - emu_store_rege((opcode[3] >> 4) & 15); 1820 - emu_store_rege(opcode[3] & 15); 1821 - /* call the emulation function */ 1822 - _fex = ((int (*)(struct pt_regs *, int, int)) 1823 - jump_table[opcode[1]]) 1824 - (regs, opcode[3] >> 4, opcode[3] & 15); 1825 - emu_load_regd((opcode[3] >> 4) & 15); 1826 - break; 1827 - case 16: /* RRE format, ldxbr instruction */ 1828 - /* long double store but double load */ 1829 - if (opcode[3] & 2) 1830 - return SIGILL; 1831 - emu_store_regd(opcode[3] & 15); 1832 - emu_store_regd((opcode[3] & 15) + 2); 1833 - /* call the emulation function */ 1834 - _fex = ((int (*)(struct pt_regs *, int, int)) 1835 - jump_table[opcode[1]]) 1836 - (regs, opcode[3] >> 4, opcode[3] & 15); 1837 - emu_load_regd((opcode[3] >> 4) & 15); 1838 - break; 1839 - case 17: /* RRE format, ldxbr instruction */ 1840 - /* long double store but float load */ 1841 - if (opcode[3] & 2) 1842 - return SIGILL; 1843 - emu_store_regd(opcode[3] & 15); 1844 - emu_store_regd((opcode[3] & 15) + 2); 1845 - /* call the emulation function */ 1846 - _fex = ((int (*)(struct pt_regs *, int, int)) 1847 - jump_table[opcode[1]]) 1848 - (regs, opcode[3] >> 4, opcode[3] & 15); 1849 - emu_load_rege((opcode[3] >> 4) & 15); 1850 - break; 1851 - case 18: /* RRE format, ledbr instruction */ 1852 - /* double store but float load */ 1853 - emu_store_regd(opcode[3] & 15); 1854 - /* call the emulation function */ 1855 - _fex = ((int (*)(struct pt_regs *, int, int)) 1856 - jump_table[opcode[1]]) 1857 - (regs, opcode[3] >> 4, opcode[3] & 15); 1858 - emu_load_rege((opcode[3] >> 4) & 15); 1859 - break; 1860 - case 19: /* RRE format, efpc & sfpc instruction */ 1861 - /* call the emulation function */ 1862 - _fex = ((int (*)(struct pt_regs *, int, int)) 1863 - jump_table[opcode[1]]) 
1864 - (regs, opcode[3] >> 4, opcode[3] & 15); 1865 - break; 1866 - default: /* invalid operation */ 1867 - return SIGILL; 1868 - } 1869 - if (_fex != 0) { 1870 - current->thread.fp_regs.fpc |= _fex; 1871 - if (current->thread.fp_regs.fpc & (_fex << 8)) 1872 - return SIGFPE; 1873 - } 1874 - return 0; 1875 - } 1876 - 1877 - static void* calc_addr(struct pt_regs *regs, int rx, int rb, int disp) 1878 - { 1879 - addr_t addr; 1880 - 1881 - rx &= 15; 1882 - rb &= 15; 1883 - addr = disp & 0xfff; 1884 - addr += (rx != 0) ? regs->gprs[rx] : 0; /* + index */ 1885 - addr += (rb != 0) ? regs->gprs[rb] : 0; /* + base */ 1886 - return (void*) addr; 1887 - } 1888 - 1889 - int math_emu_ed(__u8 *opcode, struct pt_regs * regs) { 1890 - int _fex = 0; 1891 - 1892 - static const __u8 format_table[256] = { 1893 - [0x04] = 0x06,[0x05] = 0x05,[0x06] = 0x07,[0x07] = 0x05, 1894 - [0x08] = 0x02,[0x09] = 0x02,[0x0a] = 0x02,[0x0b] = 0x02, 1895 - [0x0c] = 0x06,[0x0d] = 0x02,[0x0e] = 0x04,[0x0f] = 0x04, 1896 - [0x10] = 0x08,[0x11] = 0x09,[0x12] = 0x0a,[0x14] = 0x02, 1897 - [0x15] = 0x01,[0x17] = 0x02,[0x18] = 0x01,[0x19] = 0x01, 1898 - [0x1a] = 0x01,[0x1b] = 0x01,[0x1c] = 0x01,[0x1d] = 0x01, 1899 - [0x1e] = 0x03,[0x1f] = 0x03, 1900 - }; 1901 - static const void *jump_table[]= { 1902 - [0x04] = emu_ldeb,[0x05] = emu_lxdb,[0x06] = emu_lxeb, 1903 - [0x07] = emu_mxdb,[0x08] = emu_keb, [0x09] = emu_ceb, 1904 - [0x0a] = emu_aeb, [0x0b] = emu_seb, [0x0c] = emu_mdeb, 1905 - [0x0d] = emu_deb, [0x0e] = emu_maeb,[0x0f] = emu_mseb, 1906 - [0x10] = emu_tceb,[0x11] = emu_tcdb,[0x12] = emu_tcxb, 1907 - [0x14] = emu_sqeb,[0x15] = emu_sqdb,[0x17] = emu_meeb, 1908 - [0x18] = emu_kdb, [0x19] = emu_cdb, [0x1a] = emu_adb, 1909 - [0x1b] = emu_sdb, [0x1c] = emu_mdb, [0x1d] = emu_ddb, 1910 - [0x1e] = emu_madb,[0x1f] = emu_msdb 1911 - }; 1912 - 1913 - switch (format_table[opcode[5]]) { 1914 - case 1: /* RXE format, double constant */ { 1915 - __u64 *dxb, temp; 1916 - __u32 opc; 1917 - 1918 - emu_store_regd((opcode[1] >> 
4) & 15); 1919 - opc = *((__u32 *) opcode); 1920 - dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc); 1921 - mathemu_copy_from_user(&temp, dxb, 8); 1922 - /* call the emulation function */ 1923 - _fex = ((int (*)(struct pt_regs *, int, double *)) 1924 - jump_table[opcode[5]]) 1925 - (regs, opcode[1] >> 4, (double *) &temp); 1926 - emu_load_regd((opcode[1] >> 4) & 15); 1927 - break; 1928 - } 1929 - case 2: /* RXE format, float constant */ { 1930 - __u32 *dxb, temp; 1931 - __u32 opc; 1932 - 1933 - emu_store_rege((opcode[1] >> 4) & 15); 1934 - opc = *((__u32 *) opcode); 1935 - dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc); 1936 - mathemu_get_user(temp, dxb); 1937 - /* call the emulation function */ 1938 - _fex = ((int (*)(struct pt_regs *, int, float *)) 1939 - jump_table[opcode[5]]) 1940 - (regs, opcode[1] >> 4, (float *) &temp); 1941 - emu_load_rege((opcode[1] >> 4) & 15); 1942 - break; 1943 - } 1944 - case 3: /* RXF format, double constant */ { 1945 - __u64 *dxb, temp; 1946 - __u32 opc; 1947 - 1948 - emu_store_regd((opcode[1] >> 4) & 15); 1949 - emu_store_regd((opcode[4] >> 4) & 15); 1950 - opc = *((__u32 *) opcode); 1951 - dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc); 1952 - mathemu_copy_from_user(&temp, dxb, 8); 1953 - /* call the emulation function */ 1954 - _fex = ((int (*)(struct pt_regs *, int, double *, int)) 1955 - jump_table[opcode[5]]) 1956 - (regs, opcode[1] >> 4, (double *) &temp, opcode[4] >> 4); 1957 - emu_load_regd((opcode[1] >> 4) & 15); 1958 - break; 1959 - } 1960 - case 4: /* RXF format, float constant */ { 1961 - __u32 *dxb, temp; 1962 - __u32 opc; 1963 - 1964 - emu_store_rege((opcode[1] >> 4) & 15); 1965 - emu_store_rege((opcode[4] >> 4) & 15); 1966 - opc = *((__u32 *) opcode); 1967 - dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc); 1968 - mathemu_get_user(temp, dxb); 1969 - /* call the emulation function */ 1970 - _fex = ((int (*)(struct pt_regs *, int, float *, int)) 1971 - jump_table[opcode[5]]) 
1972 - (regs, opcode[1] >> 4, (float *) &temp, opcode[4] >> 4); 1973 - emu_load_rege((opcode[4] >> 4) & 15); 1974 - break; 1975 - } 1976 - case 5: /* RXE format, double constant */ 1977 - /* store double and load long double */ 1978 - { 1979 - __u64 *dxb, temp; 1980 - __u32 opc; 1981 - if ((opcode[1] >> 4) & 0x20) 1982 - return SIGILL; 1983 - emu_store_regd((opcode[1] >> 4) & 15); 1984 - opc = *((__u32 *) opcode); 1985 - dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc); 1986 - mathemu_copy_from_user(&temp, dxb, 8); 1987 - /* call the emulation function */ 1988 - _fex = ((int (*)(struct pt_regs *, int, double *)) 1989 - jump_table[opcode[5]]) 1990 - (regs, opcode[1] >> 4, (double *) &temp); 1991 - emu_load_regd((opcode[1] >> 4) & 15); 1992 - emu_load_regd(((opcode[1] >> 4) & 15) + 2); 1993 - break; 1994 - } 1995 - case 6: /* RXE format, float constant */ 1996 - /* store float and load double */ 1997 - { 1998 - __u32 *dxb, temp; 1999 - __u32 opc; 2000 - emu_store_rege((opcode[1] >> 4) & 15); 2001 - opc = *((__u32 *) opcode); 2002 - dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc); 2003 - mathemu_get_user(temp, dxb); 2004 - /* call the emulation function */ 2005 - _fex = ((int (*)(struct pt_regs *, int, float *)) 2006 - jump_table[opcode[5]]) 2007 - (regs, opcode[1] >> 4, (float *) &temp); 2008 - emu_load_regd((opcode[1] >> 4) & 15); 2009 - break; 2010 - } 2011 - case 7: /* RXE format, float constant */ 2012 - /* store float and load long double */ 2013 - { 2014 - __u32 *dxb, temp; 2015 - __u32 opc; 2016 - if ((opcode[1] >> 4) & 0x20) 2017 - return SIGILL; 2018 - emu_store_rege((opcode[1] >> 4) & 15); 2019 - opc = *((__u32 *) opcode); 2020 - dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc); 2021 - mathemu_get_user(temp, dxb); 2022 - /* call the emulation function */ 2023 - _fex = ((int (*)(struct pt_regs *, int, float *)) 2024 - jump_table[opcode[5]]) 2025 - (regs, opcode[1] >> 4, (float *) &temp); 2026 - emu_load_regd((opcode[1] >> 4) & 
15); 2027 - emu_load_regd(((opcode[1] >> 4) & 15) + 2); 2028 - break; 2029 - } 2030 - case 8: /* RXE format, RX address used as int value */ { 2031 - __u64 dxb; 2032 - __u32 opc; 2033 - 2034 - emu_store_rege((opcode[1] >> 4) & 15); 2035 - opc = *((__u32 *) opcode); 2036 - dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc); 2037 - /* call the emulation function */ 2038 - _fex = ((int (*)(struct pt_regs *, int, long)) 2039 - jump_table[opcode[5]]) 2040 - (regs, opcode[1] >> 4, dxb); 2041 - break; 2042 - } 2043 - case 9: /* RXE format, RX address used as int value */ { 2044 - __u64 dxb; 2045 - __u32 opc; 2046 - 2047 - emu_store_regd((opcode[1] >> 4) & 15); 2048 - opc = *((__u32 *) opcode); 2049 - dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc); 2050 - /* call the emulation function */ 2051 - _fex = ((int (*)(struct pt_regs *, int, long)) 2052 - jump_table[opcode[5]]) 2053 - (regs, opcode[1] >> 4, dxb); 2054 - break; 2055 - } 2056 - case 10: /* RXE format, RX address used as int value */ { 2057 - __u64 dxb; 2058 - __u32 opc; 2059 - 2060 - if ((opcode[1] >> 4) & 2) 2061 - return SIGILL; 2062 - emu_store_regd((opcode[1] >> 4) & 15); 2063 - emu_store_regd(((opcode[1] >> 4) & 15) + 2); 2064 - opc = *((__u32 *) opcode); 2065 - dxb = (__u64) calc_addr(regs, opc >> 16, opc >> 12, opc); 2066 - /* call the emulation function */ 2067 - _fex = ((int (*)(struct pt_regs *, int, long)) 2068 - jump_table[opcode[5]]) 2069 - (regs, opcode[1] >> 4, dxb); 2070 - break; 2071 - } 2072 - default: /* invalid operation */ 2073 - return SIGILL; 2074 - } 2075 - if (_fex != 0) { 2076 - current->thread.fp_regs.fpc |= _fex; 2077 - if (current->thread.fp_regs.fpc & (_fex << 8)) 2078 - return SIGFPE; 2079 - } 2080 - return 0; 2081 - } 2082 - 2083 - /* 2084 - * Emulate LDR Rx,Ry with Rx or Ry not in {0, 2, 4, 6} 2085 - */ 2086 - int math_emu_ldr(__u8 *opcode) { 2087 - s390_fp_regs *fp_regs = &current->thread.fp_regs; 2088 - __u16 opc = *((__u16 *) opcode); 2089 - 2090 - if ((opc & 
0x90) == 0) { /* test if rx in {0,2,4,6} */ 2091 - /* we got an exception therefore ry can't be in {0,2,4,6} */ 2092 - asm volatile( /* load rx from fp_regs.fprs[ry] */ 2093 - " bras 1,0f\n" 2094 - " ld 0,0(%1)\n" 2095 - "0: ex %0,0(1)" 2096 - : /* no output */ 2097 - : "a" (opc & 0xf0), "a" (&fp_regs->fprs[opc & 0xf].d) 2098 - : "1"); 2099 - } else if ((opc & 0x9) == 0) { /* test if ry in {0,2,4,6} */ 2100 - asm volatile ( /* store ry to fp_regs.fprs[rx] */ 2101 - " bras 1,0f\n" 2102 - " std 0,0(%1)\n" 2103 - "0: ex %0,0(1)" 2104 - : /* no output */ 2105 - : "a" ((opc & 0xf) << 4), 2106 - "a" (&fp_regs->fprs[(opc & 0xf0)>>4].d) 2107 - : "1"); 2108 - } else /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */ 2109 - fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf]; 2110 - return 0; 2111 - } 2112 - 2113 - /* 2114 - * Emulate LER Rx,Ry with Rx or Ry not in {0, 2, 4, 6} 2115 - */ 2116 - int math_emu_ler(__u8 *opcode) { 2117 - s390_fp_regs *fp_regs = &current->thread.fp_regs; 2118 - __u16 opc = *((__u16 *) opcode); 2119 - 2120 - if ((opc & 0x90) == 0) { /* test if rx in {0,2,4,6} */ 2121 - /* we got an exception therefore ry can't be in {0,2,4,6} */ 2122 - asm volatile( /* load rx from fp_regs.fprs[ry] */ 2123 - " bras 1,0f\n" 2124 - " le 0,0(%1)\n" 2125 - "0: ex %0,0(1)" 2126 - : /* no output */ 2127 - : "a" (opc & 0xf0), "a" (&fp_regs->fprs[opc & 0xf].f) 2128 - : "1"); 2129 - } else if ((opc & 0x9) == 0) { /* test if ry in {0,2,4,6} */ 2130 - asm volatile( /* store ry to fp_regs.fprs[rx] */ 2131 - " bras 1,0f\n" 2132 - " ste 0,0(%1)\n" 2133 - "0: ex %0,0(1)" 2134 - : /* no output */ 2135 - : "a" ((opc & 0xf) << 4), 2136 - "a" (&fp_regs->fprs[(opc & 0xf0) >> 4].f) 2137 - : "1"); 2138 - } else /* move fp_regs.fprs[ry] to fp_regs.fprs[rx] */ 2139 - fp_regs->fprs[(opc & 0xf0) >> 4] = fp_regs->fprs[opc & 0xf]; 2140 - return 0; 2141 - } 2142 - 2143 - /* 2144 - * Emulate LD R,D(X,B) with R not in {0, 2, 4, 6} 2145 - */ 2146 - int math_emu_ld(__u8 *opcode, struct 
pt_regs * regs) { 2147 - s390_fp_regs *fp_regs = &current->thread.fp_regs; 2148 - __u32 opc = *((__u32 *) opcode); 2149 - __u64 *dxb; 2150 - 2151 - dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc); 2152 - mathemu_copy_from_user(&fp_regs->fprs[(opc >> 20) & 0xf].d, dxb, 8); 2153 - return 0; 2154 - } 2155 - 2156 - /* 2157 - * Emulate LE R,D(X,B) with R not in {0, 2, 4, 6} 2158 - */ 2159 - int math_emu_le(__u8 *opcode, struct pt_regs * regs) { 2160 - s390_fp_regs *fp_regs = &current->thread.fp_regs; 2161 - __u32 opc = *((__u32 *) opcode); 2162 - __u32 *mem, *dxb; 2163 - 2164 - dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc); 2165 - mem = (__u32 *) (&fp_regs->fprs[(opc >> 20) & 0xf].f); 2166 - mathemu_get_user(mem[0], dxb); 2167 - return 0; 2168 - } 2169 - 2170 - /* 2171 - * Emulate STD R,D(X,B) with R not in {0, 2, 4, 6} 2172 - */ 2173 - int math_emu_std(__u8 *opcode, struct pt_regs * regs) { 2174 - s390_fp_regs *fp_regs = &current->thread.fp_regs; 2175 - __u32 opc = *((__u32 *) opcode); 2176 - __u64 *dxb; 2177 - 2178 - dxb = (__u64 *) calc_addr(regs, opc >> 16, opc >> 12, opc); 2179 - mathemu_copy_to_user(dxb, &fp_regs->fprs[(opc >> 20) & 0xf].d, 8); 2180 - return 0; 2181 - } 2182 - 2183 - /* 2184 - * Emulate STE R,D(X,B) with R not in {0, 2, 4, 6} 2185 - */ 2186 - int math_emu_ste(__u8 *opcode, struct pt_regs * regs) { 2187 - s390_fp_regs *fp_regs = &current->thread.fp_regs; 2188 - __u32 opc = *((__u32 *) opcode); 2189 - __u32 *mem, *dxb; 2190 - 2191 - dxb = (__u32 *) calc_addr(regs, opc >> 16, opc >> 12, opc); 2192 - mem = (__u32 *) (&fp_regs->fprs[(opc >> 20) & 0xf].f); 2193 - mathemu_put_user(mem[0], dxb); 2194 - return 0; 2195 - } 2196 - 2197 - /* 2198 - * Emulate LFPC D(B) 2199 - */ 2200 - int math_emu_lfpc(__u8 *opcode, struct pt_regs *regs) { 2201 - __u32 opc = *((__u32 *) opcode); 2202 - __u32 *dxb, temp; 2203 - 2204 - dxb= (__u32 *) calc_addr(regs, 0, opc>>12, opc); 2205 - mathemu_get_user(temp, dxb); 2206 - if ((temp & ~FPC_VALID_MASK) 
!= 0) 2207 - return SIGILL; 2208 - current->thread.fp_regs.fpc = temp; 2209 - return 0; 2210 - } 2211 - 2212 - /* 2213 - * Emulate STFPC D(B) 2214 - */ 2215 - int math_emu_stfpc(__u8 *opcode, struct pt_regs *regs) { 2216 - __u32 opc = *((__u32 *) opcode); 2217 - __u32 *dxb; 2218 - 2219 - dxb= (__u32 *) calc_addr(regs, 0, opc>>12, opc); 2220 - mathemu_put_user(current->thread.fp_regs.fpc, dxb); 2221 - return 0; 2222 - } 2223 - 2224 - /* 2225 - * Emulate SRNM D(B) 2226 - */ 2227 - int math_emu_srnm(__u8 *opcode, struct pt_regs *regs) { 2228 - __u32 opc = *((__u32 *) opcode); 2229 - __u32 temp; 2230 - 2231 - temp = calc_addr(regs, 0, opc>>12, opc); 2232 - current->thread.fp_regs.fpc &= ~3; 2233 - current->thread.fp_regs.fpc |= (temp & 3); 2234 - return 0; 2235 - } 2236 - 2237 - /* broken compiler ... */ 2238 - long long 2239 - __negdi2 (long long u) 2240 - { 2241 - 2242 - union lll { 2243 - long long ll; 2244 - long s[2]; 2245 - }; 2246 - 2247 - union lll w,uu; 2248 - 2249 - uu.ll = u; 2250 - 2251 - w.s[1] = -uu.s[1]; 2252 - w.s[0] = -uu.s[0] - ((int) w.s[1] != 0); 2253 - 2254 - return w.ll; 2255 - }
+2 -22
arch/s390/mm/dump_pagetables.c
··· 18 18 KERNEL_END_NR, 19 19 VMEMMAP_NR, 20 20 VMALLOC_NR, 21 - #ifdef CONFIG_64BIT 22 21 MODULES_NR, 23 - #endif 24 22 }; 25 23 26 24 static struct addr_marker address_markers[] = { ··· 27 29 [KERNEL_END_NR] = {(unsigned long)&_end, "Kernel Image End"}, 28 30 [VMEMMAP_NR] = {0, "vmemmap Area"}, 29 31 [VMALLOC_NR] = {0, "vmalloc Area"}, 30 - #ifdef CONFIG_64BIT 31 32 [MODULES_NR] = {0, "Modules Area"}, 32 - #endif 33 33 { -1, NULL } 34 34 }; 35 35 ··· 123 127 } 124 128 } 125 129 126 - #ifdef CONFIG_64BIT 127 - #define _PMD_PROT_MASK _SEGMENT_ENTRY_PROTECT 128 - #else 129 - #define _PMD_PROT_MASK 0 130 - #endif 131 - 132 130 static void walk_pmd_level(struct seq_file *m, struct pg_state *st, 133 131 pud_t *pud, unsigned long addr) 134 132 { ··· 135 145 pmd = pmd_offset(pud, addr); 136 146 if (!pmd_none(*pmd)) { 137 147 if (pmd_large(*pmd)) { 138 - prot = pmd_val(*pmd) & _PMD_PROT_MASK; 148 + prot = pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT; 139 149 note_page(m, st, prot, 3); 140 150 } else 141 151 walk_pte_level(m, st, pmd, addr); ··· 144 154 addr += PMD_SIZE; 145 155 } 146 156 } 147 - 148 - #ifdef CONFIG_64BIT 149 - #define _PUD_PROT_MASK _REGION3_ENTRY_RO 150 - #else 151 - #define _PUD_PROT_MASK 0 152 - #endif 153 157 154 158 static void walk_pud_level(struct seq_file *m, struct pg_state *st, 155 159 pgd_t *pgd, unsigned long addr) ··· 157 173 pud = pud_offset(pgd, addr); 158 174 if (!pud_none(*pud)) 159 175 if (pud_large(*pud)) { 160 - prot = pud_val(*pud) & _PUD_PROT_MASK; 176 + prot = pud_val(*pud) & _REGION3_ENTRY_RO; 161 177 note_page(m, st, prot, 2); 162 178 } else 163 179 walk_pmd_level(m, st, pud, addr); ··· 214 230 * kernel ASCE. We need this to keep the page table walker functions 215 231 * from accessing non-existent entries. 
216 232 */ 217 - #ifdef CONFIG_32BIT 218 - max_addr = 1UL << 31; 219 - #else 220 233 max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2; 221 234 max_addr = 1UL << (max_addr * 11 + 31); 222 235 address_markers[MODULES_NR].start_address = MODULES_VADDR; 223 - #endif 224 236 address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap; 225 237 address_markers[VMALLOC_NR].start_address = VMALLOC_START; 226 238 debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
-14
arch/s390/mm/extmem.c
··· 51 51 struct qrange range[6]; 52 52 }; 53 53 54 - #ifdef CONFIG_64BIT 55 54 struct qrange_old { 56 55 unsigned int start; /* last byte type */ 57 56 unsigned int end; /* last byte reserved */ ··· 64 65 int segrcnt; 65 66 struct qrange_old range[6]; 66 67 }; 67 - #endif 68 68 69 69 struct qin64 { 70 70 char qopcode; ··· 101 103 static int 102 104 dcss_set_subcodes(void) 103 105 { 104 - #ifdef CONFIG_64BIT 105 106 char *name = kmalloc(8 * sizeof(char), GFP_KERNEL | GFP_DMA); 106 107 unsigned long rx, ry; 107 108 int rc; ··· 132 135 segext_scode = DCSS_SEGEXTX; 133 136 return 0; 134 137 } 135 - #endif 136 138 /* Diag x'64' new subcodes are not supported, set to old subcodes */ 137 139 loadshr_scode = DCSS_LOADNOLY; 138 140 loadnsr_scode = DCSS_LOADNSR; ··· 204 208 rx = (unsigned long) parameter; 205 209 ry = (unsigned long) *func; 206 210 207 - #ifdef CONFIG_64BIT 208 211 /* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */ 209 212 if (*func > DCSS_SEGEXT) 210 213 asm volatile( ··· 220 225 " ipm %2\n" 221 226 " srl %2,28\n" 222 227 : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc"); 223 - #else 224 - asm volatile( 225 - " diag %0,%1,0x64\n" 226 - " ipm %2\n" 227 - " srl %2,28\n" 228 - : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc"); 229 - #endif 230 228 *ret1 = rx; 231 229 *ret2 = ry; 232 230 return rc; ··· 269 281 goto out_free; 270 282 } 271 283 272 - #ifdef CONFIG_64BIT 273 284 /* Only old format of output area of Diagnose x'64' is supported, 274 285 copy data for the new format. */ 275 286 if (segext_scode == DCSS_SEGEXT) { ··· 294 307 } 295 308 kfree(qout_old); 296 309 } 297 - #endif 298 310 if (qout->segcnt > 6) { 299 311 rc = -EOPNOTSUPP; 300 312 goto out_free;
-36
arch/s390/mm/fault.c
··· 36 36 #include <asm/facility.h> 37 37 #include "../kernel/entry.h" 38 38 39 - #ifndef CONFIG_64BIT 40 - #define __FAIL_ADDR_MASK 0x7ffff000 41 - #define __SUBCODE_MASK 0x0200 42 - #define __PF_RES_FIELD 0ULL 43 - #else /* CONFIG_64BIT */ 44 39 #define __FAIL_ADDR_MASK -4096L 45 40 #define __SUBCODE_MASK 0x0600 46 41 #define __PF_RES_FIELD 0x8000000000000000ULL 47 - #endif /* CONFIG_64BIT */ 48 42 49 43 #define VM_FAULT_BADCONTEXT 0x010000 50 44 #define VM_FAULT_BADMAP 0x020000 ··· 48 54 49 55 static unsigned long store_indication __read_mostly; 50 56 51 - #ifdef CONFIG_64BIT 52 57 static int __init fault_init(void) 53 58 { 54 59 if (test_facility(75)) ··· 55 62 return 0; 56 63 } 57 64 early_initcall(fault_init); 58 - #endif 59 65 60 66 static inline int notify_page_fault(struct pt_regs *regs) 61 67 { ··· 125 133 return probe_kernel_address((unsigned long *)p, dummy); 126 134 } 127 135 128 - #ifdef CONFIG_64BIT 129 136 static void dump_pagetable(unsigned long asce, unsigned long address) 130 137 { 131 138 unsigned long *table = __va(asce & PAGE_MASK); ··· 177 186 bad: 178 187 pr_cont("BAD\n"); 179 188 } 180 - 181 - #else /* CONFIG_64BIT */ 182 - 183 - static void dump_pagetable(unsigned long asce, unsigned long address) 184 - { 185 - unsigned long *table = __va(asce & PAGE_MASK); 186 - 187 - pr_alert("AS:%08lx ", asce); 188 - table = table + ((address >> 20) & 0x7ff); 189 - if (bad_address(table)) 190 - goto bad; 191 - pr_cont("S:%08lx ", *table); 192 - if (*table & _SEGMENT_ENTRY_INVALID) 193 - goto out; 194 - table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); 195 - table = table + ((address >> 12) & 0xff); 196 - if (bad_address(table)) 197 - goto bad; 198 - pr_cont("P:%08lx ", *table); 199 - out: 200 - pr_cont("\n"); 201 - return; 202 - bad: 203 - pr_cont("BAD\n"); 204 - } 205 - 206 - #endif /* CONFIG_64BIT */ 207 189 208 190 static void dump_fault_info(struct pt_regs *regs) 209 191 {
-4
arch/s390/mm/gup.c
··· 106 106 pmd_t *pmdp, pmd; 107 107 108 108 pmdp = (pmd_t *) pudp; 109 - #ifdef CONFIG_64BIT 110 109 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) 111 110 pmdp = (pmd_t *) pud_deref(pud); 112 111 pmdp += pmd_index(addr); 113 - #endif 114 112 do { 115 113 pmd = *pmdp; 116 114 barrier(); ··· 143 145 pud_t *pudp, pud; 144 146 145 147 pudp = (pud_t *) pgdp; 146 - #ifdef CONFIG_64BIT 147 148 if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) 148 149 pudp = (pud_t *) pgd_deref(pgd); 149 150 pudp += pud_index(addr); 150 - #endif 151 151 do { 152 152 pud = *pudp; 153 153 barrier();
-5
arch/s390/mm/init.c
··· 105 105 unsigned long pgd_type, asce_bits; 106 106 107 107 init_mm.pgd = swapper_pg_dir; 108 - #ifdef CONFIG_64BIT 109 108 if (VMALLOC_END > (1UL << 42)) { 110 109 asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH; 111 110 pgd_type = _REGION2_ENTRY_EMPTY; ··· 112 113 asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH; 113 114 pgd_type = _REGION3_ENTRY_EMPTY; 114 115 } 115 - #else 116 - asce_bits = _ASCE_TABLE_LENGTH; 117 - pgd_type = _SEGMENT_ENTRY_EMPTY; 118 - #endif 119 116 S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits; 120 117 clear_table((unsigned long *) init_mm.pgd, pgd_type, 121 118 sizeof(unsigned long)*2048);
-4
arch/s390/mm/mem_detect.c
··· 36 36 memsize = rzm * rnmax; 37 37 if (!rzm) 38 38 rzm = 1ULL << 17; 39 - if (IS_ENABLED(CONFIG_32BIT)) { 40 - rzm = min(ADDR2G, rzm); 41 - memsize = min(ADDR2G, memsize); 42 - } 43 39 max_physmem_end = memsize; 44 40 addr = 0; 45 41 /* keep memblock lists close to the kernel */
-25
arch/s390/mm/mmap.c
··· 190 190 return base + mmap_rnd(); 191 191 } 192 192 193 - #ifndef CONFIG_64BIT 194 - 195 - /* 196 - * This function, called very early during the creation of a new 197 - * process VM image, sets up which VM layout function to use: 198 - */ 199 - void arch_pick_mmap_layout(struct mm_struct *mm) 200 - { 201 - /* 202 - * Fall back to the standard layout if the personality 203 - * bit is set, or if the expected stack growth is unlimited: 204 - */ 205 - if (mmap_is_legacy()) { 206 - mm->mmap_base = mmap_base_legacy(); 207 - mm->get_unmapped_area = arch_get_unmapped_area; 208 - } else { 209 - mm->mmap_base = mmap_base(); 210 - mm->get_unmapped_area = arch_get_unmapped_area_topdown; 211 - } 212 - } 213 - 214 - #else 215 - 216 193 int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) 217 194 { 218 195 if (is_compat_task() || (TASK_SIZE >= (1UL << 53))) ··· 294 317 return 0; 295 318 } 296 319 early_initcall(setup_mmap_rnd); 297 - 298 - #endif
+1 -1
arch/s390/mm/pageattr.c
··· 109 109 { 110 110 int i; 111 111 112 - if (test_facility(13) && IS_ENABLED(CONFIG_64BIT)) { 112 + if (test_facility(13)) { 113 113 __ptep_ipte_range(address, nr - 1, pte); 114 114 return; 115 115 }
-8
arch/s390/mm/pgtable.c
··· 27 27 #include <asm/tlbflush.h> 28 28 #include <asm/mmu_context.h> 29 29 30 - #ifndef CONFIG_64BIT 31 - #define ALLOC_ORDER 1 32 - #define FRAG_MASK 0x0f 33 - #else 34 30 #define ALLOC_ORDER 2 35 31 #define FRAG_MASK 0x03 36 - #endif 37 - 38 32 39 33 unsigned long *crst_table_alloc(struct mm_struct *mm) 40 34 { ··· 44 50 free_pages((unsigned long) table, ALLOC_ORDER); 45 51 } 46 52 47 - #ifdef CONFIG_64BIT 48 53 static void __crst_table_upgrade(void *arg) 49 54 { 50 55 struct mm_struct *mm = arg; ··· 133 140 if (current->active_mm == mm) 134 141 set_user_asce(mm); 135 142 } 136 - #endif 137 143 138 144 #ifdef CONFIG_PGSTE 139 145
+2 -8
arch/s390/mm/vmem.c
··· 38 38 { 39 39 pud_t *pud = NULL; 40 40 41 - #ifdef CONFIG_64BIT 42 41 pud = vmem_alloc_pages(2); 43 42 if (!pud) 44 43 return NULL; 45 44 clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4); 46 - #endif 47 45 return pud; 48 46 } 49 47 ··· 49 51 { 50 52 pmd_t *pmd = NULL; 51 53 52 - #ifdef CONFIG_64BIT 53 54 pmd = vmem_alloc_pages(2); 54 55 if (!pmd) 55 56 return NULL; 56 57 clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4); 57 - #endif 58 58 return pmd; 59 59 } 60 60 ··· 94 98 pgd_populate(&init_mm, pg_dir, pu_dir); 95 99 } 96 100 pu_dir = pud_offset(pg_dir, address); 97 - #if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC) 101 + #ifndef CONFIG_DEBUG_PAGEALLOC 98 102 if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address && 99 103 !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) { 100 104 pud_val(*pu_dir) = __pa(address) | ··· 111 115 pud_populate(&init_mm, pu_dir, pm_dir); 112 116 } 113 117 pm_dir = pmd_offset(pu_dir, address); 114 - #if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC) 118 + #ifndef CONFIG_DEBUG_PAGEALLOC 115 119 if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address && 116 120 !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) { 117 121 pmd_val(*pm_dir) = __pa(address) | ··· 218 222 219 223 pm_dir = pmd_offset(pu_dir, address); 220 224 if (pmd_none(*pm_dir)) { 221 - #ifdef CONFIG_64BIT 222 225 /* Use 1MB frames for vmemmap if available. We always 223 226 * use large frames even if they are only partially 224 227 * used. ··· 235 240 address = (address + PMD_SIZE) & PMD_MASK; 236 241 continue; 237 242 } 238 - #endif 239 243 pt_dir = vmem_pte_alloc(address); 240 244 if (!pt_dir) 241 245 goto out;
+1 -1
arch/s390/oprofile/Makefile
··· 7 7 timer_int.o ) 8 8 9 9 oprofile-y := $(DRIVER_OBJS) init.o backtrace.o 10 - oprofile-$(CONFIG_64BIT) += hwsampler.o 10 + oprofile-y += hwsampler.o
-11
arch/s390/oprofile/init.c
··· 21 21 22 22 extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth); 23 23 24 - #ifdef CONFIG_64BIT 25 - 26 24 #include "hwsampler.h" 27 25 #include "op_counter.h" 28 26 ··· 493 495 hwsampler_shutdown(); 494 496 } 495 497 496 - #endif /* CONFIG_64BIT */ 497 - 498 498 int __init oprofile_arch_init(struct oprofile_operations *ops) 499 499 { 500 500 ops->backtrace = s390_backtrace; 501 - 502 - #ifdef CONFIG_64BIT 503 501 504 502 /* 505 503 * -ENODEV is not reported to the caller. The module itself ··· 505 511 hwsampler_available = oprofile_hwsampler_init(ops) == 0; 506 512 507 513 return 0; 508 - #else 509 - return -ENODEV; 510 - #endif 511 514 } 512 515 513 516 void oprofile_arch_exit(void) 514 517 { 515 - #ifdef CONFIG_64BIT 516 518 oprofile_hwsampler_exit(); 517 - #endif 518 519 }
-2
drivers/s390/block/dasd.c
··· 1237 1237 */ 1238 1238 void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) 1239 1239 { 1240 - #ifdef CONFIG_64BIT 1241 1240 struct ccw1 *ccw; 1242 1241 1243 1242 /* Clear any idals used for the request. */ ··· 1244 1245 do { 1245 1246 clear_normalized_cda(ccw); 1246 1247 } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC)); 1247 - #endif 1248 1248 kfree(cqr->cpaddr); 1249 1249 kfree(cqr->data); 1250 1250 kfree(cqr);
-42
drivers/s390/block/dasd_diag.h
··· 38 38 u8 rdev_features; 39 39 } __attribute__ ((packed, aligned(4))); 40 40 41 - 42 - #ifdef CONFIG_64BIT 43 41 #define DASD_DIAG_FLAGA_DEFAULT DASD_DIAG_FLAGA_FORMAT_64BIT 44 42 45 43 typedef u64 blocknum_t; ··· 78 80 struct dasd_diag_bio *bio_list; 79 81 u8 spare4[8]; 80 82 } __attribute__ ((packed, aligned(8))); 81 - #else /* CONFIG_64BIT */ 82 - #define DASD_DIAG_FLAGA_DEFAULT 0x0 83 - 84 - typedef u32 blocknum_t; 85 - typedef s32 sblocknum_t; 86 - 87 - struct dasd_diag_bio { 88 - u8 type; 89 - u8 status; 90 - u16 spare1; 91 - blocknum_t block_number; 92 - u32 alet; 93 - void *buffer; 94 - } __attribute__ ((packed, aligned(8))); 95 - 96 - struct dasd_diag_init_io { 97 - u16 dev_nr; 98 - u8 flaga; 99 - u8 spare1[21]; 100 - u32 block_size; 101 - blocknum_t offset; 102 - sblocknum_t start_block; 103 - blocknum_t end_block; 104 - u8 spare2[24]; 105 - } __attribute__ ((packed, aligned(8))); 106 - 107 - struct dasd_diag_rw_io { 108 - u16 dev_nr; 109 - u8 flaga; 110 - u8 spare1[21]; 111 - u8 key; 112 - u8 flags; 113 - u8 spare2[2]; 114 - u32 block_count; 115 - u32 alet; 116 - struct dasd_diag_bio *bio_list; 117 - u32 interrupt_params; 118 - u8 spare3[20]; 119 - } __attribute__ ((packed, aligned(8))); 120 - #endif /* CONFIG_64BIT */
-6
drivers/s390/block/dasd_eckd.c
··· 1633 1633 1634 1634 static u32 get_fcx_max_data(struct dasd_device *device) 1635 1635 { 1636 - #if defined(CONFIG_64BIT) 1637 1636 int tpm, mdc; 1638 1637 int fcx_in_css, fcx_in_gneq, fcx_in_features; 1639 1638 struct dasd_eckd_private *private; ··· 1656 1657 return 0; 1657 1658 } else 1658 1659 return mdc * FCX_MAX_DATA_FACTOR; 1659 - #else 1660 - return 0; 1661 - #endif 1662 1660 } 1663 1661 1664 1662 /* ··· 2611 2615 /* Eckd can only do full blocks. */ 2612 2616 return ERR_PTR(-EINVAL); 2613 2617 count += bv.bv_len >> (block->s2b_shift + 9); 2614 - #if defined(CONFIG_64BIT) 2615 2618 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) 2616 2619 cidaw += bv.bv_len >> (block->s2b_shift + 9); 2617 - #endif 2618 2620 } 2619 2621 /* Paranoia. */ 2620 2622 if (count != last_rec - first_rec + 1)
-2
drivers/s390/block/dasd_fba.c
··· 287 287 /* Fba can only do full blocks. */ 288 288 return ERR_PTR(-EINVAL); 289 289 count += bv.bv_len >> (block->s2b_shift + 9); 290 - #if defined(CONFIG_64BIT) 291 290 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) 292 291 cidaw += bv.bv_len / blksize; 293 - #endif 294 292 } 295 293 /* Paranoia. */ 296 294 if (count != last_rec - first_rec + 1)
+1 -1
drivers/s390/char/Kconfig
··· 115 115 config HMC_DRV 116 116 def_tristate m 117 117 prompt "Support for file transfers from HMC drive CD/DVD-ROM" 118 - depends on S390 && 64BIT 118 + depends on S390 119 119 select CRC16 120 120 help 121 121 This option enables support for file transfers from a Hardware
-4
drivers/s390/char/sclp_sdias.c
··· 178 178 sccb.evbuf.event_qual = SDIAS_EQ_STORE_DATA; 179 179 sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP; 180 180 sccb.evbuf.event_id = 4712; 181 - #ifdef CONFIG_64BIT 182 181 sccb.evbuf.asa_size = SDIAS_ASA_SIZE_64; 183 - #else 184 - sccb.evbuf.asa_size = SDIAS_ASA_SIZE_32; 185 - #endif 186 182 sccb.evbuf.event_status = 0; 187 183 sccb.evbuf.blk_cnt = nr_blks; 188 184 sccb.evbuf.asa = (unsigned long)dest;
-32
drivers/s390/char/zcore.c
··· 212 212 .dump_level = 0, 213 213 .page_size = PAGE_SIZE, 214 214 .mem_start = 0, 215 - #ifdef CONFIG_64BIT 216 215 .build_arch = DUMP_ARCH_S390X, 217 - #else 218 - .build_arch = DUMP_ARCH_S390, 219 - #endif 220 216 }; 221 217 222 218 /* ··· 512 516 .llseek = no_llseek, 513 517 }; 514 518 515 - #ifdef CONFIG_32BIT 516 - 517 - static void __init set_lc_mask(struct save_area *map) 518 - { 519 - memset(&map->ext_save, 0xff, sizeof(map->ext_save)); 520 - memset(&map->timer, 0xff, sizeof(map->timer)); 521 - memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp)); 522 - memset(&map->psw, 0xff, sizeof(map->psw)); 523 - memset(&map->pref_reg, 0xff, sizeof(map->pref_reg)); 524 - memset(&map->acc_regs, 0xff, sizeof(map->acc_regs)); 525 - memset(&map->fp_regs, 0xff, sizeof(map->fp_regs)); 526 - memset(&map->gp_regs, 0xff, sizeof(map->gp_regs)); 527 - memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs)); 528 - } 529 - 530 - #else /* CONFIG_32BIT */ 531 - 532 519 static void __init set_lc_mask(struct save_area *map) 533 520 { 534 521 memset(&map->fp_regs, 0xff, sizeof(map->fp_regs)); ··· 525 546 memset(&map->acc_regs, 0xff, sizeof(map->acc_regs)); 526 547 memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs)); 527 548 } 528 - 529 - #endif /* CONFIG_32BIT */ 530 549 531 550 /* 532 551 * Initialize dump globals for a given architecture ··· 665 688 if (rc) 666 689 goto fail; 667 690 668 - #ifdef CONFIG_64BIT 669 691 if (arch == ARCH_S390) { 670 692 pr_alert("The 64-bit dump tool cannot be used for a " 671 693 "32-bit system\n"); 672 694 rc = -EINVAL; 673 695 goto fail; 674 696 } 675 - #else /* CONFIG_64BIT */ 676 - if (arch == ARCH_S390X) { 677 - pr_alert("The 32-bit dump tool cannot be used for a " 678 - "64-bit system\n"); 679 - rc = -EINVAL; 680 - goto fail; 681 - } 682 - #endif /* CONFIG_64BIT */ 683 697 684 698 rc = get_mem_info(&mem_size, &mem_end); 685 699 if (rc)
-2
drivers/s390/cio/cio.c
··· 143 143 orb->cmd.spnd = priv->options.suspend; 144 144 orb->cmd.ssic = priv->options.suspend && priv->options.inter; 145 145 orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm; 146 - #ifdef CONFIG_64BIT 147 146 /* 148 147 * for 64 bit we always support 64 bit IDAWs with 4k page size only 149 148 */ 150 149 orb->cmd.c64 = 1; 151 150 orb->cmd.i2k = 0; 152 - #endif 153 151 orb->cmd.key = key >> 4; 154 152 /* issue "Start Subchannel" */ 155 153 orb->cmd.cpa = (__u32) __pa(cpa);
-7
drivers/s390/cio/qdio.h
··· 84 84 #define QDIO_SIGA_WRITEQ 0x04 85 85 #define QDIO_SIGA_QEBSM_FLAG 0x80 86 86 87 - #ifdef CONFIG_64BIT 88 87 static inline int do_sqbs(u64 token, unsigned char state, int queue, 89 88 int *start, int *count) 90 89 { ··· 121 122 122 123 return (_ccq >> 32) & 0xff; 123 124 } 124 - #else 125 - static inline int do_sqbs(u64 token, unsigned char state, int queue, 126 - int *start, int *count) { return 0; } 127 - static inline int do_eqbs(u64 token, unsigned char *state, int queue, 128 - int *start, int *count, int ack) { return 0; } 129 - #endif /* CONFIG_64BIT */ 130 125 131 126 struct qdio_irq; 132 127
-3
drivers/s390/cio/qdio_setup.c
··· 91 91 */ 92 92 static inline int qebsm_possible(void) 93 93 { 94 - #ifdef CONFIG_64BIT 95 94 return css_general_characteristics.qebsm; 96 - #endif 97 - return 0; 98 95 } 99 96 100 97 /*
-20
drivers/s390/crypto/ap_bus.c
··· 174 174 * 175 175 * Returns 1 if AP configuration information is available. 176 176 */ 177 - #ifdef CONFIG_64BIT 178 177 static int ap_configuration_available(void) 179 178 { 180 179 return test_facility(2) && test_facility(12); 181 180 } 182 - #endif 183 181 184 182 /** 185 183 * ap_test_queue(): Test adjunct processor queue. ··· 237 239 return reg1; 238 240 } 239 241 240 - #ifdef CONFIG_64BIT 241 242 /** 242 243 * ap_queue_interruption_control(): Enable interruption for a specific AP. 243 244 * @qid: The AP queue number ··· 258 261 : "cc" ); 259 262 return reg1_out; 260 263 } 261 - #endif 262 264 263 - #ifdef CONFIG_64BIT 264 265 static inline struct ap_queue_status 265 266 __ap_query_functions(ap_qid_t qid, unsigned int *functions) 266 267 { ··· 277 282 *functions = (unsigned int)(reg2 >> 32); 278 283 return reg1; 279 284 } 280 - #endif 281 285 282 - #ifdef CONFIG_64BIT 283 286 static inline int __ap_query_configuration(struct ap_config_info *config) 284 287 { 285 288 register unsigned long reg0 asm ("0") = 0x04000000UL; ··· 295 302 296 303 return reg1; 297 304 } 298 - #endif 299 305 300 306 /** 301 307 * ap_query_functions(): Query supported functions. 
··· 309 317 */ 310 318 static int ap_query_functions(ap_qid_t qid, unsigned int *functions) 311 319 { 312 - #ifdef CONFIG_64BIT 313 320 struct ap_queue_status status; 314 321 int i; 315 322 status = __ap_query_functions(qid, functions); ··· 339 348 } 340 349 } 341 350 return -EBUSY; 342 - #else 343 - return -EINVAL; 344 - #endif 345 351 } 346 352 347 353 /** ··· 352 364 */ 353 365 static int ap_queue_enable_interruption(ap_qid_t qid, void *ind) 354 366 { 355 - #ifdef CONFIG_64BIT 356 367 struct ap_queue_status status; 357 368 int t_depth, t_device_type, rc, i; 358 369 ··· 391 404 } 392 405 } 393 406 return rc; 394 - #else 395 - return -EINVAL; 396 - #endif 397 407 } 398 408 399 409 /** ··· 1222 1238 */ 1223 1239 static void ap_query_configuration(void) 1224 1240 { 1225 - #ifdef CONFIG_64BIT 1226 1241 if (ap_configuration_available()) { 1227 1242 if (!ap_configuration) 1228 1243 ap_configuration = ··· 1231 1248 __ap_query_configuration(ap_configuration); 1232 1249 } else 1233 1250 ap_configuration = NULL; 1234 - #else 1235 - ap_configuration = NULL; 1236 - #endif 1237 1251 } 1238 1252 1239 1253 /**
-12
drivers/s390/net/ctcm_mpc.c
··· 130 130 __u32 ct, sw, rm, dup; 131 131 char *ptr, *rptr; 132 132 char tbuf[82], tdup[82]; 133 - #ifdef CONFIG_64BIT 134 133 char addr[22]; 135 - #else 136 - char addr[12]; 137 - #endif 138 134 char boff[12]; 139 135 char bhex[82], duphex[82]; 140 136 char basc[40]; ··· 143 147 144 148 for (ct = 0; ct < len; ct++, ptr++, rptr++) { 145 149 if (sw == 0) { 146 - #ifdef CONFIG_64BIT 147 150 sprintf(addr, "%16.16llx", (__u64)rptr); 148 - #else 149 - sprintf(addr, "%8.8X", (__u32)rptr); 150 - #endif 151 151 152 152 sprintf(boff, "%4.4X", (__u32)ct); 153 153 bhex[0] = '\0'; ··· 154 162 if (sw == 8) 155 163 strcat(bhex, " "); 156 164 157 - #if CONFIG_64BIT 158 165 sprintf(tbuf, "%2.2llX", (__u64)*ptr); 159 - #else 160 - sprintf(tbuf, "%2.2X", (__u32)*ptr); 161 - #endif 162 166 163 167 tbuf[2] = '\0'; 164 168 strcat(bhex, tbuf);