/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation & makes sure that
 * all memory ops have completed wrt other CPU's ( see 7-15 POP DJB ).
 */

/*
 * bcr 15,0 performs the checkpoint synchronisation described above;
 * the "memory" clobber additionally stops the compiler from moving
 * memory accesses across the barrier.
 */
#define eieio() asm volatile("bcr 15,0" : : : "memory")
#define SYNC_OTHER_CORES(x) eieio()
/* s390 uses the same full serialisation for read, write and full barriers. */
#define mb() eieio()
#define rmb() eieio()
#define wmb() eieio()
/* Dependent loads need no extra ordering here — compile to nothing. */
#define read_barrier_depends() do { } while(0)
/* SMP barriers are the same as the mandatory ones on this architecture. */
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()

/* Assign a value and then order it before all subsequent memory accesses. */
#define set_mb(var, value) do { var = value; mb(); } while (0)

#endif /* __ASM_BARRIER_H */
···129129typedef s390_compat_regs compat_elf_gregset_t;130130131131#include <linux/sched.h> /* for task_struct */132132-#include <asm/system.h> /* for save_access_regs */133132#include <asm/mmu_context.h>134133135134#include <asm/vdso.h>
+12
arch/s390/include/asm/exec.h
/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_EXEC_H
#define __ASM_EXEC_H

/*
 * Arch hook that adjusts a new task's user stack pointer at exec time;
 * the implementation (and its exact alignment/randomisation policy) lives
 * in the arch process-management code, not in this header.
 */
extern unsigned long arch_align_stack(unsigned long sp);

#endif /* __ASM_EXEC_H */
+63
arch/s390/include/asm/facility.h
/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_FACILITY_H
#define __ASM_FACILITY_H

#include <linux/string.h>
#include <linux/preempt.h>
#include <asm/lowcore.h>

#define MAX_FACILITY_BIT (256*8)	/* stfle_fac_list has 256 bytes */

/*
 * The test_facility function uses the bit ordering where the MSB is bit 0.
 * That makes it easier to query facility bits with the bit number as
 * documented in the Principles of Operation.
 */
static inline int test_facility(unsigned long nr)
{
	unsigned char *ptr;

	/* Bits beyond the stored facility list read as "not installed". */
	if (nr >= MAX_FACILITY_BIT)
		return 0;
	/* nr >> 3 selects the byte, 0x80 >> (nr & 7) the MSB-first bit. */
	ptr = (unsigned char *) &S390_lowcore.stfle_fac_list + (nr >> 3);
	return (*ptr & (0x80 >> (nr & 7))) != 0;
}

/**
 * stfle - Store facility list extended
 * @stfle_fac_list: array where facility list can be stored
 * @size: size of passed in array in double words
 *
 * Runs stfl first (always available); if bit 0x01000000 of the stfl
 * result signals the extended instruction, stfle is used to fetch the
 * full list. Any tail of the caller's buffer that the hardware did not
 * fill is zeroed.
 */
static inline void stfle(u64 *stfle_fac_list, int size)
{
	unsigned long nr;

	/* The lowcore scratch area is per-cpu; keep us on this cpu. */
	preempt_disable();
	S390_lowcore.stfl_fac_list = 0;
	asm volatile(
		"	.insn s,0xb2b10000,0(0)\n" /* stfl */
		"0:\n"
		EX_TABLE(0b, 0b)
		: "=m" (S390_lowcore.stfl_fac_list));
	nr = 4; /* bytes stored by stfl */
	memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
	if (S390_lowcore.stfl_fac_list & 0x01000000) {
		/* More facility bits available with stfle */
		register unsigned long reg0 asm("0") = size - 1;

		asm volatile(".insn s,0xb2b00000,0(%1)" /* stfle */
			     : "+d" (reg0)
			     : "a" (stfle_fac_list)
			     : "memory", "cc");
		/* stfle returns the highest filled double word index in r0. */
		nr = (reg0 + 1) * 8; /* # bytes stored by stfle */
	}
	/* Zero the part of the buffer the instruction did not write. */
	memset((char *) stfle_fac_list + nr, 0, size * 8 - nr);
	preempt_enable();
}

#endif /* __ASM_FACILITY_H */
+14
arch/s390/include/asm/mmu.h
	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list),	\
	.context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),

/*
 * tprot - test protection of a storage location
 * @addr: address to probe
 *
 * Executes the tprot instruction on @addr and returns the resulting
 * condition code (0..3), extracted via ipm/srl. If the instruction
 * itself faults, the exception table entry resumes at label 1 and the
 * initial -EFAULT is returned unchanged.
 */
static inline int tprot(unsigned long addr)
{
	int rc = -EFAULT;

	asm volatile(
		"	tprot	0(%1),0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (rc) : "a" (addr) : "cc");
	return rc;
}

#endif
···1414#define __ASM_S390_PROCESSOR_H15151616#include <linux/linkage.h>1717+#include <linux/irqflags.h>1718#include <asm/cpu.h>1819#include <asm/page.h>1920#include <asm/ptrace.h>···156155 (task_stack_page(tsk) + THREAD_SIZE) - 1)157156#define KSTK_EIP(tsk) (task_pt_regs(tsk)->psw.addr)158157#define KSTK_ESP(tsk) (task_pt_regs(tsk)->gprs[15])158158+159159+static inline unsigned short stap(void)160160+{161161+ unsigned short cpu_address;162162+163163+ asm volatile("stap %0" : "=m" (cpu_address));164164+ return cpu_address;165165+}159166160167/*161168 * Give up the time slice of the virtual PU.···311302#endif /* __s390x__ */312303 while (1);313304}305305+306306+/*307307+ * Use to set psw mask except for the first byte which308308+ * won't be changed by this function.309309+ */310310+static inline void311311+__set_psw_mask(unsigned long mask)312312+{313313+ __load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8)));314314+}315315+316316+#define local_mcck_enable() \317317+ __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK)318318+#define local_mcck_disable() \319319+ __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT)314320315321/*316322 * Basic Machine Check/Program Check Handler.
···1212#include <asm/timer.h>1313#include <asm/vdso.h>1414#include <asm/pgtable.h>1515-#include <asm/system.h>16151716/*1817 * Make sure that the compiler is new enough. We want a compiler that
···1212#include <linux/types.h>1313#include <linux/errno.h>1414#include <linux/gfp.h>1515-#include <asm/system.h>1515+#include <asm/ctl_reg.h>16161717/*1818 * This function writes to kernel memory bypassing DAT and possible