Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 patches from Martin Schwidefsky:
"The biggest patch is the rework of the smp code, something I wanted to
do for some time. There are some patches for our various dump methods
and one new thing: z/VM LGR detection. LGR stands for linux-guest-
relocation and is the guest migration feature of z/VM. For debugging
purposes we keep a log of the systems where a specific guest has lived."

Fix up trivial conflict in arch/s390/kernel/smp.c due to the scheduler
cleanup having removed some code next to removed s390 code.

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
[S390] kernel: Pass correct stack for smp_call_ipl_cpu()
[S390] Ensure that vmcore_info pointer is never accessed directly
[S390] dasd: prevent validate server for offline devices
[S390] Remove monolithic build option for zcrypt driver.
[S390] stack dump: fix indentation in output
[S390] kernel: Add OS info memory interface
[S390] Use block_sigmask()
[S390] kernel: Add z/VM LGR detection
[S390] irq: external interrupt code passing
[S390] irq: set __ARCH_IRQ_EXIT_IRQS_DISABLED
[S390] zfcpdump: Implement async sdias event processing
[S390] Use copy_to_absolute_zero() instead of "stura/sturg"
[S390] rework idle code
[S390] rework smp code
[S390] rename lowcore field
[S390] Fix gcc 4.6.0 compile warning

+1713 -1645
+1 -8
arch/s390/include/asm/cputime.h
··· 170 170 unsigned int sequence; 171 171 unsigned long long idle_count; 172 172 unsigned long long idle_enter; 173 + unsigned long long idle_exit; 173 174 unsigned long long idle_time; 174 175 int nohz_delay; 175 176 }; 176 177 177 178 DECLARE_PER_CPU(struct s390_idle_data, s390_idle); 178 179 179 - void vtime_start_cpu(__u64 int_clock, __u64 enter_timer); 180 180 cputime64_t s390_get_idle_time(int cpu); 181 181 182 182 #define arch_idle_time(cpu) s390_get_idle_time(cpu) 183 - 184 - static inline void s390_idle_check(struct pt_regs *regs, __u64 int_clock, 185 - __u64 enter_timer) 186 - { 187 - if (regs->psw.mask & PSW_MASK_WAIT) 188 - vtime_start_cpu(int_clock, enter_timer); 189 - } 190 183 191 184 static inline int s390_nohz_delay(int cpu) 192 185 {
+1
arch/s390/include/asm/debug.h
··· 131 131 132 132 void debug_set_level(debug_info_t* id, int new_level); 133 133 134 + void debug_set_critical(void); 134 135 void debug_stop_all(void); 135 136 136 137 static inline debug_entry_t*
+1
arch/s390/include/asm/hardirq.h
··· 18 18 19 19 #define __ARCH_IRQ_STAT 20 20 #define __ARCH_HAS_DO_SOFTIRQ 21 + #define __ARCH_IRQ_EXIT_IRQS_DISABLED 21 22 22 23 #define HARDIRQ_BITS 8 23 24
+1
arch/s390/include/asm/ipl.h
··· 169 169 extern int diag308(unsigned long subcode, void *addr); 170 170 extern void diag308_reset(void); 171 171 extern void store_status(void); 172 + extern void lgr_info_log(void); 172 173 173 174 #endif /* _ASM_S390_IPL_H */
+6 -1
arch/s390/include/asm/irq.h
··· 34 34 NR_IRQS, 35 35 }; 36 36 37 - typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long); 37 + struct ext_code { 38 + unsigned short subcode; 39 + unsigned short code; 40 + }; 41 + 42 + typedef void (*ext_int_handler_t)(struct ext_code, unsigned int, unsigned long); 38 43 39 44 int register_external_interrupt(u16 code, ext_int_handler_t handler); 40 45 int unregister_external_interrupt(u16 code, ext_int_handler_t handler);
+65 -54
arch/s390/include/asm/lowcore.h
··· 1 1 /* 2 - * Copyright IBM Corp. 1999,2010 2 + * Copyright IBM Corp. 1999,2012 3 3 * Author(s): Hartmut Penner <hp@de.ibm.com>, 4 4 * Martin Schwidefsky <schwidefsky@de.ibm.com>, 5 5 * Denis Joseph Barrow, ··· 11 11 #include <linux/types.h> 12 12 #include <asm/ptrace.h> 13 13 #include <asm/cpu.h> 14 - 15 - void restart_int_handler(void); 16 - void ext_int_handler(void); 17 - void system_call(void); 18 - void pgm_check_handler(void); 19 - void mcck_int_handler(void); 20 - void io_int_handler(void); 21 - void psw_restart_int_handler(void); 22 14 23 15 #ifdef CONFIG_32BIT 24 16 ··· 48 56 psw_t mcck_new_psw; /* 0x0070 */ 49 57 psw_t io_new_psw; /* 0x0078 */ 50 58 __u32 ext_params; /* 0x0080 */ 51 - __u16 cpu_addr; /* 0x0084 */ 59 + __u16 ext_cpu_addr; /* 0x0084 */ 52 60 __u16 ext_int_code; /* 0x0086 */ 53 61 __u16 svc_ilc; /* 0x0088 */ 54 62 __u16 svc_code; /* 0x008a */ ··· 109 117 __u64 steal_timer; /* 0x0288 */ 110 118 __u64 last_update_timer; /* 0x0290 */ 111 119 __u64 last_update_clock; /* 0x0298 */ 120 + __u64 int_clock; /* 0x02a0 */ 121 + __u64 mcck_clock; /* 0x02a8 */ 122 + __u64 clock_comparator; /* 0x02b0 */ 112 123 113 124 /* Current process. */ 114 - __u32 current_task; /* 0x02a0 */ 115 - __u32 thread_info; /* 0x02a4 */ 116 - __u32 kernel_stack; /* 0x02a8 */ 125 + __u32 current_task; /* 0x02b8 */ 126 + __u32 thread_info; /* 0x02bc */ 127 + __u32 kernel_stack; /* 0x02c0 */ 117 128 118 - /* Interrupt and panic stack. */ 119 - __u32 async_stack; /* 0x02ac */ 120 - __u32 panic_stack; /* 0x02b0 */ 129 + /* Interrupt, panic and restart stack. */ 130 + __u32 async_stack; /* 0x02c4 */ 131 + __u32 panic_stack; /* 0x02c8 */ 132 + __u32 restart_stack; /* 0x02cc */ 133 + 134 + /* Restart function and parameter. */ 135 + __u32 restart_fn; /* 0x02d0 */ 136 + __u32 restart_data; /* 0x02d4 */ 137 + __u32 restart_source; /* 0x02d8 */ 121 138 122 139 /* Address space pointer. 
*/ 123 - __u32 kernel_asce; /* 0x02b4 */ 124 - __u32 user_asce; /* 0x02b8 */ 125 - __u32 current_pid; /* 0x02bc */ 140 + __u32 kernel_asce; /* 0x02dc */ 141 + __u32 user_asce; /* 0x02e0 */ 142 + __u32 current_pid; /* 0x02e4 */ 126 143 127 144 /* SMP info area */ 128 - __u32 cpu_nr; /* 0x02c0 */ 129 - __u32 softirq_pending; /* 0x02c4 */ 130 - __u32 percpu_offset; /* 0x02c8 */ 131 - __u32 ext_call_fast; /* 0x02cc */ 132 - __u64 int_clock; /* 0x02d0 */ 133 - __u64 mcck_clock; /* 0x02d8 */ 134 - __u64 clock_comparator; /* 0x02e0 */ 135 - __u32 machine_flags; /* 0x02e8 */ 136 - __u32 ftrace_func; /* 0x02ec */ 137 - __u8 pad_0x02f8[0x0300-0x02f0]; /* 0x02f0 */ 145 + __u32 cpu_nr; /* 0x02e8 */ 146 + __u32 softirq_pending; /* 0x02ec */ 147 + __u32 percpu_offset; /* 0x02f0 */ 148 + __u32 machine_flags; /* 0x02f4 */ 149 + __u32 ftrace_func; /* 0x02f8 */ 150 + __u8 pad_0x02fc[0x0300-0x02fc]; /* 0x02fc */ 138 151 139 152 /* Interrupt response block */ 140 153 __u8 irb[64]; /* 0x0300 */ ··· 154 157 __u32 ipib; /* 0x0e00 */ 155 158 __u32 ipib_checksum; /* 0x0e04 */ 156 159 __u32 vmcore_info; /* 0x0e08 */ 157 - __u8 pad_0x0e0c[0x0f00-0x0e0c]; /* 0x0e0c */ 160 + __u8 pad_0x0e0c[0x0e18-0x0e0c]; /* 0x0e0c */ 161 + __u32 os_info; /* 0x0e18 */ 162 + __u8 pad_0x0e1c[0x0f00-0x0e1c]; /* 0x0e1c */ 158 163 159 164 /* Extended facility list */ 160 165 __u64 stfle_fac_list[32]; /* 0x0f00 */ ··· 188 189 __u32 ipl_parmblock_ptr; /* 0x0014 */ 189 190 __u8 pad_0x0018[0x0080-0x0018]; /* 0x0018 */ 190 191 __u32 ext_params; /* 0x0080 */ 191 - __u16 cpu_addr; /* 0x0084 */ 192 + __u16 ext_cpu_addr; /* 0x0084 */ 192 193 __u16 ext_int_code; /* 0x0086 */ 193 194 __u16 svc_ilc; /* 0x0088 */ 194 195 __u16 svc_code; /* 0x008a */ ··· 253 254 __u64 steal_timer; /* 0x02e0 */ 254 255 __u64 last_update_timer; /* 0x02e8 */ 255 256 __u64 last_update_clock; /* 0x02f0 */ 257 + __u64 int_clock; /* 0x02f8 */ 258 + __u64 mcck_clock; /* 0x0300 */ 259 + __u64 clock_comparator; /* 0x0308 */ 256 260 257 261 /* Current 
process. */ 258 - __u64 current_task; /* 0x02f8 */ 259 - __u64 thread_info; /* 0x0300 */ 260 - __u64 kernel_stack; /* 0x0308 */ 262 + __u64 current_task; /* 0x0310 */ 263 + __u64 thread_info; /* 0x0318 */ 264 + __u64 kernel_stack; /* 0x0320 */ 261 265 262 - /* Interrupt and panic stack. */ 263 - __u64 async_stack; /* 0x0310 */ 264 - __u64 panic_stack; /* 0x0318 */ 266 + /* Interrupt, panic and restart stack. */ 267 + __u64 async_stack; /* 0x0328 */ 268 + __u64 panic_stack; /* 0x0330 */ 269 + __u64 restart_stack; /* 0x0338 */ 270 + 271 + /* Restart function and parameter. */ 272 + __u64 restart_fn; /* 0x0340 */ 273 + __u64 restart_data; /* 0x0348 */ 274 + __u64 restart_source; /* 0x0350 */ 265 275 266 276 /* Address space pointer. */ 267 - __u64 kernel_asce; /* 0x0320 */ 268 - __u64 user_asce; /* 0x0328 */ 269 - __u64 current_pid; /* 0x0330 */ 277 + __u64 kernel_asce; /* 0x0358 */ 278 + __u64 user_asce; /* 0x0360 */ 279 + __u64 current_pid; /* 0x0368 */ 270 280 271 281 /* SMP info area */ 272 - __u32 cpu_nr; /* 0x0338 */ 273 - __u32 softirq_pending; /* 0x033c */ 274 - __u64 percpu_offset; /* 0x0340 */ 275 - __u64 ext_call_fast; /* 0x0348 */ 276 - __u64 int_clock; /* 0x0350 */ 277 - __u64 mcck_clock; /* 0x0358 */ 278 - __u64 clock_comparator; /* 0x0360 */ 279 - __u64 vdso_per_cpu_data; /* 0x0368 */ 280 - __u64 machine_flags; /* 0x0370 */ 281 - __u64 ftrace_func; /* 0x0378 */ 282 - __u64 gmap; /* 0x0380 */ 283 - __u8 pad_0x0388[0x0400-0x0388]; /* 0x0388 */ 282 + __u32 cpu_nr; /* 0x0370 */ 283 + __u32 softirq_pending; /* 0x0374 */ 284 + __u64 percpu_offset; /* 0x0378 */ 285 + __u64 vdso_per_cpu_data; /* 0x0380 */ 286 + __u64 machine_flags; /* 0x0388 */ 287 + __u64 ftrace_func; /* 0x0390 */ 288 + __u64 gmap; /* 0x0398 */ 289 + __u8 pad_0x03a0[0x0400-0x03a0]; /* 0x03a0 */ 284 290 285 291 /* Interrupt response block. 
*/ 286 292 __u8 irb[64]; /* 0x0400 */ ··· 302 298 */ 303 299 __u64 ipib; /* 0x0e00 */ 304 300 __u32 ipib_checksum; /* 0x0e08 */ 305 - __u64 vmcore_info; /* 0x0e0c */ 306 - __u8 pad_0x0e14[0x0f00-0x0e14]; /* 0x0e14 */ 301 + /* 302 + * Because the vmcore_info pointer is not 8 byte aligned it never 303 + * should be accessed directly. For accessing the pointer, first 304 + * copy it to a local pointer variable. 305 + */ 306 + __u8 vmcore_info[8]; /* 0x0e0c */ 307 + __u8 pad_0x0e14[0x0e18-0x0e0c]; /* 0x0e14 */ 308 + __u64 os_info; /* 0x0e18 */ 309 + __u8 pad_0x0e20[0x0f00-0x0e20]; /* 0x0e20 */ 307 310 308 311 /* Extended facility list */ 309 312 __u64 stfle_fac_list[32]; /* 0x0f00 */
+50
arch/s390/include/asm/os_info.h
··· 1 + /* 2 + * OS info memory interface 3 + * 4 + * Copyright IBM Corp. 2012 5 + * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com> 6 + */ 7 + #ifndef _ASM_S390_OS_INFO_H 8 + #define _ASM_S390_OS_INFO_H 9 + 10 + #define OS_INFO_VERSION_MAJOR 1 11 + #define OS_INFO_VERSION_MINOR 1 12 + #define OS_INFO_MAGIC 0x4f53494e464f535aULL /* OSINFOSZ */ 13 + 14 + #define OS_INFO_VMCOREINFO 0 15 + #define OS_INFO_REIPL_BLOCK 1 16 + #define OS_INFO_INIT_FN 2 17 + 18 + struct os_info_entry { 19 + u64 addr; 20 + u64 size; 21 + u32 csum; 22 + } __packed; 23 + 24 + struct os_info { 25 + u64 magic; 26 + u32 csum; 27 + u16 version_major; 28 + u16 version_minor; 29 + u64 crashkernel_addr; 30 + u64 crashkernel_size; 31 + struct os_info_entry entry[3]; 32 + u8 reserved[4004]; 33 + } __packed; 34 + 35 + void os_info_init(void); 36 + void os_info_entry_add(int nr, void *ptr, u64 len); 37 + void os_info_crashkernel_add(unsigned long base, unsigned long size); 38 + u32 os_info_csum(struct os_info *os_info); 39 + 40 + #ifdef CONFIG_CRASH_DUMP 41 + void *os_info_old_entry(int nr, unsigned long *size); 42 + int copy_from_oldmem(void *dest, void *src, size_t count); 43 + #else 44 + static inline void *os_info_old_entry(int nr, unsigned long *size) 45 + { 46 + return NULL; 47 + } 48 + #endif 49 + 50 + #endif /* _ASM_S390_OS_INFO_H */
-132
arch/s390/include/asm/sigp.h
··· 1 - /* 2 - * Routines and structures for signalling other processors. 3 - * 4 - * Copyright IBM Corp. 1999,2010 5 - * Author(s): Denis Joseph Barrow, 6 - * Martin Schwidefsky <schwidefsky@de.ibm.com>, 7 - * Heiko Carstens <heiko.carstens@de.ibm.com>, 8 - */ 9 - 10 - #ifndef __ASM_SIGP_H 11 - #define __ASM_SIGP_H 12 - 13 - #include <asm/system.h> 14 - 15 - /* Get real cpu address from logical cpu number. */ 16 - extern unsigned short __cpu_logical_map[]; 17 - 18 - static inline int cpu_logical_map(int cpu) 19 - { 20 - #ifdef CONFIG_SMP 21 - return __cpu_logical_map[cpu]; 22 - #else 23 - return stap(); 24 - #endif 25 - } 26 - 27 - enum { 28 - sigp_sense = 1, 29 - sigp_external_call = 2, 30 - sigp_emergency_signal = 3, 31 - sigp_start = 4, 32 - sigp_stop = 5, 33 - sigp_restart = 6, 34 - sigp_stop_and_store_status = 9, 35 - sigp_initial_cpu_reset = 11, 36 - sigp_cpu_reset = 12, 37 - sigp_set_prefix = 13, 38 - sigp_store_status_at_address = 14, 39 - sigp_store_extended_status_at_address = 15, 40 - sigp_set_architecture = 18, 41 - sigp_conditional_emergency_signal = 19, 42 - sigp_sense_running = 21, 43 - }; 44 - 45 - enum { 46 - sigp_order_code_accepted = 0, 47 - sigp_status_stored = 1, 48 - sigp_busy = 2, 49 - sigp_not_operational = 3, 50 - }; 51 - 52 - /* 53 - * Definitions for external call. 54 - */ 55 - enum { 56 - ec_schedule = 0, 57 - ec_call_function, 58 - ec_call_function_single, 59 - ec_stop_cpu, 60 - }; 61 - 62 - /* 63 - * Signal processor. 64 - */ 65 - static inline int raw_sigp(u16 cpu, int order) 66 - { 67 - register unsigned long reg1 asm ("1") = 0; 68 - int ccode; 69 - 70 - asm volatile( 71 - " sigp %1,%2,0(%3)\n" 72 - " ipm %0\n" 73 - " srl %0,28\n" 74 - : "=d" (ccode) 75 - : "d" (reg1), "d" (cpu), 76 - "a" (order) : "cc" , "memory"); 77 - return ccode; 78 - } 79 - 80 - /* 81 - * Signal processor with parameter. 
82 - */ 83 - static inline int raw_sigp_p(u32 parameter, u16 cpu, int order) 84 - { 85 - register unsigned int reg1 asm ("1") = parameter; 86 - int ccode; 87 - 88 - asm volatile( 89 - " sigp %1,%2,0(%3)\n" 90 - " ipm %0\n" 91 - " srl %0,28\n" 92 - : "=d" (ccode) 93 - : "d" (reg1), "d" (cpu), 94 - "a" (order) : "cc" , "memory"); 95 - return ccode; 96 - } 97 - 98 - /* 99 - * Signal processor with parameter and return status. 100 - */ 101 - static inline int raw_sigp_ps(u32 *status, u32 parm, u16 cpu, int order) 102 - { 103 - register unsigned int reg1 asm ("1") = parm; 104 - int ccode; 105 - 106 - asm volatile( 107 - " sigp %1,%2,0(%3)\n" 108 - " ipm %0\n" 109 - " srl %0,28\n" 110 - : "=d" (ccode), "+d" (reg1) 111 - : "d" (cpu), "a" (order) 112 - : "cc" , "memory"); 113 - *status = reg1; 114 - return ccode; 115 - } 116 - 117 - static inline int sigp(int cpu, int order) 118 - { 119 - return raw_sigp(cpu_logical_map(cpu), order); 120 - } 121 - 122 - static inline int sigp_p(u32 parameter, int cpu, int order) 123 - { 124 - return raw_sigp_p(parameter, cpu_logical_map(cpu), order); 125 - } 126 - 127 - static inline int sigp_ps(u32 *status, u32 parm, int cpu, int order) 128 - { 129 - return raw_sigp_ps(status, parm, cpu_logical_map(cpu), order); 130 - } 131 - 132 - #endif /* __ASM_SIGP_H */
+22 -41
arch/s390/include/asm/smp.h
··· 1 1 /* 2 - * Copyright IBM Corp. 1999,2009 2 + * Copyright IBM Corp. 1999,2012 3 3 * Author(s): Denis Joseph Barrow, 4 4 * Martin Schwidefsky <schwidefsky@de.ibm.com>, 5 5 * Heiko Carstens <heiko.carstens@de.ibm.com>, ··· 10 10 #ifdef CONFIG_SMP 11 11 12 12 #include <asm/system.h> 13 - #include <asm/sigp.h> 14 - 15 - extern void machine_restart_smp(char *); 16 - extern void machine_halt_smp(void); 17 - extern void machine_power_off_smp(void); 18 13 19 14 #define raw_smp_processor_id() (S390_lowcore.cpu_nr) 20 15 21 - extern int __cpu_disable (void); 22 - extern void __cpu_die (unsigned int cpu); 23 - extern int __cpu_up (unsigned int cpu); 24 - 25 16 extern struct mutex smp_cpu_state_mutex; 17 + extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1]; 18 + 19 + extern int __cpu_up(unsigned int cpu); 26 20 27 21 extern void arch_send_call_function_single_ipi(int cpu); 28 22 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 29 23 30 - extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1]; 24 + extern void smp_call_online_cpu(void (*func)(void *), void *); 25 + extern void smp_call_ipl_cpu(void (*func)(void *), void *); 31 26 32 - extern void smp_switch_to_ipl_cpu(void (*func)(void *), void *); 33 - extern void smp_switch_to_cpu(void (*)(void *), void *, unsigned long sp, 34 - int from, int to); 35 - extern void smp_restart_with_online_cpu(void); 36 - extern void smp_restart_cpu(void); 37 - 38 - /* 39 - * returns 1 if (virtual) cpu is scheduled 40 - * returns 0 otherwise 41 - */ 42 - static inline int smp_vcpu_scheduled(int cpu) 43 - { 44 - u32 status; 45 - 46 - switch (sigp_ps(&status, 0, cpu, sigp_sense_running)) { 47 - case sigp_status_stored: 48 - /* Check for running status */ 49 - if (status & 0x400) 50 - return 0; 51 - break; 52 - case sigp_not_operational: 53 - return 0; 54 - default: 55 - break; 56 - } 57 - return 1; 58 - } 27 + extern int smp_find_processor_id(u16 address); 28 + extern int smp_store_status(int cpu); 29 + 
extern int smp_vcpu_scheduled(int cpu); 30 + extern void smp_yield_cpu(int cpu); 31 + extern void smp_yield(void); 32 + extern void smp_stop_cpu(void); 59 33 60 34 #else /* CONFIG_SMP */ 61 35 62 - static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) 36 + static inline void smp_call_ipl_cpu(void (*func)(void *), void *data) 63 37 { 64 38 func(data); 65 39 } 66 40 67 - static inline void smp_restart_with_online_cpu(void) 41 + static inline void smp_call_online_cpu(void (*func)(void *), void *data) 68 42 { 43 + func(data); 69 44 } 70 45 71 - #define smp_vcpu_scheduled (1) 46 + static inline int smp_find_processor_id(int address) { return 0; } 47 + static inline int smp_vcpu_scheduled(int cpu) { return 1; } 48 + static inline void smp_yield_cpu(int cpu) { } 49 + static inline void smp_yield(void) { } 50 + static inline void smp_stop_cpu(void) { } 72 51 73 52 #endif /* CONFIG_SMP */ 74 53 75 54 #ifdef CONFIG_HOTPLUG_CPU 76 55 extern int smp_rescan_cpus(void); 77 56 extern void __noreturn cpu_die(void); 57 + extern void __cpu_die(unsigned int cpu); 58 + extern int __cpu_disable(void); 78 59 #else 79 60 static inline int smp_rescan_cpus(void) { return 0; } 80 61 static inline void cpu_die(void) { }
+34
arch/s390/include/asm/system.h
··· 7 7 #ifndef __ASM_SYSTEM_H 8 8 #define __ASM_SYSTEM_H 9 9 10 + #include <linux/preempt.h> 10 11 #include <linux/kernel.h> 11 12 #include <linux/errno.h> 13 + #include <linux/string.h> 12 14 #include <asm/types.h> 13 15 #include <asm/ptrace.h> 14 16 #include <asm/setup.h> ··· 248 246 return 0; 249 247 ptr = (unsigned char *) &S390_lowcore.stfle_fac_list + (nr >> 3); 250 248 return (*ptr & (0x80 >> (nr & 7))) != 0; 249 + } 250 + 251 + /** 252 + * stfle - Store facility list extended 253 + * @stfle_fac_list: array where facility list can be stored 254 + * @size: size of passed in array in double words 255 + */ 256 + static inline void stfle(u64 *stfle_fac_list, int size) 257 + { 258 + unsigned long nr; 259 + 260 + preempt_disable(); 261 + S390_lowcore.stfl_fac_list = 0; 262 + asm volatile( 263 + " .insn s,0xb2b10000,0(0)\n" /* stfl */ 264 + "0:\n" 265 + EX_TABLE(0b, 0b) 266 + : "=m" (S390_lowcore.stfl_fac_list)); 267 + nr = 4; /* bytes stored by stfl */ 268 + memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4); 269 + if (S390_lowcore.stfl_fac_list & 0x01000000) { 270 + /* More facility bits available with stfle */ 271 + register unsigned long reg0 asm("0") = size - 1; 272 + 273 + asm volatile(".insn s,0xb2b00000,0(%1)" /* stfle */ 274 + : "+d" (reg0) 275 + : "a" (stfle_fac_list) 276 + : "memory", "cc"); 277 + nr = (reg0 + 1) * 8; /* # bytes stored by stfle */ 278 + } 279 + memset((char *) stfle_fac_list + nr, 0, size * 8 - nr); 280 + preempt_enable(); 251 281 } 252 282 253 283 static inline unsigned short stap(void)
+2 -2
arch/s390/include/asm/timer.h
··· 33 33 spinlock_t lock; 34 34 __u64 timer; /* last programmed timer */ 35 35 __u64 elapsed; /* elapsed time of timer expire values */ 36 - __u64 idle; /* temp var for idle */ 37 - int do_spt; /* =1: reprogram cpu timer in idle */ 36 + __u64 idle_enter; /* cpu timer on idle enter */ 37 + __u64 idle_exit; /* cpu timer on idle exit */ 38 38 }; 39 39 40 40 extern void init_virt_timer(struct vtimer_list *timer);
+2 -2
arch/s390/include/asm/vdso.h
··· 40 40 extern struct vdso_data *vdso_data; 41 41 42 42 #ifdef CONFIG_64BIT 43 - int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore); 44 - void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore); 43 + int vdso_alloc_per_cpu(struct _lowcore *lowcore); 44 + void vdso_free_per_cpu(struct _lowcore *lowcore); 45 45 #endif 46 46 47 47 #endif /* __ASSEMBLY__ */
+1 -3
arch/s390/kernel/Makefile
··· 23 23 obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \ 24 24 processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \ 25 25 debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \ 26 - sysinfo.o jump_label.o 26 + sysinfo.o jump_label.o lgr.o os_info.o 27 27 28 28 obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 29 29 obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) ··· 34 34 obj-$(CONFIG_MODULES) += s390_ksyms.o module.o 35 35 obj-$(CONFIG_SMP) += smp.o 36 36 obj-$(CONFIG_SCHED_BOOK) += topology.o 37 - obj-$(CONFIG_SMP) += $(if $(CONFIG_64BIT),switch_cpu64.o, \ 38 - switch_cpu.o) 39 37 obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o 40 38 obj-$(CONFIG_AUDIT) += audit.o 41 39 compat-obj-$(CONFIG_AUDIT) += compat_audit.o
+16 -11
arch/s390/kernel/asm-offsets.c
··· 8 8 9 9 #include <linux/kbuild.h> 10 10 #include <linux/sched.h> 11 + #include <asm/cputime.h> 12 + #include <asm/timer.h> 11 13 #include <asm/vdso.h> 12 - #include <asm/sigp.h> 13 14 #include <asm/pgtable.h> 15 + #include <asm/system.h> 14 16 15 17 /* 16 18 * Make sure that the compiler is new enough. We want a compiler that ··· 72 70 DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC); 73 71 DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); 74 72 BLANK(); 75 - /* constants for SIGP */ 76 - DEFINE(__SIGP_STOP, sigp_stop); 77 - DEFINE(__SIGP_RESTART, sigp_restart); 78 - DEFINE(__SIGP_SENSE, sigp_sense); 79 - DEFINE(__SIGP_INITIAL_CPU_RESET, sigp_initial_cpu_reset); 80 - BLANK(); 73 + /* idle data offsets */ 74 + DEFINE(__IDLE_ENTER, offsetof(struct s390_idle_data, idle_enter)); 75 + DEFINE(__IDLE_EXIT, offsetof(struct s390_idle_data, idle_exit)); 76 + /* vtimer queue offsets */ 77 + DEFINE(__VQ_IDLE_ENTER, offsetof(struct vtimer_queue, idle_enter)); 78 + DEFINE(__VQ_IDLE_EXIT, offsetof(struct vtimer_queue, idle_exit)); 81 79 /* lowcore offsets */ 82 80 DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params)); 83 - DEFINE(__LC_CPU_ADDRESS, offsetof(struct _lowcore, cpu_addr)); 81 + DEFINE(__LC_EXT_CPU_ADDR, offsetof(struct _lowcore, ext_cpu_addr)); 84 82 DEFINE(__LC_EXT_INT_CODE, offsetof(struct _lowcore, ext_int_code)); 85 83 DEFINE(__LC_SVC_ILC, offsetof(struct _lowcore, svc_ilc)); 86 84 DEFINE(__LC_SVC_INT_CODE, offsetof(struct _lowcore, svc_code)); ··· 97 95 DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word)); 98 96 DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list)); 99 97 DEFINE(__LC_MCCK_CODE, offsetof(struct _lowcore, mcck_interruption_code)); 100 - DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib)); 101 - BLANK(); 102 - DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw)); 103 98 DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw)); 104 99 DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, 
external_old_psw)); 105 100 DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw)); 106 101 DEFINE(__LC_PGM_OLD_PSW, offsetof(struct _lowcore, program_old_psw)); 107 102 DEFINE(__LC_MCK_OLD_PSW, offsetof(struct _lowcore, mcck_old_psw)); 108 103 DEFINE(__LC_IO_OLD_PSW, offsetof(struct _lowcore, io_old_psw)); 104 + DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw)); 109 105 DEFINE(__LC_EXT_NEW_PSW, offsetof(struct _lowcore, external_new_psw)); 110 106 DEFINE(__LC_SVC_NEW_PSW, offsetof(struct _lowcore, svc_new_psw)); 111 107 DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw)); 112 108 DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw)); 113 109 DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw)); 110 + BLANK(); 114 111 DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct _lowcore, save_area_sync)); 115 112 DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct _lowcore, save_area_async)); 116 113 DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct _lowcore, save_area_restart)); ··· 130 129 DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack)); 131 130 DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack)); 132 131 DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack)); 132 + DEFINE(__LC_RESTART_STACK, offsetof(struct _lowcore, restart_stack)); 133 + DEFINE(__LC_RESTART_FN, offsetof(struct _lowcore, restart_fn)); 133 134 DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce)); 134 135 DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock)); 135 136 DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock)); 136 137 DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags)); 137 138 DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func)); 138 139 DEFINE(__LC_IRB, offsetof(struct _lowcore, irb)); 140 + DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib)); 141 + BLANK(); 139 142 DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area)); 
140 143 DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area)); 141 144 DEFINE(__LC_PSW_SAVE_AREA, offsetof(struct _lowcore, psw_save_area));
+1 -5
arch/s390/kernel/compat_signal.c
··· 581 581 int handle_signal32(unsigned long sig, struct k_sigaction *ka, 582 582 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) 583 583 { 584 - sigset_t blocked; 585 584 int ret; 586 585 587 586 /* Set up the stack frame */ ··· 590 591 ret = setup_frame32(sig, ka, oldset, regs); 591 592 if (ret) 592 593 return ret; 593 - sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask); 594 - if (!(ka->sa.sa_flags & SA_NODEFER)) 595 - sigaddset(&blocked, sig); 596 - set_current_blocked(&blocked); 594 + block_sigmask(ka, sig); 597 595 return 0; 598 596 } 599 597
+27 -10
arch/s390/kernel/crash_dump.c
··· 14 14 #include <linux/bootmem.h> 15 15 #include <linux/elf.h> 16 16 #include <asm/ipl.h> 17 + #include <asm/os_info.h> 17 18 18 19 #define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y))) 19 20 #define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y))) ··· 52 51 /* 53 52 * Copy memory from old kernel 54 53 */ 55 - static int copy_from_oldmem(void *dest, void *src, size_t count) 54 + int copy_from_oldmem(void *dest, void *src, size_t count) 56 55 { 57 56 unsigned long copied = 0; 58 57 int rc; ··· 225 224 } 226 225 227 226 /* 228 - * Initialize vmcoreinfo note (new kernel) 227 + * Get vmcoreinfo using lowcore->vmcore_info (new kernel) 229 228 */ 230 - static void *nt_vmcoreinfo(void *ptr) 229 + static void *get_vmcoreinfo_old(unsigned long *size) 231 230 { 232 231 char nt_name[11], *vmcoreinfo; 233 232 Elf64_Nhdr note; 234 233 void *addr; 235 234 236 235 if (copy_from_oldmem(&addr, &S390_lowcore.vmcore_info, sizeof(addr))) 237 - return ptr; 236 + return NULL; 238 237 memset(nt_name, 0, sizeof(nt_name)); 239 238 if (copy_from_oldmem(&note, addr, sizeof(note))) 240 - return ptr; 239 + return NULL; 241 240 if (copy_from_oldmem(nt_name, addr + sizeof(note), sizeof(nt_name) - 1)) 242 - return ptr; 241 + return NULL; 243 242 if (strcmp(nt_name, "VMCOREINFO") != 0) 244 - return ptr; 245 - vmcoreinfo = kzalloc_panic(note.n_descsz + 1); 243 + return NULL; 244 + vmcoreinfo = kzalloc_panic(note.n_descsz); 246 245 if (copy_from_oldmem(vmcoreinfo, addr + 24, note.n_descsz)) 246 + return NULL; 247 + *size = note.n_descsz; 248 + return vmcoreinfo; 249 + } 250 + 251 + /* 252 + * Initialize vmcoreinfo note (new kernel) 253 + */ 254 + static void *nt_vmcoreinfo(void *ptr) 255 + { 256 + unsigned long size; 257 + void *vmcoreinfo; 258 + 259 + vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size); 260 + if (!vmcoreinfo) 261 + vmcoreinfo = get_vmcoreinfo_old(&size); 262 + if (!vmcoreinfo) 247 263 return ptr; 248 - vmcoreinfo[note.n_descsz + 1] = 0; 249 - return 
nt_init(ptr, 0, vmcoreinfo, note.n_descsz, "VMCOREINFO"); 264 + return nt_init(ptr, 0, vmcoreinfo, size, "VMCOREINFO"); 250 265 } 251 266 252 267 /*
+31 -9
arch/s390/kernel/debug.c
··· 2 2 * arch/s390/kernel/debug.c 3 3 * S/390 debug facility 4 4 * 5 - * Copyright (C) 1999, 2000 IBM Deutschland Entwicklung GmbH, 6 - * IBM Corporation 5 + * Copyright IBM Corp. 1999, 2012 6 + * 7 7 * Author(s): Michael Holzheu (holzheu@de.ibm.com), 8 8 * Holger Smolinski (Holger.Smolinski@de.ibm.com) 9 9 * ··· 167 167 static DEFINE_MUTEX(debug_mutex); 168 168 169 169 static int initialized; 170 + static int debug_critical; 170 171 171 172 static const struct file_operations debug_file_ops = { 172 173 .owner = THIS_MODULE, ··· 933 932 } 934 933 935 934 935 + void debug_set_critical(void) 936 + { 937 + debug_critical = 1; 938 + } 939 + 936 940 /* 937 941 * debug_event_common: 938 942 * - write debug entry with given size ··· 951 945 952 946 if (!debug_active || !id->areas) 953 947 return NULL; 954 - spin_lock_irqsave(&id->lock, flags); 948 + if (debug_critical) { 949 + if (!spin_trylock_irqsave(&id->lock, flags)) 950 + return NULL; 951 + } else 952 + spin_lock_irqsave(&id->lock, flags); 955 953 active = get_active_entry(id); 956 954 memset(DEBUG_DATA(active), 0, id->buf_size); 957 955 memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size)); ··· 978 968 979 969 if (!debug_active || !id->areas) 980 970 return NULL; 981 - spin_lock_irqsave(&id->lock, flags); 971 + if (debug_critical) { 972 + if (!spin_trylock_irqsave(&id->lock, flags)) 973 + return NULL; 974 + } else 975 + spin_lock_irqsave(&id->lock, flags); 982 976 active = get_active_entry(id); 983 977 memset(DEBUG_DATA(active), 0, id->buf_size); 984 978 memcpy(DEBUG_DATA(active), buf, min(len, id->buf_size)); ··· 1027 1013 return NULL; 1028 1014 numargs=debug_count_numargs(string); 1029 1015 1030 - spin_lock_irqsave(&id->lock, flags); 1016 + if (debug_critical) { 1017 + if (!spin_trylock_irqsave(&id->lock, flags)) 1018 + return NULL; 1019 + } else 1020 + spin_lock_irqsave(&id->lock, flags); 1031 1021 active = get_active_entry(id); 1032 1022 curr_event=(debug_sprintf_entry_t *) DEBUG_DATA(active); 1033 1023 
va_start(ap,string); ··· 1065 1047 1066 1048 numargs=debug_count_numargs(string); 1067 1049 1068 - spin_lock_irqsave(&id->lock, flags); 1050 + if (debug_critical) { 1051 + if (!spin_trylock_irqsave(&id->lock, flags)) 1052 + return NULL; 1053 + } else 1054 + spin_lock_irqsave(&id->lock, flags); 1069 1055 active = get_active_entry(id); 1070 1056 curr_event=(debug_sprintf_entry_t *)DEBUG_DATA(active); 1071 1057 va_start(ap,string); ··· 1450 1428 rc += sprintf(out_buf + rc, "| "); 1451 1429 for (i = 0; i < id->buf_size; i++) { 1452 1430 unsigned char c = in_buf[i]; 1453 - if (!isprint(c)) 1454 - rc += sprintf(out_buf + rc, "."); 1455 - else 1431 + if (isascii(c) && isprint(c)) 1456 1432 rc += sprintf(out_buf + rc, "%c", c); 1433 + else 1434 + rc += sprintf(out_buf + rc, "."); 1457 1435 } 1458 1436 rc += sprintf(out_buf + rc, "\n"); 1459 1437 return rc;
+3 -19
arch/s390/kernel/early.c
··· 29 29 #include <asm/sysinfo.h> 30 30 #include <asm/cpcmd.h> 31 31 #include <asm/sclp.h> 32 + #include <asm/system.h> 32 33 #include "entry.h" 33 34 34 35 /* ··· 263 262 264 263 static noinline __init void setup_facility_list(void) 265 264 { 266 - unsigned long nr; 267 - 268 - S390_lowcore.stfl_fac_list = 0; 269 - asm volatile( 270 - " .insn s,0xb2b10000,0(0)\n" /* stfl */ 271 - "0:\n" 272 - EX_TABLE(0b,0b) : "=m" (S390_lowcore.stfl_fac_list)); 273 - memcpy(&S390_lowcore.stfle_fac_list, &S390_lowcore.stfl_fac_list, 4); 274 - nr = 4; /* # bytes stored by stfl */ 275 - if (test_facility(7)) { 276 - /* More facility bits available with stfle */ 277 - register unsigned long reg0 asm("0") = MAX_FACILITY_BIT/64 - 1; 278 - asm volatile(".insn s,0xb2b00000,%0" /* stfle */ 279 - : "=m" (S390_lowcore.stfle_fac_list), "+d" (reg0) 280 - : : "cc"); 281 - nr = (reg0 + 1) * 8; /* # bytes stored by stfle */ 282 - } 283 - memset((char *) S390_lowcore.stfle_fac_list + nr, 0, 284 - MAX_FACILITY_BIT/8 - nr); 265 + stfle(S390_lowcore.stfle_fac_list, 266 + ARRAY_SIZE(S390_lowcore.stfle_fac_list)); 285 267 } 286 268 287 269 static noinline __init void setup_hpage(void)
+88 -71
arch/s390/kernel/entry.S
··· 2 2 * arch/s390/kernel/entry.S 3 3 * S390 low-level entry points. 4 4 * 5 - * Copyright (C) IBM Corp. 1999,2006 5 + * Copyright (C) IBM Corp. 1999,2012 6 6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 7 7 * Hartmut Penner (hp@de.ibm.com), 8 8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), ··· 105 105 106 106 .macro ADD64 high,low,timer 107 107 al \high,\timer 108 - al \low,\timer+4 108 + al \low,4+\timer 109 109 brc 12,.+8 110 110 ahi \high,1 111 111 .endm 112 112 113 113 .macro SUB64 high,low,timer 114 114 sl \high,\timer 115 - sl \low,\timer+4 115 + sl \low,4+\timer 116 116 brc 3,.+8 117 117 ahi \high,-1 118 118 .endm ··· 471 471 jnz io_work # there is work to do (signals etc.) 472 472 io_restore: 473 473 mvc __LC_RETURN_PSW(8),__PT_PSW(%r11) 474 - ni __LC_RETURN_PSW+1,0xfd # clean wait state bit 475 474 stpt __LC_EXIT_TIMER 476 475 lm %r0,%r15,__PT_R0(%r11) 477 476 lpsw __LC_RETURN_PSW ··· 605 606 stm %r8,%r9,__PT_PSW(%r11) 606 607 TRACE_IRQS_OFF 607 608 lr %r2,%r11 # pass pointer to pt_regs 608 - l %r3,__LC_CPU_ADDRESS # get cpu address + interruption code 609 + l %r3,__LC_EXT_CPU_ADDR # get cpu address + interruption code 609 610 l %r4,__LC_EXT_PARAMS # get external parameters 610 611 l %r1,BASED(.Ldo_extint) 611 612 basr %r14,%r1 # call do_extint 612 613 j io_return 614 + 615 + /* 616 + * Load idle PSW. The second "half" of this function is in cleanup_idle. 
617 + */ 618 + ENTRY(psw_idle) 619 + st %r4,__SF_EMPTY(%r15) 620 + basr %r1,0 621 + la %r1,psw_idle_lpsw+4-.(%r1) 622 + st %r1,__SF_EMPTY+4(%r15) 623 + oi __SF_EMPTY+4(%r15),0x80 624 + la %r1,.Lvtimer_max-psw_idle_lpsw-4(%r1) 625 + stck __IDLE_ENTER(%r2) 626 + ltr %r5,%r5 627 + stpt __VQ_IDLE_ENTER(%r3) 628 + jz psw_idle_lpsw 629 + spt 0(%r1) 630 + psw_idle_lpsw: 631 + lpsw __SF_EMPTY(%r15) 632 + br %r14 633 + psw_idle_end: 613 634 614 635 __critical_end: 615 636 ··· 692 673 TRACE_IRQS_ON 693 674 mcck_return: 694 675 mvc __LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW 695 - ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit 696 676 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? 697 677 jno 0f 698 678 lm %r0,%r15,__PT_R0(%r11) ··· 709 691 0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 710 692 j mcck_skip 711 693 712 - /* 713 - * Restart interruption handler, kick starter for additional CPUs 714 - */ 715 - #ifdef CONFIG_SMP 716 - __CPUINIT 717 - ENTRY(restart_int_handler) 718 - basr %r1,0 719 - restart_base: 720 - spt restart_vtime-restart_base(%r1) 721 - stck __LC_LAST_UPDATE_CLOCK 722 - mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1) 723 - mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1) 724 - l %r15,__LC_GPREGS_SAVE_AREA+60 # load ksp 725 - lctl %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs 726 - lam %a0,%a15,__LC_AREGS_SAVE_AREA 727 - lm %r6,%r15,__SF_GPRS(%r15)# load registers from clone 728 - l %r1,__LC_THREAD_INFO 729 - mvc __LC_USER_TIMER(8),__TI_user_timer(%r1) 730 - mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) 731 - xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER 732 - ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off 733 - basr %r14,0 734 - l %r14,restart_addr-.(%r14) 735 - basr %r14,%r14 # call start_secondary 736 - restart_addr: 737 - .long start_secondary 738 - .align 8 739 - restart_vtime: 740 - .long 0x7fffffff,0xffffffff 741 - .previous 742 - #else 743 - /* 744 - * If we do not run with SMP enabled, let the new 
CPU crash ... 745 - */ 746 - ENTRY(restart_int_handler) 747 - basr %r1,0 748 - restart_base: 749 - lpsw restart_crash-restart_base(%r1) 750 - .align 8 751 - restart_crash: 752 - .long 0x000a0000,0x00000000 753 - restart_go: 754 - #endif 755 - 756 694 # 757 695 # PSW restart interrupt handler 758 696 # 759 - ENTRY(psw_restart_int_handler) 697 + ENTRY(restart_int_handler) 760 698 st %r15,__LC_SAVE_AREA_RESTART 761 - basr %r15,0 762 - 0: l %r15,.Lrestart_stack-0b(%r15) # load restart stack 763 - l %r15,0(%r15) 699 + l %r15,__LC_RESTART_STACK 764 700 ahi %r15,-__PT_SIZE # create pt_regs on stack 701 + xc 0(__PT_SIZE,%r15),0(%r15) 765 702 stm %r0,%r14,__PT_R0(%r15) 766 703 mvc __PT_R15(4,%r15),__LC_SAVE_AREA_RESTART 767 704 mvc __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw 768 - ahi %r15,-STACK_FRAME_OVERHEAD 769 - xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) 770 - basr %r14,0 771 - 1: l %r14,.Ldo_restart-1b(%r14) 772 - basr %r14,%r14 773 - basr %r14,0 # load disabled wait PSW if 774 - 2: lpsw restart_psw_crash-2b(%r14) # do_restart returns 775 - .align 4 776 - .Ldo_restart: 777 - .long do_restart 778 - .Lrestart_stack: 779 - .long restart_stack 780 - .align 8 781 - restart_psw_crash: 782 - .long 0x000a0000,0x00000000 + restart_psw_crash 705 + ahi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack 706 + xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15) 707 + lm %r1,%r3,__LC_RESTART_FN # load fn, parm & source cpu 708 + ltr %r3,%r3 # test source cpu address 709 + jm 1f # negative -> skip source stop 710 + 0: sigp %r4,%r3,1 # sigp sense to source cpu 711 + brc 10,0b # wait for status stored 712 + 1: basr %r14,%r1 # call function 713 + stap __SF_EMPTY(%r15) # store cpu address 714 + lh %r3,__SF_EMPTY(%r15) 715 + 2: sigp %r4,%r3,5 # sigp stop to current cpu 716 + brc 2,2b 717 + 3: j 3b 783 718 784 719 .section .kprobes.text, "ax" 785 720 ··· 766 795 .long io_tif + 0x80000000 767 796 .long io_restore + 0x80000000 768 797 .long io_done + 0x80000000 798 + .long 
psw_idle + 0x80000000 799 + .long psw_idle_end + 0x80000000 769 800 770 801 cleanup_critical: 771 802 cl %r9,BASED(cleanup_table) # system_call ··· 786 813 jl cleanup_io_tif 787 814 cl %r9,BASED(cleanup_table+28) # io_done 788 815 jl cleanup_io_restore 816 + cl %r9,BASED(cleanup_table+32) # psw_idle 817 + jl 0f 818 + cl %r9,BASED(cleanup_table+36) # psw_idle_end 819 + jl cleanup_idle 789 820 0: br %r14 790 821 791 822 cleanup_system_call: ··· 873 896 jhe 0f 874 897 l %r9,12(%r11) # get saved r11 pointer to pt_regs 875 898 mvc __LC_RETURN_PSW(8),__PT_PSW(%r9) 876 - ni __LC_RETURN_PSW+1,0xfd # clear wait state bit 877 899 mvc 0(32,%r11),__PT_R8(%r9) 878 900 lm %r0,%r7,__PT_R0(%r9) 879 901 0: lm %r8,%r9,__LC_RETURN_PSW ··· 880 904 cleanup_io_restore_insn: 881 905 .long io_done - 4 + 0x80000000 882 906 907 + cleanup_idle: 908 + # copy interrupt clock & cpu timer 909 + mvc __IDLE_EXIT(8,%r2),__LC_INT_CLOCK 910 + mvc __VQ_IDLE_EXIT(8,%r3),__LC_ASYNC_ENTER_TIMER 911 + chi %r11,__LC_SAVE_AREA_ASYNC 912 + je 0f 913 + mvc __IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK 914 + mvc __VQ_IDLE_EXIT(8,%r3),__LC_MCCK_ENTER_TIMER 915 + 0: # check if stck has been executed 916 + cl %r9,BASED(cleanup_idle_insn) 917 + jhe 1f 918 + mvc __IDLE_ENTER(8,%r2),__IDLE_EXIT(%r2) 919 + mvc __VQ_IDLE_ENTER(8,%r3),__VQ_IDLE_EXIT(%r3) 920 + j 2f 921 + 1: # check if the cpu timer has been reprogrammed 922 + ltr %r5,%r5 923 + jz 2f 924 + spt __VQ_IDLE_ENTER(%r3) 925 + 2: # account system time going idle 926 + lm %r9,%r10,__LC_STEAL_TIMER 927 + ADD64 %r9,%r10,__IDLE_ENTER(%r2) 928 + SUB64 %r9,%r10,__LC_LAST_UPDATE_CLOCK 929 + stm %r9,%r10,__LC_STEAL_TIMER 930 + mvc __LC_LAST_UPDATE_CLOCK(8),__IDLE_EXIT(%r2) 931 + lm %r9,%r10,__LC_SYSTEM_TIMER 932 + ADD64 %r9,%r10,__LC_LAST_UPDATE_TIMER 933 + SUB64 %r9,%r10,__VQ_IDLE_ENTER(%r3) 934 + stm %r9,%r10,__LC_SYSTEM_TIMER 935 + mvc __LC_LAST_UPDATE_TIMER(8),__VQ_IDLE_EXIT(%r3) 936 + # prepare return psw 937 + n %r8,BASED(cleanup_idle_wait) # clear wait state bit 938 + l 
%r9,24(%r11) # return from psw_idle 939 + br %r14 940 + cleanup_idle_insn: 941 + .long psw_idle_lpsw + 0x80000000 942 + cleanup_idle_wait: 943 + .long 0xfffdffff 944 + 883 945 /* 884 946 * Integer constants 885 947 */ 886 948 .align 4 887 - .Lnr_syscalls: .long NR_syscalls 949 + .Lnr_syscalls: 950 + .long NR_syscalls 951 + .Lvtimer_max: 952 + .quad 0x7fffffffffffffff 888 953 889 954 /* 890 955 * Symbol constants
+14 -3
arch/s390/kernel/entry.h
··· 4 4 #include <linux/types.h> 5 5 #include <linux/signal.h> 6 6 #include <asm/ptrace.h> 7 - 7 + #include <asm/cputime.h> 8 + #include <asm/timer.h> 8 9 9 10 extern void (*pgm_check_table[128])(struct pt_regs *); 10 11 extern void *restart_stack; 12 + 13 + void system_call(void); 14 + void pgm_check_handler(void); 15 + void ext_int_handler(void); 16 + void io_int_handler(void); 17 + void mcck_int_handler(void); 18 + void restart_int_handler(void); 19 + void restart_call_handler(void); 20 + void psw_idle(struct s390_idle_data *, struct vtimer_queue *, 21 + unsigned long, int); 11 22 12 23 asmlinkage long do_syscall_trace_enter(struct pt_regs *regs); 13 24 asmlinkage void do_syscall_trace_exit(struct pt_regs *regs); ··· 35 24 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs); 36 25 void do_notify_resume(struct pt_regs *regs); 37 26 38 - void do_extint(struct pt_regs *regs, unsigned int, unsigned int, unsigned long); 27 + struct ext_code; 28 + void do_extint(struct pt_regs *regs, struct ext_code, unsigned int, unsigned long); 39 29 void do_restart(void); 40 - int __cpuinit start_secondary(void *cpuvoid); 41 30 void __init startup_init(void); 42 31 void die(struct pt_regs *regs, const char *str); 43 32
+80 -59
arch/s390/kernel/entry64.S
··· 2 2 * arch/s390/kernel/entry64.S 3 3 * S390 low-level entry points. 4 4 * 5 - * Copyright (C) IBM Corp. 1999,2010 5 + * Copyright (C) IBM Corp. 1999,2012 6 6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 7 7 * Hartmut Penner (hp@de.ibm.com), 8 8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), ··· 489 489 lg %r14,__LC_VDSO_PER_CPU 490 490 lmg %r0,%r10,__PT_R0(%r11) 491 491 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 492 - ni __LC_RETURN_PSW+1,0xfd # clear wait state bit 493 492 stpt __LC_EXIT_TIMER 494 493 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 495 494 lmg %r11,%r15,__PT_R11(%r11) ··· 624 625 TRACE_IRQS_OFF 625 626 lghi %r1,4096 626 627 lgr %r2,%r11 # pass pointer to pt_regs 627 - llgf %r3,__LC_CPU_ADDRESS # get cpu address + interruption code 628 + llgf %r3,__LC_EXT_CPU_ADDR # get cpu address + interruption code 628 629 llgf %r4,__LC_EXT_PARAMS # get external parameter 629 630 lg %r5,__LC_EXT_PARAMS2-4096(%r1) # get 64 bit external parameter 630 631 brasl %r14,do_extint 631 632 j io_return 633 + 634 + /* 635 + * Load idle PSW. The second "half" of this function is in cleanup_idle. 636 + */ 637 + ENTRY(psw_idle) 638 + stg %r4,__SF_EMPTY(%r15) 639 + larl %r1,psw_idle_lpsw+4 640 + stg %r1,__SF_EMPTY+8(%r15) 641 + larl %r1,.Lvtimer_max 642 + stck __IDLE_ENTER(%r2) 643 + ltr %r5,%r5 644 + stpt __VQ_IDLE_ENTER(%r3) 645 + jz psw_idle_lpsw 646 + spt 0(%r1) 647 + psw_idle_lpsw: 648 + lpswe __SF_EMPTY(%r15) 649 + br %r14 650 + psw_idle_end: 632 651 633 652 __critical_end: 634 653 ··· 713 696 lg %r14,__LC_VDSO_PER_CPU 714 697 lmg %r0,%r10,__PT_R0(%r11) 715 698 mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW 716 - ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit 717 699 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? 
718 700 jno 0f 719 701 stpt __LC_EXIT_TIMER ··· 729 713 0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 730 714 j mcck_skip 731 715 732 - /* 733 - * Restart interruption handler, kick starter for additional CPUs 734 - */ 735 - #ifdef CONFIG_SMP 736 - __CPUINIT 737 - ENTRY(restart_int_handler) 738 - basr %r1,0 739 - restart_base: 740 - spt restart_vtime-restart_base(%r1) 741 - stck __LC_LAST_UPDATE_CLOCK 742 - mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1) 743 - mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1) 744 - lghi %r10,__LC_GPREGS_SAVE_AREA 745 - lg %r15,120(%r10) # load ksp 746 - lghi %r10,__LC_CREGS_SAVE_AREA 747 - lctlg %c0,%c15,0(%r10) # get new ctl regs 748 - lghi %r10,__LC_AREGS_SAVE_AREA 749 - lam %a0,%a15,0(%r10) 750 - lmg %r6,%r15,__SF_GPRS(%r15)# load registers from clone 751 - lg %r1,__LC_THREAD_INFO 752 - mvc __LC_USER_TIMER(8),__TI_user_timer(%r1) 753 - mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) 754 - xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER 755 - ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off 756 - brasl %r14,start_secondary 757 - .align 8 758 - restart_vtime: 759 - .long 0x7fffffff,0xffffffff 760 - .previous 761 - #else 762 - /* 763 - * If we do not run with SMP enabled, let the new CPU crash ... 
764 - */ 765 - ENTRY(restart_int_handler) 766 - basr %r1,0 767 - restart_base: 768 - lpswe restart_crash-restart_base(%r1) 769 - .align 8 770 - restart_crash: 771 - .long 0x000a0000,0x00000000,0x00000000,0x00000000 772 - restart_go: 773 - #endif 774 - 775 716 # 776 717 # PSW restart interrupt handler 777 718 # 778 - ENTRY(psw_restart_int_handler) 719 + ENTRY(restart_int_handler) 779 720 stg %r15,__LC_SAVE_AREA_RESTART 780 - larl %r15,restart_stack # load restart stack 781 - lg %r15,0(%r15) 721 + lg %r15,__LC_RESTART_STACK 782 722 aghi %r15,-__PT_SIZE # create pt_regs on stack 723 + xc 0(__PT_SIZE,%r15),0(%r15) 783 724 stmg %r0,%r14,__PT_R0(%r15) 784 725 mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART 785 726 mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw 786 - aghi %r15,-STACK_FRAME_OVERHEAD 787 - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 788 - brasl %r14,do_restart 789 - larl %r14,restart_psw_crash # load disabled wait PSW if 790 - lpswe 0(%r14) # do_restart returns 791 - .align 8 792 - restart_psw_crash: 793 - .quad 0x0002000080000000,0x0000000000000000 + restart_psw_crash 727 + aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack 728 + xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15) 729 + lmg %r1,%r3,__LC_RESTART_FN # load fn, parm & source cpu 730 + ltgr %r3,%r3 # test source cpu address 731 + jm 1f # negative -> skip source stop 732 + 0: sigp %r4,%r3,1 # sigp sense to source cpu 733 + brc 10,0b # wait for status stored 734 + 1: basr %r14,%r1 # call function 735 + stap __SF_EMPTY(%r15) # store cpu address 736 + llgh %r3,__SF_EMPTY(%r15) 737 + 2: sigp %r4,%r3,5 # sigp stop to current cpu 738 + brc 2,2b 739 + 3: j 3b 794 740 795 741 .section .kprobes.text, "ax" 796 742 ··· 786 808 .quad io_tif 787 809 .quad io_restore 788 810 .quad io_done 811 + .quad psw_idle 812 + .quad psw_idle_end 789 813 790 814 cleanup_critical: 791 815 clg %r9,BASED(cleanup_table) # system_call ··· 806 826 jl cleanup_io_tif 807 827 clg %r9,BASED(cleanup_table+56) # 
io_done 808 828 jl cleanup_io_restore 829 + clg %r9,BASED(cleanup_table+64) # psw_idle 830 + jl 0f 831 + clg %r9,BASED(cleanup_table+72) # psw_idle_end 832 + jl cleanup_idle 809 833 0: br %r14 810 834 811 835 ··· 899 915 je 0f 900 916 lg %r9,24(%r11) # get saved r11 pointer to pt_regs 901 917 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) 902 - ni __LC_RETURN_PSW+1,0xfd # clear wait state bit 903 918 mvc 0(64,%r11),__PT_R8(%r9) 904 919 lmg %r0,%r7,__PT_R0(%r9) 905 920 0: lmg %r8,%r9,__LC_RETURN_PSW 906 921 br %r14 907 922 cleanup_io_restore_insn: 908 923 .quad io_done - 4 924 + 925 + cleanup_idle: 926 + # copy interrupt clock & cpu timer 927 + mvc __IDLE_EXIT(8,%r2),__LC_INT_CLOCK 928 + mvc __VQ_IDLE_EXIT(8,%r3),__LC_ASYNC_ENTER_TIMER 929 + cghi %r11,__LC_SAVE_AREA_ASYNC 930 + je 0f 931 + mvc __IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK 932 + mvc __VQ_IDLE_EXIT(8,%r3),__LC_MCCK_ENTER_TIMER 933 + 0: # check if stck & stpt have been executed 934 + clg %r9,BASED(cleanup_idle_insn) 935 + jhe 1f 936 + mvc __IDLE_ENTER(8,%r2),__IDLE_EXIT(%r2) 937 + mvc __VQ_IDLE_ENTER(8,%r3),__VQ_IDLE_EXIT(%r3) 938 + j 2f 939 + 1: # check if the cpu timer has been reprogrammed 940 + ltr %r5,%r5 941 + jz 2f 942 + spt __VQ_IDLE_ENTER(%r3) 943 + 2: # account system time going idle 944 + lg %r9,__LC_STEAL_TIMER 945 + alg %r9,__IDLE_ENTER(%r2) 946 + slg %r9,__LC_LAST_UPDATE_CLOCK 947 + stg %r9,__LC_STEAL_TIMER 948 + mvc __LC_LAST_UPDATE_CLOCK(8),__IDLE_EXIT(%r2) 949 + lg %r9,__LC_SYSTEM_TIMER 950 + alg %r9,__LC_LAST_UPDATE_TIMER 951 + slg %r9,__VQ_IDLE_ENTER(%r3) 952 + stg %r9,__LC_SYSTEM_TIMER 953 + mvc __LC_LAST_UPDATE_TIMER(8),__VQ_IDLE_EXIT(%r3) 954 + # prepare return psw 955 + nihh %r8,0xfffd # clear wait state bit 956 + lg %r9,48(%r11) # return from psw_idle 957 + br %r14 958 + cleanup_idle_insn: 959 + .quad psw_idle_lpsw 909 960 910 961 /* 911 962 * Integer constants ··· 950 931 .quad __critical_start 951 932 .Lcritical_length: 952 933 .quad __critical_end - __critical_start 934 + .Lvtimer_max: 935 + 
.quad 0x7fffffffffffffff 953 936 954 937 955 938 #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+58 -41
arch/s390/kernel/ipl.c
··· 2 2 * arch/s390/kernel/ipl.c 3 3 * ipl/reipl/dump support for Linux on s390. 4 4 * 5 - * Copyright IBM Corp. 2005,2007 5 + * Copyright IBM Corp. 2005,2012 6 6 * Author(s): Michael Holzheu <holzheu@de.ibm.com> 7 7 * Heiko Carstens <heiko.carstens@de.ibm.com> 8 8 * Volker Sameske <sameske@de.ibm.com> ··· 17 17 #include <linux/fs.h> 18 18 #include <linux/gfp.h> 19 19 #include <linux/crash_dump.h> 20 + #include <linux/debug_locks.h> 20 21 #include <asm/ipl.h> 21 22 #include <asm/smp.h> 22 23 #include <asm/setup.h> ··· 26 25 #include <asm/ebcdic.h> 27 26 #include <asm/reset.h> 28 27 #include <asm/sclp.h> 29 - #include <asm/sigp.h> 30 28 #include <asm/checksum.h> 29 + #include <asm/debug.h> 30 + #include <asm/os_info.h> 31 31 #include "entry.h" 32 32 33 33 #define IPL_PARM_BLOCK_VERSION 0 ··· 573 571 574 572 static void ipl_run(struct shutdown_trigger *trigger) 575 573 { 576 - smp_switch_to_ipl_cpu(__ipl_run, NULL); 574 + smp_call_ipl_cpu(__ipl_run, NULL); 577 575 } 578 576 579 577 static int __init ipl_init(void) ··· 952 950 .attrs = reipl_nss_attrs, 953 951 }; 954 952 953 + static void set_reipl_block_actual(struct ipl_parameter_block *reipl_block) 954 + { 955 + reipl_block_actual = reipl_block; 956 + os_info_entry_add(OS_INFO_REIPL_BLOCK, reipl_block_actual, 957 + reipl_block->hdr.len); 958 + } 959 + 955 960 /* reipl type */ 956 961 957 962 static int reipl_set_type(enum ipl_type type) ··· 974 965 reipl_method = REIPL_METHOD_CCW_VM; 975 966 else 976 967 reipl_method = REIPL_METHOD_CCW_CIO; 977 - reipl_block_actual = reipl_block_ccw; 968 + set_reipl_block_actual(reipl_block_ccw); 978 969 break; 979 970 case IPL_TYPE_FCP: 980 971 if (diag308_set_works) ··· 983 974 reipl_method = REIPL_METHOD_FCP_RO_VM; 984 975 else 985 976 reipl_method = REIPL_METHOD_FCP_RO_DIAG; 986 - reipl_block_actual = reipl_block_fcp; 977 + set_reipl_block_actual(reipl_block_fcp); 987 978 break; 988 979 case IPL_TYPE_FCP_DUMP: 989 980 reipl_method = REIPL_METHOD_FCP_DUMP; ··· 993 984 
reipl_method = REIPL_METHOD_NSS_DIAG; 994 985 else 995 986 reipl_method = REIPL_METHOD_NSS; 996 - reipl_block_actual = reipl_block_nss; 987 + set_reipl_block_actual(reipl_block_nss); 997 988 break; 998 989 case IPL_TYPE_UNKNOWN: 999 990 reipl_method = REIPL_METHOD_DEFAULT; ··· 1110 1101 1111 1102 static void reipl_run(struct shutdown_trigger *trigger) 1112 1103 { 1113 - smp_switch_to_ipl_cpu(__reipl_run, NULL); 1104 + smp_call_ipl_cpu(__reipl_run, NULL); 1114 1105 } 1115 1106 1116 1107 static void reipl_block_ccw_init(struct ipl_parameter_block *ipb) ··· 1265 1256 return 0; 1266 1257 } 1267 1258 1259 + static int __init reipl_type_init(void) 1260 + { 1261 + enum ipl_type reipl_type = ipl_info.type; 1262 + struct ipl_parameter_block *reipl_block; 1263 + unsigned long size; 1264 + 1265 + reipl_block = os_info_old_entry(OS_INFO_REIPL_BLOCK, &size); 1266 + if (!reipl_block) 1267 + goto out; 1268 + /* 1269 + * If we have an OS info reipl block, this will be used 1270 + */ 1271 + if (reipl_block->hdr.pbt == DIAG308_IPL_TYPE_FCP) { 1272 + memcpy(reipl_block_fcp, reipl_block, size); 1273 + reipl_type = IPL_TYPE_FCP; 1274 + } else if (reipl_block->hdr.pbt == DIAG308_IPL_TYPE_CCW) { 1275 + memcpy(reipl_block_ccw, reipl_block, size); 1276 + reipl_type = IPL_TYPE_CCW; 1277 + } 1278 + out: 1279 + return reipl_set_type(reipl_type); 1280 + } 1281 + 1268 1282 static int __init reipl_init(void) 1269 1283 { 1270 1284 int rc; ··· 1309 1277 rc = reipl_nss_init(); 1310 1278 if (rc) 1311 1279 return rc; 1312 - rc = reipl_set_type(ipl_info.type); 1313 - if (rc) 1314 - return rc; 1315 - return 0; 1280 + return reipl_type_init(); 1316 1281 } 1317 1282 1318 1283 static struct shutdown_action __refdata reipl_action = { ··· 1450 1421 if (dump_method == DUMP_METHOD_NONE) 1451 1422 return; 1452 1423 smp_send_stop(); 1453 - smp_switch_to_ipl_cpu(__dump_run, NULL); 1424 + smp_call_ipl_cpu(__dump_run, NULL); 1454 1425 } 1455 1426 1456 1427 static int __init dump_ccw_init(void) ··· 1528 1499 1529 
1500 static void dump_reipl_run(struct shutdown_trigger *trigger) 1530 1501 { 1531 - preempt_disable(); 1532 - /* 1533 - * Bypass dynamic address translation (DAT) when storing IPL parameter 1534 - * information block address and checksum into the prefix area 1535 - * (corresponding to absolute addresses 0-8191). 1536 - * When enhanced DAT applies and the STE format control in one, 1537 - * the absolute address is formed without prefixing. In this case a 1538 - * normal store (stg/st) into the prefix area would no more match to 1539 - * absolute addresses 0-8191. 1540 - */ 1541 - #ifdef CONFIG_64BIT 1542 - asm volatile("sturg %0,%1" 1543 - :: "a" ((unsigned long) reipl_block_actual), 1544 - "a" (&lowcore_ptr[smp_processor_id()]->ipib)); 1545 - #else 1546 - asm volatile("stura %0,%1" 1547 - :: "a" ((unsigned long) reipl_block_actual), 1548 - "a" (&lowcore_ptr[smp_processor_id()]->ipib)); 1549 - #endif 1550 - asm volatile("stura %0,%1" 1551 - :: "a" (csum_partial(reipl_block_actual, 1552 - reipl_block_actual->hdr.len, 0)), 1553 - "a" (&lowcore_ptr[smp_processor_id()]->ipib_checksum)); 1554 - preempt_enable(); 1502 + u32 csum; 1503 + 1504 + csum = csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0); 1505 + copy_to_absolute_zero(&S390_lowcore.ipib_checksum, &csum, sizeof(csum)); 1506 + copy_to_absolute_zero(&S390_lowcore.ipib, &reipl_block_actual, 1507 + sizeof(reipl_block_actual)); 1555 1508 dump_run(trigger); 1556 1509 } 1557 1510 ··· 1634 1623 if (strcmp(trigger->name, ON_PANIC_STR) == 0 || 1635 1624 strcmp(trigger->name, ON_RESTART_STR) == 0) 1636 1625 disabled_wait((unsigned long) __builtin_return_address(0)); 1637 - while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) 1638 - cpu_relax(); 1639 - for (;;); 1626 + smp_stop_cpu(); 1640 1627 } 1641 1628 1642 1629 static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR, ··· 1722 1713 1723 1714 static void do_panic(void) 1724 1715 { 1716 + lgr_info_log(); 1725 1717 
on_panic_trigger.action->fn(&on_panic_trigger); 1726 1718 stop_run(&on_panic_trigger); 1727 1719 } ··· 1748 1738 static struct kobj_attribute on_restart_attr = 1749 1739 __ATTR(on_restart, 0644, on_restart_show, on_restart_store); 1750 1740 1751 - void do_restart(void) 1741 + static void __do_restart(void *ignore) 1752 1742 { 1753 - smp_restart_with_online_cpu(); 1754 1743 smp_send_stop(); 1755 1744 #ifdef CONFIG_CRASH_DUMP 1756 1745 crash_kexec(NULL); 1757 1746 #endif 1758 1747 on_restart_trigger.action->fn(&on_restart_trigger); 1759 1748 stop_run(&on_restart_trigger); 1749 + } 1750 + 1751 + void do_restart(void) 1752 + { 1753 + tracing_off(); 1754 + debug_locks_off(); 1755 + lgr_info_log(); 1756 + smp_call_online_cpu(__do_restart, NULL); 1760 1757 } 1761 1758 1762 1759 /* on halt */
+5 -9
arch/s390/kernel/irq.c
··· 202 202 } 203 203 EXPORT_SYMBOL(unregister_external_interrupt); 204 204 205 - void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code, 205 + void __irq_entry do_extint(struct pt_regs *regs, struct ext_code ext_code, 206 206 unsigned int param32, unsigned long param64) 207 207 { 208 208 struct pt_regs *old_regs; 209 - unsigned short code; 210 209 struct ext_int_info *p; 211 210 int index; 212 211 213 - code = (unsigned short) ext_int_code; 214 212 old_regs = set_irq_regs(regs); 215 - s390_idle_check(regs, S390_lowcore.int_clock, 216 - S390_lowcore.async_enter_timer); 217 213 irq_enter(); 218 214 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) 219 215 /* Serve timer interrupts first. */ 220 216 clock_comparator_work(); 221 217 kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; 222 - if (code != 0x1004) 218 + if (ext_code.code != 0x1004) 223 219 __get_cpu_var(s390_idle).nohz_delay = 1; 224 220 225 - index = ext_hash(code); 221 + index = ext_hash(ext_code.code); 226 222 rcu_read_lock(); 227 223 list_for_each_entry_rcu(p, &ext_int_hash[index], entry) 228 - if (likely(p->code == code)) 229 - p->handler(ext_int_code, param32, param64); 224 + if (likely(p->code == ext_code.code)) 225 + p->handler(ext_code, param32, param64); 230 226 rcu_read_unlock(); 231 227 irq_exit(); 232 228 set_irq_regs(old_regs);
+200
arch/s390/kernel/lgr.c
··· 1 + /* 2 + * Linux Guest Relocation (LGR) detection 3 + * 4 + * Copyright IBM Corp. 2012 5 + * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com> 6 + */ 7 + 8 + #include <linux/module.h> 9 + #include <linux/timer.h> 10 + #include <linux/slab.h> 11 + #include <asm/sysinfo.h> 12 + #include <asm/ebcdic.h> 13 + #include <asm/system.h> 14 + #include <asm/debug.h> 15 + #include <asm/ipl.h> 16 + 17 + #define LGR_TIMER_INTERVAL_SECS (30 * 60) 18 + #define VM_LEVEL_MAX 2 /* Maximum is 8, but we only record two levels */ 19 + 20 + /* 21 + * LGR info: Contains stfle and stsi data 22 + */ 23 + struct lgr_info { 24 + /* Bit field with facility information: 4 DWORDs are stored */ 25 + u64 stfle_fac_list[4]; 26 + /* Level of system (1 = CEC, 2 = LPAR, 3 = z/VM */ 27 + u32 level; 28 + /* Level 1: CEC info (stsi 1.1.1) */ 29 + char manufacturer[16]; 30 + char type[4]; 31 + char sequence[16]; 32 + char plant[4]; 33 + char model[16]; 34 + /* Level 2: LPAR info (stsi 2.2.2) */ 35 + u16 lpar_number; 36 + char name[8]; 37 + /* Level 3: VM info (stsi 3.2.2) */ 38 + u8 vm_count; 39 + struct { 40 + char name[8]; 41 + char cpi[16]; 42 + } vm[VM_LEVEL_MAX]; 43 + } __packed __aligned(8); 44 + 45 + /* 46 + * LGR globals 47 + */ 48 + static void *lgr_page; 49 + static struct lgr_info lgr_info_last; 50 + static struct lgr_info lgr_info_cur; 51 + static struct debug_info *lgr_dbf; 52 + 53 + /* 54 + * Return number of valid stsi levels 55 + */ 56 + static inline int stsi_0(void) 57 + { 58 + int rc = stsi(NULL, 0, 0, 0); 59 + 60 + return rc == -ENOSYS ? 
rc : (((unsigned int) rc) >> 28); 61 + } 62 + 63 + /* 64 + * Copy buffer and then convert it to ASCII 65 + */ 66 + static void cpascii(char *dst, char *src, int size) 67 + { 68 + memcpy(dst, src, size); 69 + EBCASC(dst, size); 70 + } 71 + 72 + /* 73 + * Fill LGR info with 1.1.1 stsi data 74 + */ 75 + static void lgr_stsi_1_1_1(struct lgr_info *lgr_info) 76 + { 77 + struct sysinfo_1_1_1 *si = lgr_page; 78 + 79 + if (stsi(si, 1, 1, 1) == -ENOSYS) 80 + return; 81 + cpascii(lgr_info->manufacturer, si->manufacturer, 82 + sizeof(si->manufacturer)); 83 + cpascii(lgr_info->type, si->type, sizeof(si->type)); 84 + cpascii(lgr_info->model, si->model, sizeof(si->model)); 85 + cpascii(lgr_info->sequence, si->sequence, sizeof(si->sequence)); 86 + cpascii(lgr_info->plant, si->plant, sizeof(si->plant)); 87 + } 88 + 89 + /* 90 + * Fill LGR info with 2.2.2 stsi data 91 + */ 92 + static void lgr_stsi_2_2_2(struct lgr_info *lgr_info) 93 + { 94 + struct sysinfo_2_2_2 *si = lgr_page; 95 + 96 + if (stsi(si, 2, 2, 2) == -ENOSYS) 97 + return; 98 + cpascii(lgr_info->name, si->name, sizeof(si->name)); 99 + memcpy(&lgr_info->lpar_number, &si->lpar_number, 100 + sizeof(lgr_info->lpar_number)); 101 + } 102 + 103 + /* 104 + * Fill LGR info with 3.2.2 stsi data 105 + */ 106 + static void lgr_stsi_3_2_2(struct lgr_info *lgr_info) 107 + { 108 + struct sysinfo_3_2_2 *si = lgr_page; 109 + int i; 110 + 111 + if (stsi(si, 3, 2, 2) == -ENOSYS) 112 + return; 113 + for (i = 0; i < min_t(u8, si->count, VM_LEVEL_MAX); i++) { 114 + cpascii(lgr_info->vm[i].name, si->vm[i].name, 115 + sizeof(si->vm[i].name)); 116 + cpascii(lgr_info->vm[i].cpi, si->vm[i].cpi, 117 + sizeof(si->vm[i].cpi)); 118 + } 119 + lgr_info->vm_count = si->count; 120 + } 121 + 122 + /* 123 + * Fill LGR info with current data 124 + */ 125 + static void lgr_info_get(struct lgr_info *lgr_info) 126 + { 127 + memset(lgr_info, 0, sizeof(*lgr_info)); 128 + stfle(lgr_info->stfle_fac_list, ARRAY_SIZE(lgr_info->stfle_fac_list)); 129 + lgr_info->level 
= stsi_0(); 130 + if (lgr_info->level == -ENOSYS) 131 + return; 132 + if (lgr_info->level >= 1) 133 + lgr_stsi_1_1_1(lgr_info); 134 + if (lgr_info->level >= 2) 135 + lgr_stsi_2_2_2(lgr_info); 136 + if (lgr_info->level >= 3) 137 + lgr_stsi_3_2_2(lgr_info); 138 + } 139 + 140 + /* 141 + * Check if LGR info has changed and if yes log new LGR info to s390dbf 142 + */ 143 + void lgr_info_log(void) 144 + { 145 + static DEFINE_SPINLOCK(lgr_info_lock); 146 + unsigned long flags; 147 + 148 + if (!spin_trylock_irqsave(&lgr_info_lock, flags)) 149 + return; 150 + lgr_info_get(&lgr_info_cur); 151 + if (memcmp(&lgr_info_last, &lgr_info_cur, sizeof(lgr_info_cur)) != 0) { 152 + debug_event(lgr_dbf, 1, &lgr_info_cur, sizeof(lgr_info_cur)); 153 + lgr_info_last = lgr_info_cur; 154 + } 155 + spin_unlock_irqrestore(&lgr_info_lock, flags); 156 + } 157 + EXPORT_SYMBOL_GPL(lgr_info_log); 158 + 159 + static void lgr_timer_set(void); 160 + 161 + /* 162 + * LGR timer callback 163 + */ 164 + static void lgr_timer_fn(unsigned long ignored) 165 + { 166 + lgr_info_log(); 167 + lgr_timer_set(); 168 + } 169 + 170 + static struct timer_list lgr_timer = 171 + TIMER_DEFERRED_INITIALIZER(lgr_timer_fn, 0, 0); 172 + 173 + /* 174 + * Setup next LGR timer 175 + */ 176 + static void lgr_timer_set(void) 177 + { 178 + mod_timer(&lgr_timer, jiffies + LGR_TIMER_INTERVAL_SECS * HZ); 179 + } 180 + 181 + /* 182 + * Initialize LGR: Add s390dbf, write initial lgr_info and setup timer 183 + */ 184 + static int __init lgr_init(void) 185 + { 186 + lgr_page = (void *) __get_free_pages(GFP_KERNEL, 0); 187 + if (!lgr_page) 188 + return -ENOMEM; 189 + lgr_dbf = debug_register("lgr", 1, 1, sizeof(struct lgr_info)); 190 + if (!lgr_dbf) { 191 + free_page((unsigned long) lgr_page); 192 + return -ENOMEM; 193 + } 194 + debug_register_view(lgr_dbf, &debug_hex_ascii_view); 195 + lgr_info_get(&lgr_info_last); 196 + debug_event(lgr_dbf, 1, &lgr_info_last, sizeof(lgr_info_last)); 197 + lgr_timer_set(); 198 + return 0; 199 + } 200 + 
module_init(lgr_init);
+14 -38
arch/s390/kernel/machine_kexec.c
··· 14 14 #include <linux/delay.h> 15 15 #include <linux/reboot.h> 16 16 #include <linux/ftrace.h> 17 + #include <linux/debug_locks.h> 17 18 #include <asm/cio.h> 18 19 #include <asm/setup.h> 19 20 #include <asm/pgtable.h> ··· 50 49 } 51 50 52 51 /* 53 - * Store status of next available physical CPU 54 - */ 55 - static int store_status_next(int start_cpu, int this_cpu) 56 - { 57 - struct save_area *sa = (void *) 4608 + store_prefix(); 58 - int cpu, rc; 59 - 60 - for (cpu = start_cpu; cpu < 65536; cpu++) { 61 - if (cpu == this_cpu) 62 - continue; 63 - do { 64 - rc = raw_sigp(cpu, sigp_stop_and_store_status); 65 - } while (rc == sigp_busy); 66 - if (rc != sigp_order_code_accepted) 67 - continue; 68 - if (sa->pref_reg) 69 - return cpu; 70 - } 71 - return -1; 72 - } 73 - 74 - /* 75 52 * Initialize CPU ELF notes 76 53 */ 77 54 void setup_regs(void) 78 55 { 79 56 unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE; 80 - int cpu, this_cpu, phys_cpu = 0, first = 1; 57 + int cpu, this_cpu; 81 58 82 - this_cpu = stap(); 83 - 84 - if (!S390_lowcore.prefixreg_save_area) 85 - first = 0; 59 + this_cpu = smp_find_processor_id(stap()); 60 + add_elf_notes(this_cpu); 86 61 for_each_online_cpu(cpu) { 87 - if (first) { 88 - add_elf_notes(cpu); 89 - first = 0; 62 + if (cpu == this_cpu) 90 63 continue; 91 - } 92 - phys_cpu = store_status_next(phys_cpu, this_cpu); 93 - if (phys_cpu == -1) 94 - break; 64 + if (smp_store_status(cpu)) 65 + continue; 95 66 add_elf_notes(cpu); 96 - phys_cpu++; 97 67 } 98 68 /* Copy dump CPU store status info to absolute zero */ 99 69 memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area)); ··· 210 238 struct kimage *image = data; 211 239 212 240 pfault_fini(); 213 - if (image->type == KEXEC_TYPE_CRASH) 241 + tracing_off(); 242 + debug_locks_off(); 243 + if (image->type == KEXEC_TYPE_CRASH) { 244 + lgr_info_log(); 214 245 s390_reset_system(__do_machine_kdump, data); 215 - else 246 + } else { 216 247 
s390_reset_system(__do_machine_kexec, data); 248 + } 217 249 disabled_wait((unsigned long) __builtin_return_address(0)); 218 250 } 219 251 ··· 231 255 return; 232 256 tracer_disable(); 233 257 smp_send_stop(); 234 - smp_switch_to_ipl_cpu(__machine_kexec, image); 258 + smp_call_ipl_cpu(__machine_kexec, image); 235 259 }
-2
arch/s390/kernel/nmi.c
··· 254 254 int umode; 255 255 256 256 nmi_enter(); 257 - s390_idle_check(regs, S390_lowcore.mcck_clock, 258 - S390_lowcore.mcck_enter_timer); 259 257 kstat_cpu(smp_processor_id()).irqs[NMI_NMI]++; 260 258 mci = (struct mci *) &S390_lowcore.mcck_interruption_code; 261 259 mcck = &__get_cpu_var(cpu_mcck);
+169
arch/s390/kernel/os_info.c
··· 1 + /* 2 + * OS info memory interface 3 + * 4 + * Copyright IBM Corp. 2012 5 + * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com> 6 + */ 7 + 8 + #define KMSG_COMPONENT "os_info" 9 + #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 + 11 + #include <linux/crash_dump.h> 12 + #include <linux/kernel.h> 13 + #include <asm/checksum.h> 14 + #include <asm/lowcore.h> 15 + #include <asm/system.h> 16 + #include <asm/os_info.h> 17 + 18 + /* 19 + * OS info structure has to be page aligned 20 + */ 21 + static struct os_info os_info __page_aligned_data; 22 + 23 + /* 24 + * Compute checksum over OS info structure 25 + */ 26 + u32 os_info_csum(struct os_info *os_info) 27 + { 28 + int size = sizeof(*os_info) - offsetof(struct os_info, version_major); 29 + return csum_partial(&os_info->version_major, size, 0); 30 + } 31 + 32 + /* 33 + * Add crashkernel info to OS info and update checksum 34 + */ 35 + void os_info_crashkernel_add(unsigned long base, unsigned long size) 36 + { 37 + os_info.crashkernel_addr = (u64)(unsigned long)base; 38 + os_info.crashkernel_size = (u64)(unsigned long)size; 39 + os_info.csum = os_info_csum(&os_info); 40 + } 41 + 42 + /* 43 + * Add OS info entry and update checksum 44 + */ 45 + void os_info_entry_add(int nr, void *ptr, u64 size) 46 + { 47 + os_info.entry[nr].addr = (u64)(unsigned long)ptr; 48 + os_info.entry[nr].size = size; 49 + os_info.entry[nr].csum = csum_partial(ptr, size, 0); 50 + os_info.csum = os_info_csum(&os_info); 51 + } 52 + 53 + /* 54 + * Initialize OS info struture and set lowcore pointer 55 + */ 56 + void __init os_info_init(void) 57 + { 58 + void *ptr = &os_info; 59 + 60 + os_info.version_major = OS_INFO_VERSION_MAJOR; 61 + os_info.version_minor = OS_INFO_VERSION_MINOR; 62 + os_info.magic = OS_INFO_MAGIC; 63 + os_info.csum = os_info_csum(&os_info); 64 + copy_to_absolute_zero(&S390_lowcore.os_info, &ptr, sizeof(ptr)); 65 + } 66 + 67 + #ifdef CONFIG_CRASH_DUMP 68 + 69 + static struct os_info *os_info_old; 70 + 71 + /* 72 + * 
Allocate and copy OS info entry from oldmem 73 + */ 74 + static void os_info_old_alloc(int nr, int align) 75 + { 76 + unsigned long addr, size = 0; 77 + char *buf, *buf_align, *msg; 78 + u32 csum; 79 + 80 + addr = os_info_old->entry[nr].addr; 81 + if (!addr) { 82 + msg = "not available"; 83 + goto fail; 84 + } 85 + size = os_info_old->entry[nr].size; 86 + buf = kmalloc(size + align - 1, GFP_KERNEL); 87 + if (!buf) { 88 + msg = "alloc failed"; 89 + goto fail; 90 + } 91 + buf_align = PTR_ALIGN(buf, align); 92 + if (copy_from_oldmem(buf_align, (void *) addr, size)) { 93 + msg = "copy failed"; 94 + goto fail_free; 95 + } 96 + csum = csum_partial(buf_align, size, 0); 97 + if (csum != os_info_old->entry[nr].csum) { 98 + msg = "checksum failed"; 99 + goto fail_free; 100 + } 101 + os_info_old->entry[nr].addr = (u64)(unsigned long)buf_align; 102 + msg = "copied"; 103 + goto out; 104 + fail_free: 105 + kfree(buf); 106 + fail: 107 + os_info_old->entry[nr].addr = 0; 108 + out: 109 + pr_info("entry %i: %s (addr=0x%lx size=%lu)\n", 110 + nr, msg, addr, size); 111 + } 112 + 113 + /* 114 + * Initialize os info and os info entries from oldmem 115 + */ 116 + static void os_info_old_init(void) 117 + { 118 + static int os_info_init; 119 + unsigned long addr; 120 + 121 + if (os_info_init) 122 + return; 123 + if (!OLDMEM_BASE) 124 + goto fail; 125 + if (copy_from_oldmem(&addr, &S390_lowcore.os_info, sizeof(addr))) 126 + goto fail; 127 + if (addr == 0 || addr % PAGE_SIZE) 128 + goto fail; 129 + os_info_old = kzalloc(sizeof(*os_info_old), GFP_KERNEL); 130 + if (!os_info_old) 131 + goto fail; 132 + if (copy_from_oldmem(os_info_old, (void *) addr, sizeof(*os_info_old))) 133 + goto fail_free; 134 + if (os_info_old->magic != OS_INFO_MAGIC) 135 + goto fail_free; 136 + if (os_info_old->csum != os_info_csum(os_info_old)) 137 + goto fail_free; 138 + if (os_info_old->version_major > OS_INFO_VERSION_MAJOR) 139 + goto fail_free; 140 + os_info_old_alloc(OS_INFO_VMCOREINFO, 1); 141 + 
os_info_old_alloc(OS_INFO_REIPL_BLOCK, 1); 142 + os_info_old_alloc(OS_INFO_INIT_FN, PAGE_SIZE); 143 + pr_info("crashkernel: addr=0x%lx size=%lu\n", 144 + (unsigned long) os_info_old->crashkernel_addr, 145 + (unsigned long) os_info_old->crashkernel_size); 146 + os_info_init = 1; 147 + return; 148 + fail_free: 149 + kfree(os_info_old); 150 + fail: 151 + os_info_init = 1; 152 + os_info_old = NULL; 153 + } 154 + 155 + /* 156 + * Return pointer to os infor entry and its size 157 + */ 158 + void *os_info_old_entry(int nr, unsigned long *size) 159 + { 160 + os_info_old_init(); 161 + 162 + if (!os_info_old) 163 + return NULL; 164 + if (!os_info_old->entry[nr].addr) 165 + return NULL; 166 + *size = (unsigned long) os_info_old->entry[nr].size; 167 + return (void *)(unsigned long)os_info_old->entry[nr].addr; 168 + } 169 + #endif
+1 -6
arch/s390/kernel/process.c
··· 77 77 local_irq_enable(); 78 78 return; 79 79 } 80 - trace_hardirqs_on(); 81 - /* Don't trace preempt off for idle. */ 82 - stop_critical_timings(); 83 - /* Stop virtual timer and halt the cpu. */ 80 + /* Halt the cpu and keep track of cpu time accounting. */ 84 81 vtime_stop_cpu(); 85 - /* Reenable preemption tracer. */ 86 - start_critical_timings(); 87 82 } 88 83 89 84 void cpu_idle(void)
+30 -31
arch/s390/kernel/setup.c
··· 2 2 * arch/s390/kernel/setup.c 3 3 * 4 4 * S390 version 5 - * Copyright (C) IBM Corp. 1999,2010 5 + * Copyright (C) IBM Corp. 1999,2012 6 6 * Author(s): Hartmut Penner (hp@de.ibm.com), 7 7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 8 8 * ··· 62 62 #include <asm/ebcdic.h> 63 63 #include <asm/kvm_virtio.h> 64 64 #include <asm/diag.h> 65 + #include <asm/os_info.h> 66 + #include "entry.h" 65 67 66 68 long psw_kernel_bits = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY | 67 69 PSW_MASK_EA | PSW_MASK_BA; ··· 353 351 } 354 352 } 355 353 356 - static void __init 357 - setup_lowcore(void) 354 + void *restart_stack __attribute__((__section__(".data"))); 355 + 356 + static void __init setup_lowcore(void) 358 357 { 359 358 struct _lowcore *lc; 360 359 ··· 366 363 lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); 367 364 lc->restart_psw.mask = psw_kernel_bits; 368 365 lc->restart_psw.addr = 369 - PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; 366 + PSW_ADDR_AMODE | (unsigned long) restart_int_handler; 370 367 lc->external_new_psw.mask = psw_kernel_bits | 371 368 PSW_MASK_DAT | PSW_MASK_MCHECK; 372 369 lc->external_new_psw.addr = ··· 415 412 lc->last_update_timer = S390_lowcore.last_update_timer; 416 413 lc->last_update_clock = S390_lowcore.last_update_clock; 417 414 lc->ftrace_func = S390_lowcore.ftrace_func; 415 + 416 + restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0); 417 + restart_stack += ASYNC_SIZE; 418 + 419 + /* 420 + * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant 421 + * restart data to the absolute zero lowcore. This is necesary if 422 + * PSW restart is done on an offline CPU that has lowcore zero. 
423 + */ 424 + lc->restart_stack = (unsigned long) restart_stack; 425 + lc->restart_fn = (unsigned long) do_restart; 426 + lc->restart_data = 0; 427 + lc->restart_source = -1UL; 428 + memcpy(&S390_lowcore.restart_stack, &lc->restart_stack, 429 + 4*sizeof(unsigned long)); 430 + copy_to_absolute_zero(&S390_lowcore.restart_psw, 431 + &lc->restart_psw, sizeof(psw_t)); 432 + 418 433 set_prefix((u32)(unsigned long) lc); 419 434 lowcore_ptr[0] = lc; 420 435 } ··· 593 572 } 594 573 } 595 574 596 - void *restart_stack __attribute__((__section__(".data"))); 597 - 598 - /* 599 - * Setup new PSW and allocate stack for PSW restart interrupt 600 - */ 601 - static void __init setup_restart_psw(void) 602 - { 603 - psw_t psw; 604 - 605 - restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0); 606 - restart_stack += ASYNC_SIZE; 607 - 608 - /* 609 - * Setup restart PSW for absolute zero lowcore. This is necesary 610 - * if PSW restart is done on an offline CPU that has lowcore zero 611 - */ 612 - psw.mask = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA; 613 - psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; 614 - copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw)); 615 - } 616 - 617 575 static void __init setup_vmcoreinfo(void) 618 576 { 619 577 #ifdef CONFIG_KEXEC ··· 747 747 { 748 748 #ifdef CONFIG_CRASH_DUMP 749 749 unsigned long long crash_base, crash_size; 750 - char *msg; 750 + char *msg = NULL; 751 751 int rc; 752 752 753 753 rc = parse_crashkernel(boot_command_line, memory_end, &crash_size, ··· 779 779 pr_info("Reserving %lluMB of memory at %lluMB " 780 780 "for crashkernel (System RAM: %luMB)\n", 781 781 crash_size >> 20, crash_base >> 20, memory_end >> 20); 782 + os_info_crashkernel_add(crash_base, crash_size); 782 783 #endif 783 784 } 784 785 785 - static void __init 786 - setup_memory(void) 786 + static void __init setup_memory(void) 787 787 { 788 788 unsigned long bootmap_size; 789 789 unsigned long start_pfn, 
end_pfn; ··· 1014 1014 * was printed. 1015 1015 */ 1016 1016 1017 - void __init 1018 - setup_arch(char **cmdline_p) 1017 + void __init setup_arch(char **cmdline_p) 1019 1018 { 1020 1019 /* 1021 1020 * print what head.S has found out about the machine ··· 1059 1060 1060 1061 parse_early_param(); 1061 1062 1063 + os_info_init(); 1062 1064 setup_ipl(); 1063 1065 setup_memory_end(); 1064 1066 setup_addressing_mode(); ··· 1068 1068 setup_memory(); 1069 1069 setup_resources(); 1070 1070 setup_vmcoreinfo(); 1071 - setup_restart_psw(); 1072 1071 setup_lowcore(); 1073 1072 1074 1073 cpu_init();
+1 -5
arch/s390/kernel/signal.c
··· 384 384 siginfo_t *info, sigset_t *oldset, 385 385 struct pt_regs *regs) 386 386 { 387 - sigset_t blocked; 388 387 int ret; 389 388 390 389 /* Set up the stack frame */ ··· 393 394 ret = setup_frame(sig, ka, oldset, regs); 394 395 if (ret) 395 396 return ret; 396 - sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask); 397 - if (!(ka->sa.sa_flags & SA_NODEFER)) 398 - sigaddset(&blocked, sig); 399 - set_current_blocked(&blocked); 397 + block_sigmask(ka, sig); 400 398 return 0; 401 399 } 402 400
+585 -578
arch/s390/kernel/smp.c
··· 1 1 /* 2 - * arch/s390/kernel/smp.c 2 + * SMP related functions 3 3 * 4 - * Copyright IBM Corp. 1999, 2009 5 - * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 6 - * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 - * Heiko Carstens (heiko.carstens@de.ibm.com) 4 + * Copyright IBM Corp. 1999,2012 5 + * Author(s): Denis Joseph Barrow, 6 + * Martin Schwidefsky <schwidefsky@de.ibm.com>, 7 + * Heiko Carstens <heiko.carstens@de.ibm.com>, 8 8 * 9 9 * based on other smp stuff by 10 10 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net> 11 11 * (c) 1998 Ingo Molnar 12 12 * 13 - * We work with logical cpu numbering everywhere we can. The only 14 - * functions using the real cpu address (got from STAP) are the sigp 15 - * functions. For all other functions we use the identity mapping. 16 - * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is 17 - * used e.g. to find the idle task belonging to a logical cpu. Every array 18 - * in the kernel is sorted by the logical cpu number and not by the physical 19 - * one which is causing all the confusion with __cpu_logical_map and 20 - * cpu_number_map in other architectures. 13 + * The code outside of smp.c uses logical cpu numbers, only smp.c does 14 + * the translation of logical to physical cpu ids. All new code that 15 + * operates on physical cpu numbers needs to go into smp.c. 
21 16 */ 22 17 23 18 #define KMSG_COMPONENT "cpu" ··· 26 31 #include <linux/spinlock.h> 27 32 #include <linux/kernel_stat.h> 28 33 #include <linux/delay.h> 29 - #include <linux/cache.h> 30 34 #include <linux/interrupt.h> 31 35 #include <linux/irqflags.h> 32 36 #include <linux/cpu.h> 33 - #include <linux/timex.h> 34 - #include <linux/bootmem.h> 35 37 #include <linux/slab.h> 36 38 #include <linux/crash_dump.h> 37 39 #include <asm/asm-offsets.h> 38 40 #include <asm/ipl.h> 39 41 #include <asm/setup.h> 40 - #include <asm/sigp.h> 41 - #include <asm/pgalloc.h> 42 42 #include <asm/irq.h> 43 - #include <asm/cpcmd.h> 44 43 #include <asm/tlbflush.h> 45 44 #include <asm/timer.h> 46 45 #include <asm/lowcore.h> 47 46 #include <asm/sclp.h> 48 - #include <asm/cputime.h> 49 47 #include <asm/vdso.h> 50 - #include <asm/cpu.h> 48 + #include <asm/debug.h> 49 + #include <asm/os_info.h> 51 50 #include "entry.h" 52 51 53 - /* logical cpu to cpu address */ 54 - unsigned short __cpu_logical_map[NR_CPUS]; 52 + enum { 53 + sigp_sense = 1, 54 + sigp_external_call = 2, 55 + sigp_emergency_signal = 3, 56 + sigp_start = 4, 57 + sigp_stop = 5, 58 + sigp_restart = 6, 59 + sigp_stop_and_store_status = 9, 60 + sigp_initial_cpu_reset = 11, 61 + sigp_cpu_reset = 12, 62 + sigp_set_prefix = 13, 63 + sigp_store_status_at_address = 14, 64 + sigp_store_extended_status_at_address = 15, 65 + sigp_set_architecture = 18, 66 + sigp_conditional_emergency_signal = 19, 67 + sigp_sense_running = 21, 68 + }; 55 69 56 - static struct task_struct *current_set[NR_CPUS]; 70 + enum { 71 + sigp_order_code_accepted = 0, 72 + sigp_status_stored = 1, 73 + sigp_busy = 2, 74 + sigp_not_operational = 3, 75 + }; 57 76 58 - static u8 smp_cpu_type; 59 - static int smp_use_sigp_detection; 77 + enum { 78 + ec_schedule = 0, 79 + ec_call_function, 80 + ec_call_function_single, 81 + ec_stop_cpu, 82 + }; 60 83 61 - enum s390_cpu_state { 84 + enum { 62 85 CPU_STATE_STANDBY, 63 86 CPU_STATE_CONFIGURED, 64 87 }; 65 88 89 + struct pcpu { 90 
+ struct cpu cpu; 91 + struct task_struct *idle; /* idle process for the cpu */ 92 + struct _lowcore *lowcore; /* lowcore page(s) for the cpu */ 93 + unsigned long async_stack; /* async stack for the cpu */ 94 + unsigned long panic_stack; /* panic stack for the cpu */ 95 + unsigned long ec_mask; /* bit mask for ec_xxx functions */ 96 + int state; /* physical cpu state */ 97 + u32 status; /* last status received via sigp */ 98 + u16 address; /* physical cpu address */ 99 + }; 100 + 101 + static u8 boot_cpu_type; 102 + static u16 boot_cpu_address; 103 + static struct pcpu pcpu_devices[NR_CPUS]; 104 + 66 105 DEFINE_MUTEX(smp_cpu_state_mutex); 67 - static int smp_cpu_state[NR_CPUS]; 68 106 69 - static DEFINE_PER_CPU(struct cpu, cpu_devices); 70 - 71 - static void smp_ext_bitcall(int, int); 72 - 73 - static int raw_cpu_stopped(int cpu) 107 + /* 108 + * Signal processor helper functions. 109 + */ 110 + static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status) 74 111 { 75 - u32 status; 112 + register unsigned int reg1 asm ("1") = parm; 113 + int cc; 76 114 77 - switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) { 78 - case sigp_status_stored: 79 - /* Check for stopped and check stop state */ 80 - if (status & 0x50) 81 - return 1; 82 - break; 83 - default: 84 - break; 85 - } 86 - return 0; 115 + asm volatile( 116 + " sigp %1,%2,0(%3)\n" 117 + " ipm %0\n" 118 + " srl %0,28\n" 119 + : "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc"); 120 + if (status && cc == 1) 121 + *status = reg1; 122 + return cc; 87 123 } 88 124 89 - static inline int cpu_stopped(int cpu) 125 + static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status) 90 126 { 91 - return raw_cpu_stopped(cpu_logical_map(cpu)); 127 + int cc; 128 + 129 + while (1) { 130 + cc = __pcpu_sigp(addr, order, parm, status); 131 + if (cc != sigp_busy) 132 + return cc; 133 + cpu_relax(); 134 + } 135 + } 136 + 137 + static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm) 138 + { 
139 + int cc, retry; 140 + 141 + for (retry = 0; ; retry++) { 142 + cc = __pcpu_sigp(pcpu->address, order, parm, &pcpu->status); 143 + if (cc != sigp_busy) 144 + break; 145 + if (retry >= 3) 146 + udelay(10); 147 + } 148 + return cc; 149 + } 150 + 151 + static inline int pcpu_stopped(struct pcpu *pcpu) 152 + { 153 + if (__pcpu_sigp(pcpu->address, sigp_sense, 154 + 0, &pcpu->status) != sigp_status_stored) 155 + return 0; 156 + /* Check for stopped and check stop state */ 157 + return !!(pcpu->status & 0x50); 158 + } 159 + 160 + static inline int pcpu_running(struct pcpu *pcpu) 161 + { 162 + if (__pcpu_sigp(pcpu->address, sigp_sense_running, 163 + 0, &pcpu->status) != sigp_status_stored) 164 + return 1; 165 + /* Check for running status */ 166 + return !(pcpu->status & 0x400); 92 167 } 93 168 94 169 /* 95 - * Ensure that PSW restart is done on an online CPU 170 + * Find struct pcpu by cpu address. 96 171 */ 97 - void smp_restart_with_online_cpu(void) 172 + static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address) 98 173 { 99 174 int cpu; 100 175 101 - for_each_online_cpu(cpu) { 102 - if (stap() == __cpu_logical_map[cpu]) { 103 - /* We are online: Enable DAT again and return */ 104 - __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); 105 - return; 106 - } 176 + for_each_cpu(cpu, mask) 177 + if (pcpu_devices[cpu].address == address) 178 + return pcpu_devices + cpu; 179 + return NULL; 180 + } 181 + 182 + static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit) 183 + { 184 + int order; 185 + 186 + set_bit(ec_bit, &pcpu->ec_mask); 187 + order = pcpu_running(pcpu) ? 
188 + sigp_external_call : sigp_emergency_signal; 189 + pcpu_sigp_retry(pcpu, order, 0); 190 + } 191 + 192 + static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) 193 + { 194 + struct _lowcore *lc; 195 + 196 + if (pcpu != &pcpu_devices[0]) { 197 + pcpu->lowcore = (struct _lowcore *) 198 + __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); 199 + pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); 200 + pcpu->panic_stack = __get_free_page(GFP_KERNEL); 201 + if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack) 202 + goto out; 107 203 } 108 - /* We are not online: Do PSW restart on an online CPU */ 109 - while (sigp(cpu, sigp_restart) == sigp_busy) 110 - cpu_relax(); 111 - /* And stop ourself */ 112 - while (raw_sigp(stap(), sigp_stop) == sigp_busy) 113 - cpu_relax(); 114 - for (;;); 204 + lc = pcpu->lowcore; 205 + memcpy(lc, &S390_lowcore, 512); 206 + memset((char *) lc + 512, 0, sizeof(*lc) - 512); 207 + lc->async_stack = pcpu->async_stack + ASYNC_SIZE; 208 + lc->panic_stack = pcpu->panic_stack + PAGE_SIZE; 209 + lc->cpu_nr = cpu; 210 + #ifndef CONFIG_64BIT 211 + if (MACHINE_HAS_IEEE) { 212 + lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL); 213 + if (!lc->extended_save_area_addr) 214 + goto out; 215 + } 216 + #else 217 + if (vdso_alloc_per_cpu(lc)) 218 + goto out; 219 + #endif 220 + lowcore_ptr[cpu] = lc; 221 + pcpu_sigp_retry(pcpu, sigp_set_prefix, (u32)(unsigned long) lc); 222 + return 0; 223 + out: 224 + if (pcpu != &pcpu_devices[0]) { 225 + free_page(pcpu->panic_stack); 226 + free_pages(pcpu->async_stack, ASYNC_ORDER); 227 + free_pages((unsigned long) pcpu->lowcore, LC_ORDER); 228 + } 229 + return -ENOMEM; 115 230 } 116 231 117 - void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) 232 + static void pcpu_free_lowcore(struct pcpu *pcpu) 118 233 { 119 - struct _lowcore *lc, *current_lc; 120 - struct stack_frame *sf; 121 - struct pt_regs *regs; 122 - unsigned long sp; 234 + pcpu_sigp_retry(pcpu, sigp_set_prefix, 
0); 235 + lowcore_ptr[pcpu - pcpu_devices] = NULL; 236 + #ifndef CONFIG_64BIT 237 + if (MACHINE_HAS_IEEE) { 238 + struct _lowcore *lc = pcpu->lowcore; 123 239 124 - if (smp_processor_id() == 0) 125 - func(data); 126 - __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | 127 - PSW_MASK_EA | PSW_MASK_BA); 128 - /* Disable lowcore protection */ 129 - __ctl_clear_bit(0, 28); 130 - current_lc = lowcore_ptr[smp_processor_id()]; 131 - lc = lowcore_ptr[0]; 132 - if (!lc) 133 - lc = current_lc; 134 - lc->restart_psw.mask = 135 - PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA; 136 - lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu; 137 - if (!cpu_online(0)) 138 - smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]); 139 - while (sigp(0, sigp_stop_and_store_status) == sigp_busy) 140 - cpu_relax(); 141 - sp = lc->panic_stack; 142 - sp -= sizeof(struct pt_regs); 143 - regs = (struct pt_regs *) sp; 144 - memcpy(&regs->gprs, &current_lc->gpregs_save_area, sizeof(regs->gprs)); 145 - regs->psw = current_lc->psw_save_area; 146 - sp -= STACK_FRAME_OVERHEAD; 147 - sf = (struct stack_frame *) sp; 148 - sf->back_chain = 0; 149 - smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]); 240 + free_page((unsigned long) lc->extended_save_area_addr); 241 + lc->extended_save_area_addr = 0; 242 + } 243 + #else 244 + vdso_free_per_cpu(pcpu->lowcore); 245 + #endif 246 + if (pcpu != &pcpu_devices[0]) { 247 + free_page(pcpu->panic_stack); 248 + free_pages(pcpu->async_stack, ASYNC_ORDER); 249 + free_pages((unsigned long) pcpu->lowcore, LC_ORDER); 250 + } 150 251 } 151 252 152 - static void smp_stop_cpu(void) 253 + static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) 153 254 { 154 - while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) 155 - cpu_relax(); 255 + struct _lowcore *lc = pcpu->lowcore; 256 + 257 + atomic_inc(&init_mm.context.attach_count); 258 + lc->cpu_nr = cpu; 259 + lc->percpu_offset = __per_cpu_offset[cpu]; 260 + 
lc->kernel_asce = S390_lowcore.kernel_asce; 261 + lc->machine_flags = S390_lowcore.machine_flags; 262 + lc->ftrace_func = S390_lowcore.ftrace_func; 263 + lc->user_timer = lc->system_timer = lc->steal_timer = 0; 264 + __ctl_store(lc->cregs_save_area, 0, 15); 265 + save_access_regs((unsigned int *) lc->access_regs_save_area); 266 + memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, 267 + MAX_FACILITY_BIT/8); 156 268 } 157 269 270 + static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk) 271 + { 272 + struct _lowcore *lc = pcpu->lowcore; 273 + struct thread_info *ti = task_thread_info(tsk); 274 + 275 + lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE; 276 + lc->thread_info = (unsigned long) task_thread_info(tsk); 277 + lc->current_task = (unsigned long) tsk; 278 + lc->user_timer = ti->user_timer; 279 + lc->system_timer = ti->system_timer; 280 + lc->steal_timer = 0; 281 + } 282 + 283 + static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data) 284 + { 285 + struct _lowcore *lc = pcpu->lowcore; 286 + 287 + lc->restart_stack = lc->kernel_stack; 288 + lc->restart_fn = (unsigned long) func; 289 + lc->restart_data = (unsigned long) data; 290 + lc->restart_source = -1UL; 291 + pcpu_sigp_retry(pcpu, sigp_restart, 0); 292 + } 293 + 294 + /* 295 + * Call function via PSW restart on pcpu and stop the current cpu. 296 + */ 297 + static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *), 298 + void *data, unsigned long stack) 299 + { 300 + struct _lowcore *lc = pcpu->lowcore; 301 + unsigned short this_cpu; 302 + 303 + __load_psw_mask(psw_kernel_bits); 304 + this_cpu = stap(); 305 + if (pcpu->address == this_cpu) 306 + func(data); /* should not return */ 307 + /* Stop target cpu (if func returns this stops the current cpu). */ 308 + pcpu_sigp_retry(pcpu, sigp_stop, 0); 309 + /* Restart func on the target cpu and stop the current cpu. 
*/ 310 + lc->restart_stack = stack; 311 + lc->restart_fn = (unsigned long) func; 312 + lc->restart_data = (unsigned long) data; 313 + lc->restart_source = (unsigned long) this_cpu; 314 + asm volatile( 315 + "0: sigp 0,%0,6 # sigp restart to target cpu\n" 316 + " brc 2,0b # busy, try again\n" 317 + "1: sigp 0,%1,5 # sigp stop to current cpu\n" 318 + " brc 2,1b # busy, try again\n" 319 + : : "d" (pcpu->address), "d" (this_cpu) : "0", "1", "cc"); 320 + for (;;) ; 321 + } 322 + 323 + /* 324 + * Call function on an online CPU. 325 + */ 326 + void smp_call_online_cpu(void (*func)(void *), void *data) 327 + { 328 + struct pcpu *pcpu; 329 + 330 + /* Use the current cpu if it is online. */ 331 + pcpu = pcpu_find_address(cpu_online_mask, stap()); 332 + if (!pcpu) 333 + /* Use the first online cpu. */ 334 + pcpu = pcpu_devices + cpumask_first(cpu_online_mask); 335 + pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack); 336 + } 337 + 338 + /* 339 + * Call function on the ipl CPU. 340 + */ 341 + void smp_call_ipl_cpu(void (*func)(void *), void *data) 342 + { 343 + pcpu_delegate(&pcpu_devices[0], func, data, 344 + pcpu_devices->panic_stack + PAGE_SIZE); 345 + } 346 + 347 + int smp_find_processor_id(u16 address) 348 + { 349 + int cpu; 350 + 351 + for_each_present_cpu(cpu) 352 + if (pcpu_devices[cpu].address == address) 353 + return cpu; 354 + return -1; 355 + } 356 + 357 + int smp_vcpu_scheduled(int cpu) 358 + { 359 + return pcpu_running(pcpu_devices + cpu); 360 + } 361 + 362 + void smp_yield(void) 363 + { 364 + if (MACHINE_HAS_DIAG44) 365 + asm volatile("diag 0,0,0x44"); 366 + } 367 + 368 + void smp_yield_cpu(int cpu) 369 + { 370 + if (MACHINE_HAS_DIAG9C) 371 + asm volatile("diag %0,0,0x9c" 372 + : : "d" (pcpu_devices[cpu].address)); 373 + else if (MACHINE_HAS_DIAG44) 374 + asm volatile("diag 0,0,0x44"); 375 + } 376 + 377 + /* 378 + * Send cpus emergency shutdown signal. This gives the cpus the 379 + * opportunity to complete outstanding interrupts. 
380 + */ 381 + void smp_emergency_stop(cpumask_t *cpumask) 382 + { 383 + u64 end; 384 + int cpu; 385 + 386 + end = get_clock() + (1000000UL << 12); 387 + for_each_cpu(cpu, cpumask) { 388 + struct pcpu *pcpu = pcpu_devices + cpu; 389 + set_bit(ec_stop_cpu, &pcpu->ec_mask); 390 + while (__pcpu_sigp(pcpu->address, sigp_emergency_signal, 391 + 0, NULL) == sigp_busy && 392 + get_clock() < end) 393 + cpu_relax(); 394 + } 395 + while (get_clock() < end) { 396 + for_each_cpu(cpu, cpumask) 397 + if (pcpu_stopped(pcpu_devices + cpu)) 398 + cpumask_clear_cpu(cpu, cpumask); 399 + if (cpumask_empty(cpumask)) 400 + break; 401 + cpu_relax(); 402 + } 403 + } 404 + 405 + /* 406 + * Stop all cpus but the current one. 407 + */ 158 408 void smp_send_stop(void) 159 409 { 160 410 cpumask_t cpumask; 161 411 int cpu; 162 - u64 end; 163 412 164 413 /* Disable all interrupts/machine checks */ 165 414 __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); 166 415 trace_hardirqs_off(); 167 416 417 + debug_set_critical(); 168 418 cpumask_copy(&cpumask, cpu_online_mask); 169 419 cpumask_clear_cpu(smp_processor_id(), &cpumask); 170 420 171 - if (oops_in_progress) { 172 - /* 173 - * Give the other cpus the opportunity to complete 174 - * outstanding interrupts before stopping them. 
175 - */ 176 - end = get_clock() + (1000000UL << 12); 177 - for_each_cpu(cpu, &cpumask) { 178 - set_bit(ec_stop_cpu, (unsigned long *) 179 - &lowcore_ptr[cpu]->ext_call_fast); 180 - while (sigp(cpu, sigp_emergency_signal) == sigp_busy && 181 - get_clock() < end) 182 - cpu_relax(); 183 - } 184 - while (get_clock() < end) { 185 - for_each_cpu(cpu, &cpumask) 186 - if (cpu_stopped(cpu)) 187 - cpumask_clear_cpu(cpu, &cpumask); 188 - if (cpumask_empty(&cpumask)) 189 - break; 190 - cpu_relax(); 191 - } 192 - } 421 + if (oops_in_progress) 422 + smp_emergency_stop(&cpumask); 193 423 194 424 /* stop all processors */ 195 425 for_each_cpu(cpu, &cpumask) { 196 - while (sigp(cpu, sigp_stop) == sigp_busy) 197 - cpu_relax(); 198 - while (!cpu_stopped(cpu)) 426 + struct pcpu *pcpu = pcpu_devices + cpu; 427 + pcpu_sigp_retry(pcpu, sigp_stop, 0); 428 + while (!pcpu_stopped(pcpu)) 199 429 cpu_relax(); 200 430 } 431 + } 432 + 433 + /* 434 + * Stop the current cpu. 435 + */ 436 + void smp_stop_cpu(void) 437 + { 438 + pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0); 439 + for (;;) ; 201 440 } 202 441 203 442 /* 204 443 * This is the main routine where commands issued by other 205 444 * cpus are handled. 
206 445 */ 207 - 208 - static void do_ext_call_interrupt(unsigned int ext_int_code, 446 + static void do_ext_call_interrupt(struct ext_code ext_code, 209 447 unsigned int param32, unsigned long param64) 210 448 { 211 449 unsigned long bits; 450 + int cpu; 212 451 213 - if ((ext_int_code & 0xffff) == 0x1202) 214 - kstat_cpu(smp_processor_id()).irqs[EXTINT_EXC]++; 452 + cpu = smp_processor_id(); 453 + if (ext_code.code == 0x1202) 454 + kstat_cpu(cpu).irqs[EXTINT_EXC]++; 215 455 else 216 - kstat_cpu(smp_processor_id()).irqs[EXTINT_EMS]++; 456 + kstat_cpu(cpu).irqs[EXTINT_EMS]++; 217 457 /* 218 458 * handle bit signal external calls 219 459 */ 220 - bits = xchg(&S390_lowcore.ext_call_fast, 0); 460 + bits = xchg(&pcpu_devices[cpu].ec_mask, 0); 221 461 222 462 if (test_bit(ec_stop_cpu, &bits)) 223 463 smp_stop_cpu(); ··· 468 238 469 239 } 470 240 471 - /* 472 - * Send an external call sigp to another cpu and return without waiting 473 - * for its completion. 474 - */ 475 - static void smp_ext_bitcall(int cpu, int sig) 476 - { 477 - int order; 478 - 479 - /* 480 - * Set signaling bit in lowcore of target cpu and kick it 481 - */ 482 - set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); 483 - while (1) { 484 - order = smp_vcpu_scheduled(cpu) ? 
485 - sigp_external_call : sigp_emergency_signal; 486 - if (sigp(cpu, order) != sigp_busy) 487 - break; 488 - udelay(10); 489 - } 490 - } 491 - 492 241 void arch_send_call_function_ipi_mask(const struct cpumask *mask) 493 242 { 494 243 int cpu; 495 244 496 245 for_each_cpu(cpu, mask) 497 - smp_ext_bitcall(cpu, ec_call_function); 246 + pcpu_ec_call(pcpu_devices + cpu, ec_call_function); 498 247 } 499 248 500 249 void arch_send_call_function_single_ipi(int cpu) 501 250 { 502 - smp_ext_bitcall(cpu, ec_call_function_single); 251 + pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single); 503 252 } 504 253 505 254 #ifndef CONFIG_64BIT ··· 504 295 */ 505 296 void smp_send_reschedule(int cpu) 506 297 { 507 - smp_ext_bitcall(cpu, ec_schedule); 298 + pcpu_ec_call(pcpu_devices + cpu, ec_schedule); 508 299 } 509 300 510 301 /* 511 302 * parameter area for the set/clear control bit callbacks 512 303 */ 513 304 struct ec_creg_mask_parms { 514 - unsigned long orvals[16]; 515 - unsigned long andvals[16]; 305 + unsigned long orval; 306 + unsigned long andval; 307 + int cr; 516 308 }; 517 309 518 310 /* ··· 523 313 { 524 314 struct ec_creg_mask_parms *pp = info; 525 315 unsigned long cregs[16]; 526 - int i; 527 316 528 317 __ctl_store(cregs, 0, 15); 529 - for (i = 0; i <= 15; i++) 530 - cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i]; 318 + cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval; 531 319 __ctl_load(cregs, 0, 15); 532 320 } 533 321 ··· 534 326 */ 535 327 void smp_ctl_set_bit(int cr, int bit) 536 328 { 537 - struct ec_creg_mask_parms parms; 329 + struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr }; 538 330 539 - memset(&parms.orvals, 0, sizeof(parms.orvals)); 540 - memset(&parms.andvals, 0xff, sizeof(parms.andvals)); 541 - parms.orvals[cr] = 1UL << bit; 542 331 on_each_cpu(smp_ctl_bit_callback, &parms, 1); 543 332 } 544 333 EXPORT_SYMBOL(smp_ctl_set_bit); ··· 545 340 */ 546 341 void smp_ctl_clear_bit(int cr, int bit) 547 342 { 548 - struct 
ec_creg_mask_parms parms; 343 + struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr }; 549 344 550 - memset(&parms.orvals, 0, sizeof(parms.orvals)); 551 - memset(&parms.andvals, 0xff, sizeof(parms.andvals)); 552 - parms.andvals[cr] = ~(1UL << bit); 553 345 on_each_cpu(smp_ctl_bit_callback, &parms, 1); 554 346 } 555 347 EXPORT_SYMBOL(smp_ctl_clear_bit); 556 348 557 349 #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP) 558 350 559 - static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) 560 - { 561 - if (ipl_info.type != IPL_TYPE_FCP_DUMP && !OLDMEM_BASE) 562 - return; 563 - if (is_kdump_kernel()) 564 - return; 565 - if (cpu >= NR_CPUS) { 566 - pr_warning("CPU %i exceeds the maximum %i and is excluded from " 567 - "the dump\n", cpu, NR_CPUS - 1); 568 - return; 569 - } 570 - zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL); 571 - while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy) 572 - cpu_relax(); 573 - memcpy_real(zfcpdump_save_areas[cpu], 574 - (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, 575 - sizeof(struct save_area)); 576 - } 577 - 578 351 struct save_area *zfcpdump_save_areas[NR_CPUS + 1]; 579 352 EXPORT_SYMBOL_GPL(zfcpdump_save_areas); 580 353 581 - #else 582 - 583 - static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { } 584 - 585 - #endif /* CONFIG_ZFCPDUMP */ 586 - 587 - static int cpu_known(int cpu_id) 354 + static void __init smp_get_save_area(int cpu, u16 address) 588 355 { 589 - int cpu; 356 + void *lc = pcpu_devices[0].lowcore; 357 + struct save_area *save_area; 590 358 591 - for_each_present_cpu(cpu) { 592 - if (__cpu_logical_map[cpu] == cpu_id) 593 - return 1; 359 + if (is_kdump_kernel()) 360 + return; 361 + if (!OLDMEM_BASE && (address == boot_cpu_address || 362 + ipl_info.type != IPL_TYPE_FCP_DUMP)) 363 + return; 364 + if (cpu >= NR_CPUS) { 365 + pr_warning("CPU %i exceeds the maximum %i and is excluded " 366 + "from the dump\n", cpu, 
NR_CPUS - 1); 367 + return; 594 368 } 369 + save_area = kmalloc(sizeof(struct save_area), GFP_KERNEL); 370 + if (!save_area) 371 + panic("could not allocate memory for save area\n"); 372 + zfcpdump_save_areas[cpu] = save_area; 373 + #ifdef CONFIG_CRASH_DUMP 374 + if (address == boot_cpu_address) { 375 + /* Copy the registers of the boot cpu. */ 376 + copy_oldmem_page(1, (void *) save_area, sizeof(*save_area), 377 + SAVE_AREA_BASE - PAGE_SIZE, 0); 378 + return; 379 + } 380 + #endif 381 + /* Get the registers of a non-boot cpu. */ 382 + __pcpu_sigp_relax(address, sigp_stop_and_store_status, 0, NULL); 383 + memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area)); 384 + } 385 + 386 + int smp_store_status(int cpu) 387 + { 388 + struct pcpu *pcpu; 389 + 390 + pcpu = pcpu_devices + cpu; 391 + if (__pcpu_sigp_relax(pcpu->address, sigp_stop_and_store_status, 392 + 0, NULL) != sigp_order_code_accepted) 393 + return -EIO; 595 394 return 0; 596 395 } 597 396 598 - static int smp_rescan_cpus_sigp(cpumask_t avail) 599 - { 600 - int cpu_id, logical_cpu; 397 + #else /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */ 601 398 602 - logical_cpu = cpumask_first(&avail); 603 - if (logical_cpu >= nr_cpu_ids) 604 - return 0; 605 - for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) { 606 - if (cpu_known(cpu_id)) 607 - continue; 608 - __cpu_logical_map[logical_cpu] = cpu_id; 609 - cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN); 610 - if (!cpu_stopped(logical_cpu)) 611 - continue; 612 - set_cpu_present(logical_cpu, true); 613 - smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED; 614 - logical_cpu = cpumask_next(logical_cpu, &avail); 615 - if (logical_cpu >= nr_cpu_ids) 616 - break; 617 - } 618 - return 0; 619 - } 399 + static inline void smp_get_save_area(int cpu, u16 address) { } 620 400 621 - static int smp_rescan_cpus_sclp(cpumask_t avail) 401 + #endif /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */ 402 + 403 + static struct sclp_cpu_info *smp_get_cpu_info(void) 622 404 { 405 + 
static int use_sigp_detection; 623 406 struct sclp_cpu_info *info; 624 - int cpu_id, logical_cpu, cpu; 625 - int rc; 407 + int address; 626 408 627 - logical_cpu = cpumask_first(&avail); 628 - if (logical_cpu >= nr_cpu_ids) 629 - return 0; 630 - info = kmalloc(sizeof(*info), GFP_KERNEL); 631 - if (!info) 632 - return -ENOMEM; 633 - rc = sclp_get_cpu_info(info); 634 - if (rc) 635 - goto out; 636 - for (cpu = 0; cpu < info->combined; cpu++) { 637 - if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type) 638 - continue; 639 - cpu_id = info->cpu[cpu].address; 640 - if (cpu_known(cpu_id)) 641 - continue; 642 - __cpu_logical_map[logical_cpu] = cpu_id; 643 - cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN); 644 - set_cpu_present(logical_cpu, true); 645 - if (cpu >= info->configured) 646 - smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY; 647 - else 648 - smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED; 649 - logical_cpu = cpumask_next(logical_cpu, &avail); 650 - if (logical_cpu >= nr_cpu_ids) 651 - break; 409 + info = kzalloc(sizeof(*info), GFP_KERNEL); 410 + if (info && (use_sigp_detection || sclp_get_cpu_info(info))) { 411 + use_sigp_detection = 1; 412 + for (address = 0; address <= MAX_CPU_ADDRESS; address++) { 413 + if (__pcpu_sigp_relax(address, sigp_sense, 0, NULL) == 414 + sigp_not_operational) 415 + continue; 416 + info->cpu[info->configured].address = address; 417 + info->configured++; 418 + } 419 + info->combined = info->configured; 652 420 } 653 - out: 654 - kfree(info); 655 - return rc; 421 + return info; 656 422 } 657 423 658 - static int __smp_rescan_cpus(void) 659 - { 660 - cpumask_t avail; 424 + static int __devinit smp_add_present_cpu(int cpu); 661 425 426 + static int __devinit __smp_rescan_cpus(struct sclp_cpu_info *info, 427 + int sysfs_add) 428 + { 429 + struct pcpu *pcpu; 430 + cpumask_t avail; 431 + int cpu, nr, i; 432 + 433 + nr = 0; 662 434 cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask); 663 - if (smp_use_sigp_detection) 
664 - return smp_rescan_cpus_sigp(avail); 665 - else 666 - return smp_rescan_cpus_sclp(avail); 435 + cpu = cpumask_first(&avail); 436 + for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) { 437 + if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type) 438 + continue; 439 + if (pcpu_find_address(cpu_present_mask, info->cpu[i].address)) 440 + continue; 441 + pcpu = pcpu_devices + cpu; 442 + pcpu->address = info->cpu[i].address; 443 + pcpu->state = (cpu >= info->configured) ? 444 + CPU_STATE_STANDBY : CPU_STATE_CONFIGURED; 445 + cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 446 + set_cpu_present(cpu, true); 447 + if (sysfs_add && smp_add_present_cpu(cpu) != 0) 448 + set_cpu_present(cpu, false); 449 + else 450 + nr++; 451 + cpu = cpumask_next(cpu, &avail); 452 + } 453 + return nr; 667 454 } 668 455 669 456 static void __init smp_detect_cpus(void) 670 457 { 671 458 unsigned int cpu, c_cpus, s_cpus; 672 459 struct sclp_cpu_info *info; 673 - u16 boot_cpu_addr, cpu_addr; 674 460 675 - c_cpus = 1; 676 - s_cpus = 0; 677 - boot_cpu_addr = __cpu_logical_map[0]; 678 - info = kmalloc(sizeof(*info), GFP_KERNEL); 461 + info = smp_get_cpu_info(); 679 462 if (!info) 680 463 panic("smp_detect_cpus failed to allocate memory\n"); 681 - #ifdef CONFIG_CRASH_DUMP 682 - if (OLDMEM_BASE && !is_kdump_kernel()) { 683 - struct save_area *save_area; 684 - 685 - save_area = kmalloc(sizeof(*save_area), GFP_KERNEL); 686 - if (!save_area) 687 - panic("could not allocate memory for save area\n"); 688 - copy_oldmem_page(1, (void *) save_area, sizeof(*save_area), 689 - 0x200, 0); 690 - zfcpdump_save_areas[0] = save_area; 691 - } 692 - #endif 693 - /* Use sigp detection algorithm if sclp doesn't work. 
*/ 694 - if (sclp_get_cpu_info(info)) { 695 - smp_use_sigp_detection = 1; 696 - for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) { 697 - if (cpu == boot_cpu_addr) 698 - continue; 699 - if (!raw_cpu_stopped(cpu)) 700 - continue; 701 - smp_get_save_area(c_cpus, cpu); 702 - c_cpus++; 703 - } 704 - goto out; 705 - } 706 - 707 464 if (info->has_cpu_type) { 708 465 for (cpu = 0; cpu < info->combined; cpu++) { 709 - if (info->cpu[cpu].address == boot_cpu_addr) { 710 - smp_cpu_type = info->cpu[cpu].type; 711 - break; 712 - } 466 + if (info->cpu[cpu].address != boot_cpu_address) 467 + continue; 468 + /* The boot cpu dictates the cpu type. */ 469 + boot_cpu_type = info->cpu[cpu].type; 470 + break; 713 471 } 714 472 } 715 - 473 + c_cpus = s_cpus = 0; 716 474 for (cpu = 0; cpu < info->combined; cpu++) { 717 - if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type) 475 + if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type) 718 476 continue; 719 - cpu_addr = info->cpu[cpu].address; 720 - if (cpu_addr == boot_cpu_addr) 721 - continue; 722 - if (!raw_cpu_stopped(cpu_addr)) { 477 + if (cpu < info->configured) { 478 + smp_get_save_area(c_cpus, info->cpu[cpu].address); 479 + c_cpus++; 480 + } else 723 481 s_cpus++; 724 - continue; 725 - } 726 - smp_get_save_area(c_cpus, cpu_addr); 727 - c_cpus++; 728 482 } 729 - out: 730 - kfree(info); 731 483 pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus); 732 484 get_online_cpus(); 733 - __smp_rescan_cpus(); 485 + __smp_rescan_cpus(info, 0); 734 486 put_online_cpus(); 487 + kfree(info); 735 488 } 736 489 737 490 /* 738 491 * Activate a secondary processor. 
739 492 */ 740 - int __cpuinit start_secondary(void *cpuvoid) 493 + static void __cpuinit smp_start_secondary(void *cpuvoid) 741 494 { 495 + S390_lowcore.last_update_clock = get_clock(); 496 + S390_lowcore.restart_stack = (unsigned long) restart_stack; 497 + S390_lowcore.restart_fn = (unsigned long) do_restart; 498 + S390_lowcore.restart_data = 0; 499 + S390_lowcore.restart_source = -1UL; 500 + restore_access_regs(S390_lowcore.access_regs_save_area); 501 + __ctl_load(S390_lowcore.cregs_save_area, 0, 15); 502 + __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); 742 503 cpu_init(); 743 504 preempt_disable(); 744 505 init_cpu_timer(); 745 506 init_cpu_vtimer(); 746 507 pfault_init(); 747 - 748 508 notify_cpu_starting(smp_processor_id()); 749 509 ipi_call_lock(); 750 510 set_cpu_online(smp_processor_id(), true); 751 511 ipi_call_unlock(); 752 - __ctl_clear_bit(0, 28); /* Disable lowcore protection */ 753 - S390_lowcore.restart_psw.mask = 754 - PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA; 755 - S390_lowcore.restart_psw.addr = 756 - PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler; 757 - __ctl_set_bit(0, 28); /* Enable lowcore protection */ 758 512 local_irq_enable(); 759 513 /* cpu_idle will call schedule for us */ 760 514 cpu_idle(); 761 - return 0; 762 515 } 763 516 764 517 struct create_idle { ··· 735 572 complete(&c_idle->done); 736 573 } 737 574 738 - static int __cpuinit smp_alloc_lowcore(int cpu) 739 - { 740 - unsigned long async_stack, panic_stack; 741 - struct _lowcore *lowcore; 742 - 743 - lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); 744 - if (!lowcore) 745 - return -ENOMEM; 746 - async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); 747 - panic_stack = __get_free_page(GFP_KERNEL); 748 - if (!panic_stack || !async_stack) 749 - goto out; 750 - memcpy(lowcore, &S390_lowcore, 512); 751 - memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512); 752 - lowcore->async_stack = async_stack + ASYNC_SIZE; 753 - 
lowcore->panic_stack = panic_stack + PAGE_SIZE; 754 - lowcore->restart_psw.mask = 755 - PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA; 756 - lowcore->restart_psw.addr = 757 - PSW_ADDR_AMODE | (unsigned long) restart_int_handler; 758 - if (user_mode != HOME_SPACE_MODE) 759 - lowcore->restart_psw.mask |= PSW_ASC_HOME; 760 - #ifndef CONFIG_64BIT 761 - if (MACHINE_HAS_IEEE) { 762 - unsigned long save_area; 763 - 764 - save_area = get_zeroed_page(GFP_KERNEL); 765 - if (!save_area) 766 - goto out; 767 - lowcore->extended_save_area_addr = (u32) save_area; 768 - } 769 - #else 770 - if (vdso_alloc_per_cpu(cpu, lowcore)) 771 - goto out; 772 - #endif 773 - lowcore_ptr[cpu] = lowcore; 774 - return 0; 775 - 776 - out: 777 - free_page(panic_stack); 778 - free_pages(async_stack, ASYNC_ORDER); 779 - free_pages((unsigned long) lowcore, LC_ORDER); 780 - return -ENOMEM; 781 - } 782 - 783 - static void smp_free_lowcore(int cpu) 784 - { 785 - struct _lowcore *lowcore; 786 - 787 - lowcore = lowcore_ptr[cpu]; 788 - #ifndef CONFIG_64BIT 789 - if (MACHINE_HAS_IEEE) 790 - free_page((unsigned long) lowcore->extended_save_area_addr); 791 - #else 792 - vdso_free_per_cpu(cpu, lowcore); 793 - #endif 794 - free_page(lowcore->panic_stack - PAGE_SIZE); 795 - free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER); 796 - free_pages((unsigned long) lowcore, LC_ORDER); 797 - lowcore_ptr[cpu] = NULL; 798 - } 799 - 800 575 /* Upping and downing of CPUs */ 801 576 int __cpuinit __cpu_up(unsigned int cpu) 802 577 { 803 - struct _lowcore *cpu_lowcore; 804 578 struct create_idle c_idle; 805 - struct task_struct *idle; 806 - struct stack_frame *sf; 807 - u32 lowcore; 808 - int ccode; 579 + struct pcpu *pcpu; 580 + int rc; 809 581 810 - if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) 582 + pcpu = pcpu_devices + cpu; 583 + if (pcpu->state != CPU_STATE_CONFIGURED) 811 584 return -EIO; 812 - idle = current_set[cpu]; 813 - if (!idle) { 585 + if (pcpu_sigp_retry(pcpu, sigp_initial_cpu_reset, 0) != 
586 + sigp_order_code_accepted) 587 + return -EIO; 588 + if (!pcpu->idle) { 814 589 c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done); 815 590 INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle); 816 591 c_idle.cpu = cpu; ··· 756 655 wait_for_completion(&c_idle.done); 757 656 if (IS_ERR(c_idle.idle)) 758 657 return PTR_ERR(c_idle.idle); 759 - idle = c_idle.idle; 760 - current_set[cpu] = c_idle.idle; 658 + pcpu->idle = c_idle.idle; 761 659 } 762 - init_idle(idle, cpu); 763 - if (smp_alloc_lowcore(cpu)) 764 - return -ENOMEM; 765 - do { 766 - ccode = sigp(cpu, sigp_initial_cpu_reset); 767 - if (ccode == sigp_busy) 768 - udelay(10); 769 - if (ccode == sigp_not_operational) 770 - goto err_out; 771 - } while (ccode == sigp_busy); 772 - 773 - lowcore = (u32)(unsigned long)lowcore_ptr[cpu]; 774 - while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy) 775 - udelay(10); 776 - 777 - cpu_lowcore = lowcore_ptr[cpu]; 778 - cpu_lowcore->kernel_stack = (unsigned long) 779 - task_stack_page(idle) + THREAD_SIZE; 780 - cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle); 781 - sf = (struct stack_frame *) (cpu_lowcore->kernel_stack 782 - - sizeof(struct pt_regs) 783 - - sizeof(struct stack_frame)); 784 - memset(sf, 0, sizeof(struct stack_frame)); 785 - sf->gprs[9] = (unsigned long) sf; 786 - cpu_lowcore->gpregs_save_area[15] = (unsigned long) sf; 787 - __ctl_store(cpu_lowcore->cregs_save_area, 0, 15); 788 - atomic_inc(&init_mm.context.attach_count); 789 - asm volatile( 790 - " stam 0,15,0(%0)" 791 - : : "a" (&cpu_lowcore->access_regs_save_area) : "memory"); 792 - cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; 793 - cpu_lowcore->current_task = (unsigned long) idle; 794 - cpu_lowcore->cpu_nr = cpu; 795 - cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce; 796 - cpu_lowcore->machine_flags = S390_lowcore.machine_flags; 797 - cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func; 798 - memcpy(cpu_lowcore->stfle_fac_list, S390_lowcore.stfle_fac_list, 799 - 
MAX_FACILITY_BIT/8); 800 - eieio(); 801 - 802 - while (sigp(cpu, sigp_restart) == sigp_busy) 803 - udelay(10); 804 - 660 + init_idle(pcpu->idle, cpu); 661 + rc = pcpu_alloc_lowcore(pcpu, cpu); 662 + if (rc) 663 + return rc; 664 + pcpu_prepare_secondary(pcpu, cpu); 665 + pcpu_attach_task(pcpu, pcpu->idle); 666 + pcpu_start_fn(pcpu, smp_start_secondary, NULL); 805 667 while (!cpu_online(cpu)) 806 668 cpu_relax(); 807 669 return 0; 808 - 809 - err_out: 810 - smp_free_lowcore(cpu); 811 - return -EIO; 812 670 } 813 671 814 672 static int __init setup_possible_cpus(char *s) 815 673 { 816 - int pcpus, cpu; 674 + int max, cpu; 817 675 818 - pcpus = simple_strtoul(s, NULL, 0); 676 + if (kstrtoint(s, 0, &max) < 0) 677 + return 0; 819 678 init_cpu_possible(cpumask_of(0)); 820 - for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++) 679 + for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++) 821 680 set_cpu_possible(cpu, true); 822 681 return 0; 823 682 } ··· 787 726 788 727 int __cpu_disable(void) 789 728 { 790 - struct ec_creg_mask_parms cr_parms; 791 - int cpu = smp_processor_id(); 729 + unsigned long cregs[16]; 792 730 793 - set_cpu_online(cpu, false); 794 - 795 - /* Disable pfault pseudo page faults on this cpu. */ 731 + set_cpu_online(smp_processor_id(), false); 732 + /* Disable pseudo page faults on this cpu. 
*/ 796 733 pfault_fini(); 797 - 798 - memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals)); 799 - memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals)); 800 - 801 - /* disable all external interrupts */ 802 - cr_parms.orvals[0] = 0; 803 - cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 | 804 - 1 << 10 | 1 << 9 | 1 << 6 | 1 << 5 | 805 - 1 << 4); 806 - /* disable all I/O interrupts */ 807 - cr_parms.orvals[6] = 0; 808 - cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 | 809 - 1 << 27 | 1 << 26 | 1 << 25 | 1 << 24); 810 - /* disable most machine checks */ 811 - cr_parms.orvals[14] = 0; 812 - cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 | 813 - 1 << 25 | 1 << 24); 814 - 815 - smp_ctl_bit_callback(&cr_parms); 816 - 734 + /* Disable interrupt sources via control register. */ 735 + __ctl_store(cregs, 0, 15); 736 + cregs[0] &= ~0x0000ee70UL; /* disable all external interrupts */ 737 + cregs[6] &= ~0xff000000UL; /* disable all I/O interrupts */ 738 + cregs[14] &= ~0x1f000000UL; /* disable most machine checks */ 739 + __ctl_load(cregs, 0, 15); 817 740 return 0; 818 741 } 819 742 820 743 void __cpu_die(unsigned int cpu) 821 744 { 745 + struct pcpu *pcpu; 746 + 822 747 /* Wait until target cpu is down */ 823 - while (!cpu_stopped(cpu)) 748 + pcpu = pcpu_devices + cpu; 749 + while (!pcpu_stopped(pcpu)) 824 750 cpu_relax(); 825 - while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy) 826 - udelay(10); 827 - smp_free_lowcore(cpu); 751 + pcpu_free_lowcore(pcpu); 828 752 atomic_dec(&init_mm.context.attach_count); 829 753 } 830 754 831 755 void __noreturn cpu_die(void) 832 756 { 833 757 idle_task_exit(); 834 - while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) 835 - cpu_relax(); 836 - for (;;); 758 + pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0); 759 + for (;;) ; 837 760 } 838 761 839 762 #endif /* CONFIG_HOTPLUG_CPU */ 840 763 764 + static void smp_call_os_info_init_fn(void) 765 + { 766 + int (*init_fn)(void); 767 + 
unsigned long size; 768 + 769 + init_fn = os_info_old_entry(OS_INFO_INIT_FN, &size); 770 + if (!init_fn) 771 + return; 772 + init_fn(); 773 + } 774 + 841 775 void __init smp_prepare_cpus(unsigned int max_cpus) 842 776 { 843 - #ifndef CONFIG_64BIT 844 - unsigned long save_area = 0; 845 - #endif 846 - unsigned long async_stack, panic_stack; 847 - struct _lowcore *lowcore; 848 - 849 - smp_detect_cpus(); 850 - 851 777 /* request the 0x1201 emergency signal external interrupt */ 852 778 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) 853 779 panic("Couldn't request external interrupt 0x1201"); 854 780 /* request the 0x1202 external call external interrupt */ 855 781 if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0) 856 782 panic("Couldn't request external interrupt 0x1202"); 857 - 858 - /* Reallocate current lowcore, but keep its contents. */ 859 - lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); 860 - panic_stack = __get_free_page(GFP_KERNEL); 861 - async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); 862 - BUG_ON(!lowcore || !panic_stack || !async_stack); 863 - #ifndef CONFIG_64BIT 864 - if (MACHINE_HAS_IEEE) 865 - save_area = get_zeroed_page(GFP_KERNEL); 866 - #endif 867 - local_irq_disable(); 868 - local_mcck_disable(); 869 - lowcore_ptr[smp_processor_id()] = lowcore; 870 - *lowcore = S390_lowcore; 871 - lowcore->panic_stack = panic_stack + PAGE_SIZE; 872 - lowcore->async_stack = async_stack + ASYNC_SIZE; 873 - #ifndef CONFIG_64BIT 874 - if (MACHINE_HAS_IEEE) 875 - lowcore->extended_save_area_addr = (u32) save_area; 876 - #endif 877 - set_prefix((u32)(unsigned long) lowcore); 878 - local_mcck_enable(); 879 - local_irq_enable(); 880 - #ifdef CONFIG_64BIT 881 - if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore)) 882 - BUG(); 883 - #endif 783 + smp_call_os_info_init_fn(); 784 + smp_detect_cpus(); 884 785 } 885 786 886 787 void __init smp_prepare_boot_cpu(void) 887 788 { 888 - 
BUG_ON(smp_processor_id() != 0); 789 + struct pcpu *pcpu = pcpu_devices; 889 790 890 - current_thread_info()->cpu = 0; 791 + boot_cpu_address = stap(); 792 + pcpu->idle = current; 793 + pcpu->state = CPU_STATE_CONFIGURED; 794 + pcpu->address = boot_cpu_address; 795 + pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix(); 796 + pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE; 797 + pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE; 798 + S390_lowcore.percpu_offset = __per_cpu_offset[0]; 799 + cpu_set_polarization(0, POLARIZATION_UNKNOWN); 891 800 set_cpu_present(0, true); 892 801 set_cpu_online(0, true); 893 - S390_lowcore.percpu_offset = __per_cpu_offset[0]; 894 - current_set[0] = current; 895 - smp_cpu_state[0] = CPU_STATE_CONFIGURED; 896 - cpu_set_polarization(0, POLARIZATION_UNKNOWN); 897 802 } 898 803 899 804 void __init smp_cpus_done(unsigned int max_cpus) ··· 869 842 void __init smp_setup_processor_id(void) 870 843 { 871 844 S390_lowcore.cpu_nr = 0; 872 - __cpu_logical_map[0] = stap(); 873 845 } 874 846 875 847 /* ··· 884 858 885 859 #ifdef CONFIG_HOTPLUG_CPU 886 860 static ssize_t cpu_configure_show(struct device *dev, 887 - struct device_attribute *attr, char *buf) 861 + struct device_attribute *attr, char *buf) 888 862 { 889 863 ssize_t count; 890 864 891 865 mutex_lock(&smp_cpu_state_mutex); 892 - count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]); 866 + count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state); 893 867 mutex_unlock(&smp_cpu_state_mutex); 894 868 return count; 895 869 } 896 870 897 871 static ssize_t cpu_configure_store(struct device *dev, 898 - struct device_attribute *attr, 899 - const char *buf, size_t count) 872 + struct device_attribute *attr, 873 + const char *buf, size_t count) 900 874 { 901 - int cpu = dev->id; 902 - int val, rc; 875 + struct pcpu *pcpu; 876 + int cpu, val, rc; 903 877 char delim; 904 878 905 879 if (sscanf(buf, "%d %c", &val, &delim) != 1) 906 880 return -EINVAL; 907 881 if (val != 
0 && val != 1) 908 882 return -EINVAL; 909 - 910 883 get_online_cpus(); 911 884 mutex_lock(&smp_cpu_state_mutex); 912 885 rc = -EBUSY; 913 886 /* disallow configuration changes of online cpus and cpu 0 */ 887 + cpu = dev->id; 914 888 if (cpu_online(cpu) || cpu == 0) 915 889 goto out; 890 + pcpu = pcpu_devices + cpu; 916 891 rc = 0; 917 892 switch (val) { 918 893 case 0: 919 - if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) { 920 - rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]); 921 - if (!rc) { 922 - smp_cpu_state[cpu] = CPU_STATE_STANDBY; 923 - cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 924 - topology_expect_change(); 925 - } 926 - } 894 + if (pcpu->state != CPU_STATE_CONFIGURED) 895 + break; 896 + rc = sclp_cpu_deconfigure(pcpu->address); 897 + if (rc) 898 + break; 899 + pcpu->state = CPU_STATE_STANDBY; 900 + cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 901 + topology_expect_change(); 927 902 break; 928 903 case 1: 929 - if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) { 930 - rc = sclp_cpu_configure(__cpu_logical_map[cpu]); 931 - if (!rc) { 932 - smp_cpu_state[cpu] = CPU_STATE_CONFIGURED; 933 - cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 934 - topology_expect_change(); 935 - } 936 - } 904 + if (pcpu->state != CPU_STATE_STANDBY) 905 + break; 906 + rc = sclp_cpu_configure(pcpu->address); 907 + if (rc) 908 + break; 909 + pcpu->state = CPU_STATE_CONFIGURED; 910 + cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); 911 + topology_expect_change(); 937 912 break; 938 913 default: 939 914 break; ··· 950 923 static ssize_t show_cpu_address(struct device *dev, 951 924 struct device_attribute *attr, char *buf) 952 925 { 953 - return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]); 926 + return sprintf(buf, "%d\n", pcpu_devices[dev->id].address); 954 927 } 955 928 static DEVICE_ATTR(address, 0444, show_cpu_address, NULL); 956 929 ··· 982 955 static ssize_t show_idle_count(struct device *dev, 983 956 struct device_attribute *attr, char *buf) 984 957 { 985 - 
struct s390_idle_data *idle; 958 + struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); 986 959 unsigned long long idle_count; 987 960 unsigned int sequence; 988 961 989 - idle = &per_cpu(s390_idle, dev->id); 990 - repeat: 991 - sequence = idle->sequence; 992 - smp_rmb(); 993 - if (sequence & 1) 994 - goto repeat; 995 - idle_count = idle->idle_count; 996 - if (idle->idle_enter) 997 - idle_count++; 998 - smp_rmb(); 999 - if (idle->sequence != sequence) 1000 - goto repeat; 962 + do { 963 + sequence = ACCESS_ONCE(idle->sequence); 964 + idle_count = ACCESS_ONCE(idle->idle_count); 965 + if (ACCESS_ONCE(idle->idle_enter)) 966 + idle_count++; 967 + } while ((sequence & 1) || (idle->sequence != sequence)); 1001 968 return sprintf(buf, "%llu\n", idle_count); 1002 969 } 1003 970 static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL); ··· 999 978 static ssize_t show_idle_time(struct device *dev, 1000 979 struct device_attribute *attr, char *buf) 1001 980 { 1002 - struct s390_idle_data *idle; 1003 - unsigned long long now, idle_time, idle_enter; 981 + struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); 982 + unsigned long long now, idle_time, idle_enter, idle_exit; 1004 983 unsigned int sequence; 1005 984 1006 - idle = &per_cpu(s390_idle, dev->id); 1007 - now = get_clock(); 1008 - repeat: 1009 - sequence = idle->sequence; 1010 - smp_rmb(); 1011 - if (sequence & 1) 1012 - goto repeat; 1013 - idle_time = idle->idle_time; 1014 - idle_enter = idle->idle_enter; 1015 - if (idle_enter != 0ULL && idle_enter < now) 1016 - idle_time += now - idle_enter; 1017 - smp_rmb(); 1018 - if (idle->sequence != sequence) 1019 - goto repeat; 985 + do { 986 + now = get_clock(); 987 + sequence = ACCESS_ONCE(idle->sequence); 988 + idle_time = ACCESS_ONCE(idle->idle_time); 989 + idle_enter = ACCESS_ONCE(idle->idle_enter); 990 + idle_exit = ACCESS_ONCE(idle->idle_exit); 991 + } while ((sequence & 1) || (idle->sequence != sequence)); 992 + idle_time += idle_enter ? ((idle_exit ? 
: now) - idle_enter) : 0; 1020 993 return sprintf(buf, "%llu\n", idle_time >> 12); 1021 994 } 1022 995 static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL); ··· 1030 1015 unsigned long action, void *hcpu) 1031 1016 { 1032 1017 unsigned int cpu = (unsigned int)(long)hcpu; 1033 - struct cpu *c = &per_cpu(cpu_devices, cpu); 1018 + struct cpu *c = &pcpu_devices[cpu].cpu; 1034 1019 struct device *s = &c->dev; 1035 1020 struct s390_idle_data *idle; 1036 1021 int err = 0; ··· 1056 1041 1057 1042 static int __devinit smp_add_present_cpu(int cpu) 1058 1043 { 1059 - struct cpu *c = &per_cpu(cpu_devices, cpu); 1044 + struct cpu *c = &pcpu_devices[cpu].cpu; 1060 1045 struct device *s = &c->dev; 1061 1046 int rc; 1062 1047 ··· 1094 1079 1095 1080 int __ref smp_rescan_cpus(void) 1096 1081 { 1097 - cpumask_t newcpus; 1098 - int cpu; 1099 - int rc; 1082 + struct sclp_cpu_info *info; 1083 + int nr; 1100 1084 1085 + info = smp_get_cpu_info(); 1086 + if (!info) 1087 + return -ENOMEM; 1101 1088 get_online_cpus(); 1102 1089 mutex_lock(&smp_cpu_state_mutex); 1103 - cpumask_copy(&newcpus, cpu_present_mask); 1104 - rc = __smp_rescan_cpus(); 1105 - if (rc) 1106 - goto out; 1107 - cpumask_andnot(&newcpus, cpu_present_mask, &newcpus); 1108 - for_each_cpu(cpu, &newcpus) { 1109 - rc = smp_add_present_cpu(cpu); 1110 - if (rc) 1111 - set_cpu_present(cpu, false); 1112 - } 1113 - rc = 0; 1114 - out: 1090 + nr = __smp_rescan_cpus(info, 1); 1115 1091 mutex_unlock(&smp_cpu_state_mutex); 1116 1092 put_online_cpus(); 1117 - if (!cpumask_empty(&newcpus)) 1093 + kfree(info); 1094 + if (nr) 1118 1095 topology_schedule_update(); 1119 - return rc; 1096 + return 0; 1120 1097 } 1121 1098 1122 1099 static ssize_t __ref rescan_store(struct device *dev,
-58
arch/s390/kernel/switch_cpu.S
··· 1 - /* 2 - * 31-bit switch cpu code 3 - * 4 - * Copyright IBM Corp. 2009 5 - * 6 - */ 7 - 8 - #include <linux/linkage.h> 9 - #include <asm/asm-offsets.h> 10 - #include <asm/ptrace.h> 11 - 12 - # smp_switch_to_cpu switches to destination cpu and executes the passed function 13 - # Parameter: %r2 - function to call 14 - # %r3 - function parameter 15 - # %r4 - stack poiner 16 - # %r5 - current cpu 17 - # %r6 - destination cpu 18 - 19 - .section .text 20 - ENTRY(smp_switch_to_cpu) 21 - stm %r6,%r15,__SF_GPRS(%r15) 22 - lr %r1,%r15 23 - ahi %r15,-STACK_FRAME_OVERHEAD 24 - st %r1,__SF_BACKCHAIN(%r15) 25 - basr %r13,0 26 - 0: la %r1,.gprregs_addr-0b(%r13) 27 - l %r1,0(%r1) 28 - stm %r0,%r15,0(%r1) 29 - 1: sigp %r0,%r6,__SIGP_RESTART /* start destination CPU */ 30 - brc 2,1b /* busy, try again */ 31 - 2: sigp %r0,%r5,__SIGP_STOP /* stop current CPU */ 32 - brc 2,2b /* busy, try again */ 33 - 3: j 3b 34 - 35 - ENTRY(smp_restart_cpu) 36 - basr %r13,0 37 - 0: la %r1,.gprregs_addr-0b(%r13) 38 - l %r1,0(%r1) 39 - lm %r0,%r15,0(%r1) 40 - 1: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */ 41 - brc 10,1b /* busy, accepted (status 0), running */ 42 - tmll %r0,0x40 /* Test if calling CPU is stopped */ 43 - jz 1b 44 - ltr %r4,%r4 /* New stack ? */ 45 - jz 1f 46 - lr %r15,%r4 47 - 1: lr %r14,%r2 /* r14: Function to call */ 48 - lr %r2,%r3 /* r2 : Parameter for function*/ 49 - basr %r14,%r14 /* Call function */ 50 - 51 - .gprregs_addr: 52 - .long .gprregs 53 - 54 - .section .data,"aw",@progbits 55 - .gprregs: 56 - .rept 16 57 - .long 0 58 - .endr
-51
arch/s390/kernel/switch_cpu64.S
··· 1 - /* 2 - * 64-bit switch cpu code 3 - * 4 - * Copyright IBM Corp. 2009 5 - * 6 - */ 7 - 8 - #include <linux/linkage.h> 9 - #include <asm/asm-offsets.h> 10 - #include <asm/ptrace.h> 11 - 12 - # smp_switch_to_cpu switches to destination cpu and executes the passed function 13 - # Parameter: %r2 - function to call 14 - # %r3 - function parameter 15 - # %r4 - stack poiner 16 - # %r5 - current cpu 17 - # %r6 - destination cpu 18 - 19 - .section .text 20 - ENTRY(smp_switch_to_cpu) 21 - stmg %r6,%r15,__SF_GPRS(%r15) 22 - lgr %r1,%r15 23 - aghi %r15,-STACK_FRAME_OVERHEAD 24 - stg %r1,__SF_BACKCHAIN(%r15) 25 - larl %r1,.gprregs 26 - stmg %r0,%r15,0(%r1) 27 - 1: sigp %r0,%r6,__SIGP_RESTART /* start destination CPU */ 28 - brc 2,1b /* busy, try again */ 29 - 2: sigp %r0,%r5,__SIGP_STOP /* stop current CPU */ 30 - brc 2,2b /* busy, try again */ 31 - 3: j 3b 32 - 33 - ENTRY(smp_restart_cpu) 34 - larl %r1,.gprregs 35 - lmg %r0,%r15,0(%r1) 36 - 1: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */ 37 - brc 10,1b /* busy, accepted (status 0), running */ 38 - tmll %r0,0x40 /* Test if calling CPU is stopped */ 39 - jz 1b 40 - ltgr %r4,%r4 /* New stack ? */ 41 - jz 1f 42 - lgr %r15,%r4 43 - 1: lgr %r14,%r2 /* r14: Function to call */ 44 - lgr %r2,%r3 /* r2 : Parameter for function*/ 45 - basr %r14,%r14 /* Call function */ 46 - 47 - .section .data,"aw",@progbits 48 - .gprregs: 49 - .rept 16 50 - .quad 0 51 - .endr
+11 -8
arch/s390/kernel/swsusp_asm64.S
··· 42 42 lghi %r1,0x1000 43 43 44 44 /* Save CPU address */ 45 - stap __LC_CPU_ADDRESS(%r0) 45 + stap __LC_EXT_CPU_ADDR(%r0) 46 46 47 47 /* Store registers */ 48 48 mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */ ··· 173 173 larl %r1,.Lresume_cpu /* Resume CPU address: r2 */ 174 174 stap 0(%r1) 175 175 llgh %r2,0(%r1) 176 - llgh %r1,__LC_CPU_ADDRESS(%r0) /* Suspend CPU address: r1 */ 176 + llgh %r1,__LC_EXT_CPU_ADDR(%r0) /* Suspend CPU address: r1 */ 177 177 cgr %r1,%r2 178 178 je restore_registers /* r1 = r2 -> nothing to do */ 179 179 larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */ 180 180 mvc __LC_RST_NEW_PSW(16,%r0),0(%r4) 181 181 3: 182 - sigp %r9,%r1,__SIGP_INITIAL_CPU_RESET 183 - brc 8,4f /* accepted */ 184 - brc 2,3b /* busy, try again */ 182 + sigp %r9,%r1,11 /* sigp initial cpu reset */ 183 + brc 8,4f /* accepted */ 184 + brc 2,3b /* busy, try again */ 185 185 186 186 /* Suspend CPU not available -> panic */ 187 187 larl %r15,init_thread_union ··· 196 196 lpsw 0(%r3) 197 197 4: 198 198 /* Switch to suspend CPU */ 199 - sigp %r9,%r1,__SIGP_RESTART /* start suspend CPU */ 199 + sigp %r9,%r1,6 /* sigp restart to suspend CPU */ 200 200 brc 2,4b /* busy, try again */ 201 201 5: 202 - sigp %r9,%r2,__SIGP_STOP /* stop resume (current) CPU */ 202 + sigp %r9,%r2,5 /* sigp stop to current resume CPU */ 203 203 brc 2,5b /* busy, try again */ 204 204 6: j 6b 205 205 ··· 207 207 larl %r1,.Lresume_cpu 208 208 llgh %r2,0(%r1) 209 209 7: 210 - sigp %r9,%r2,__SIGP_SENSE /* Wait for resume CPU */ 210 + sigp %r9,%r2,1 /* sigp sense, wait for resume CPU */ 211 211 brc 8,7b /* accepted, status 0, still running */ 212 212 brc 2,7b /* busy, try again */ 213 213 tmll %r9,0x40 /* Test if resume CPU is stopped */ ··· 256 256 /* Make all free pages unstable */ 257 257 lghi %r2,0 258 258 brasl %r14,arch_set_page_states 259 + 260 + /* Log potential guest relocation */ 261 + brasl %r14,lgr_info_log 259 262 260 263 /* Reinitialize the channel subsystem */ 261 
264 brasl %r14,channel_subsystem_reinit
+2 -2
arch/s390/kernel/time.c
··· 165 165 __ctl_set_bit(0, 4); 166 166 } 167 167 168 - static void clock_comparator_interrupt(unsigned int ext_int_code, 168 + static void clock_comparator_interrupt(struct ext_code ext_code, 169 169 unsigned int param32, 170 170 unsigned long param64) 171 171 { ··· 177 177 static void etr_timing_alert(struct etr_irq_parm *); 178 178 static void stp_timing_alert(struct stp_irq_parm *); 179 179 180 - static void timing_alert_interrupt(unsigned int ext_int_code, 180 + static void timing_alert_interrupt(struct ext_code ext_code, 181 181 unsigned int param32, unsigned long param64) 182 182 { 183 183 kstat_cpu(smp_processor_id()).irqs[EXTINT_TLA]++;
+4 -4
arch/s390/kernel/topology.c
··· 79 79 cpu < TOPOLOGY_CPU_BITS; 80 80 cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1)) 81 81 { 82 - unsigned int rcpu, lcpu; 82 + unsigned int rcpu; 83 + int lcpu; 83 84 84 85 rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin; 85 - for_each_present_cpu(lcpu) { 86 - if (cpu_logical_map(lcpu) != rcpu) 87 - continue; 86 + lcpu = smp_find_processor_id(rcpu); 87 + if (lcpu >= 0) { 88 88 cpumask_set_cpu(lcpu, &book->mask); 89 89 cpu_book_id[lcpu] = book->id; 90 90 cpumask_set_cpu(lcpu, &core->mask);
+4 -2
arch/s390/kernel/traps.c
··· 41 41 #include <asm/cpcmd.h> 42 42 #include <asm/lowcore.h> 43 43 #include <asm/debug.h> 44 + #include <asm/ipl.h> 44 45 #include "entry.h" 45 46 46 47 void (*pgm_check_table[128])(struct pt_regs *regs); ··· 145 144 for (i = 0; i < kstack_depth_to_print; i++) { 146 145 if (((addr_t) stack & (THREAD_SIZE-1)) == 0) 147 146 break; 148 - if (i && ((i * sizeof (long) % 32) == 0)) 149 - printk("\n "); 147 + if ((i * sizeof(long) % 32) == 0) 148 + printk("%s ", i == 0 ? "" : "\n"); 150 149 printk(LONG, *stack++); 151 150 } 152 151 printk("\n"); ··· 240 239 static int die_counter; 241 240 242 241 oops_enter(); 242 + lgr_info_log(); 243 243 debug_stop_all(); 244 244 console_verbose(); 245 245 spin_lock_irq(&die_lock);
+7 -21
arch/s390/kernel/vdso.c
··· 89 89 90 90 #ifdef CONFIG_64BIT 91 91 /* 92 - * Setup per cpu vdso data page. 93 - */ 94 - static void vdso_init_per_cpu_data(int cpu, struct vdso_per_cpu_data *vpcd) 95 - { 96 - } 97 - 98 - /* 99 92 * Allocate/free per cpu vdso data. 100 93 */ 101 94 #define SEGMENT_ORDER 2 102 95 103 - int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore) 96 + int vdso_alloc_per_cpu(struct _lowcore *lowcore) 104 97 { 105 98 unsigned long segment_table, page_table, page_frame; 106 99 u32 *psal, *aste; ··· 132 139 aste[4] = (u32)(addr_t) psal; 133 140 lowcore->vdso_per_cpu_data = page_frame; 134 141 135 - vdso_init_per_cpu_data(cpu, (struct vdso_per_cpu_data *) page_frame); 136 142 return 0; 137 143 138 144 out: ··· 141 149 return -ENOMEM; 142 150 } 143 151 144 - void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore) 152 + void vdso_free_per_cpu(struct _lowcore *lowcore) 145 153 { 146 154 unsigned long segment_table, page_table, page_frame; 147 155 u32 *psal, *aste; ··· 160 168 free_pages(segment_table, SEGMENT_ORDER); 161 169 } 162 170 163 - static void __vdso_init_cr5(void *dummy) 171 + static void vdso_init_cr5(void) 164 172 { 165 173 unsigned long cr5; 166 174 175 + if (user_mode == HOME_SPACE_MODE || !vdso_enabled) 176 + return; 167 177 cr5 = offsetof(struct _lowcore, paste); 168 178 __ctl_load(cr5, 5, 5); 169 - } 170 - 171 - static void vdso_init_cr5(void) 172 - { 173 - if (user_mode != HOME_SPACE_MODE && vdso_enabled) 174 - on_each_cpu(__vdso_init_cr5, NULL, 1); 175 179 } 176 180 #endif /* CONFIG_64BIT */ 177 181 ··· 310 322 } 311 323 vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data); 312 324 vdso64_pagelist[vdso64_pages] = NULL; 313 - #ifndef CONFIG_SMP 314 - if (vdso_alloc_per_cpu(0, &S390_lowcore)) 325 + if (vdso_alloc_per_cpu(&S390_lowcore)) 315 326 BUG(); 316 - #endif 317 327 vdso_init_cr5(); 318 328 #endif /* CONFIG_64BIT */ 319 329 ··· 321 335 322 336 return 0; 323 337 } 324 - arch_initcall(vdso_init); 338 + early_initcall(vdso_init); 325 339 326 
340 int in_gate_area_no_mm(unsigned long addr) 327 341 {
+39 -137
arch/s390/kernel/vtime.c
··· 26 26 #include <asm/irq_regs.h> 27 27 #include <asm/cputime.h> 28 28 #include <asm/irq.h> 29 + #include "entry.h" 29 30 30 31 static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); 31 32 ··· 124 123 } 125 124 EXPORT_SYMBOL_GPL(account_system_vtime); 126 125 127 - void __kprobes vtime_start_cpu(__u64 int_clock, __u64 enter_timer) 128 - { 129 - struct s390_idle_data *idle = &__get_cpu_var(s390_idle); 130 - struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer); 131 - __u64 idle_time, expires; 132 - 133 - if (idle->idle_enter == 0ULL) 134 - return; 135 - 136 - /* Account time spent with enabled wait psw loaded as idle time. */ 137 - idle_time = int_clock - idle->idle_enter; 138 - account_idle_time(idle_time); 139 - S390_lowcore.steal_timer += 140 - idle->idle_enter - S390_lowcore.last_update_clock; 141 - S390_lowcore.last_update_clock = int_clock; 142 - 143 - /* Account system time spent going idle. */ 144 - S390_lowcore.system_timer += S390_lowcore.last_update_timer - vq->idle; 145 - S390_lowcore.last_update_timer = enter_timer; 146 - 147 - /* Restart vtime CPU timer */ 148 - if (vq->do_spt) { 149 - /* Program old expire value but first save progress. */ 150 - expires = vq->idle - enter_timer; 151 - expires += get_vtimer(); 152 - set_vtimer(expires); 153 - } else { 154 - /* Don't account the CPU timer delta while the cpu was idle. */ 155 - vq->elapsed -= vq->idle - enter_timer; 156 - } 157 - 158 - idle->sequence++; 159 - smp_wmb(); 160 - idle->idle_time += idle_time; 161 - idle->idle_enter = 0ULL; 162 - idle->idle_count++; 163 - smp_wmb(); 164 - idle->sequence++; 165 - } 166 - 167 126 void __kprobes vtime_stop_cpu(void) 168 127 { 169 128 struct s390_idle_data *idle = &__get_cpu_var(s390_idle); 170 129 struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer); 171 - psw_t psw; 130 + unsigned long long idle_time; 131 + unsigned long psw_mask; 132 + 133 + trace_hardirqs_on(); 134 + /* Don't trace preempt off for idle. 
*/ 135 + stop_critical_timings(); 172 136 173 137 /* Wait for external, I/O or machine check interrupt. */ 174 - psw.mask = psw_kernel_bits | PSW_MASK_WAIT | 175 - PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; 176 - 138 + psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT | 139 + PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; 177 140 idle->nohz_delay = 0; 178 141 179 - /* Check if the CPU timer needs to be reprogrammed. */ 180 - if (vq->do_spt) { 181 - __u64 vmax = VTIMER_MAX_SLICE; 182 - /* 183 - * The inline assembly is equivalent to 184 - * vq->idle = get_cpu_timer(); 185 - * set_cpu_timer(VTIMER_MAX_SLICE); 186 - * idle->idle_enter = get_clock(); 187 - * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | 188 - * PSW_MASK_DAT | PSW_MASK_IO | 189 - * PSW_MASK_EXT | PSW_MASK_MCHECK); 190 - * The difference is that the inline assembly makes sure that 191 - * the last three instruction are stpt, stck and lpsw in that 192 - * order. This is done to increase the precision. 
193 - */ 194 - asm volatile( 195 - #ifndef CONFIG_64BIT 196 - " basr 1,0\n" 197 - "0: ahi 1,1f-0b\n" 198 - " st 1,4(%2)\n" 199 - #else /* CONFIG_64BIT */ 200 - " larl 1,1f\n" 201 - " stg 1,8(%2)\n" 202 - #endif /* CONFIG_64BIT */ 203 - " stpt 0(%4)\n" 204 - " spt 0(%5)\n" 205 - " stck 0(%3)\n" 206 - #ifndef CONFIG_64BIT 207 - " lpsw 0(%2)\n" 208 - #else /* CONFIG_64BIT */ 209 - " lpswe 0(%2)\n" 210 - #endif /* CONFIG_64BIT */ 211 - "1:" 212 - : "=m" (idle->idle_enter), "=m" (vq->idle) 213 - : "a" (&psw), "a" (&idle->idle_enter), 214 - "a" (&vq->idle), "a" (&vmax), "m" (vmax), "m" (psw) 215 - : "memory", "cc", "1"); 216 - } else { 217 - /* 218 - * The inline assembly is equivalent to 219 - * vq->idle = get_cpu_timer(); 220 - * idle->idle_enter = get_clock(); 221 - * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | 222 - * PSW_MASK_DAT | PSW_MASK_IO | 223 - * PSW_MASK_EXT | PSW_MASK_MCHECK); 224 - * The difference is that the inline assembly makes sure that 225 - * the last three instruction are stpt, stck and lpsw in that 226 - * order. This is done to increase the precision. 227 - */ 228 - asm volatile( 229 - #ifndef CONFIG_64BIT 230 - " basr 1,0\n" 231 - "0: ahi 1,1f-0b\n" 232 - " st 1,4(%2)\n" 233 - #else /* CONFIG_64BIT */ 234 - " larl 1,1f\n" 235 - " stg 1,8(%2)\n" 236 - #endif /* CONFIG_64BIT */ 237 - " stpt 0(%4)\n" 238 - " stck 0(%3)\n" 239 - #ifndef CONFIG_64BIT 240 - " lpsw 0(%2)\n" 241 - #else /* CONFIG_64BIT */ 242 - " lpswe 0(%2)\n" 243 - #endif /* CONFIG_64BIT */ 244 - "1:" 245 - : "=m" (idle->idle_enter), "=m" (vq->idle) 246 - : "a" (&psw), "a" (&idle->idle_enter), 247 - "a" (&vq->idle), "m" (psw) 248 - : "memory", "cc", "1"); 249 - } 142 + /* Call the assembler magic in entry.S */ 143 + psw_idle(idle, vq, psw_mask, !list_empty(&vq->list)); 144 + 145 + /* Reenable preemption tracer. */ 146 + start_critical_timings(); 147 + 148 + /* Account time spent with enabled wait psw loaded as idle time. 
*/ 149 + idle->sequence++; 150 + smp_wmb(); 151 + idle_time = idle->idle_exit - idle->idle_enter; 152 + idle->idle_time += idle_time; 153 + idle->idle_enter = idle->idle_exit = 0ULL; 154 + idle->idle_count++; 155 + account_idle_time(idle_time); 156 + smp_wmb(); 157 + idle->sequence++; 250 158 } 251 159 252 160 cputime64_t s390_get_idle_time(int cpu) 253 161 { 254 - struct s390_idle_data *idle; 255 - unsigned long long now, idle_time, idle_enter; 162 + struct s390_idle_data *idle = &per_cpu(s390_idle, cpu); 163 + unsigned long long now, idle_enter, idle_exit; 256 164 unsigned int sequence; 257 165 258 - idle = &per_cpu(s390_idle, cpu); 259 - 260 - now = get_clock(); 261 - repeat: 262 - sequence = idle->sequence; 263 - smp_rmb(); 264 - if (sequence & 1) 265 - goto repeat; 266 - idle_time = 0; 267 - idle_enter = idle->idle_enter; 268 - if (idle_enter != 0ULL && idle_enter < now) 269 - idle_time = now - idle_enter; 270 - smp_rmb(); 271 - if (idle->sequence != sequence) 272 - goto repeat; 273 - return idle_time; 166 + do { 167 + now = get_clock(); 168 + sequence = ACCESS_ONCE(idle->sequence); 169 + idle_enter = ACCESS_ONCE(idle->idle_enter); 170 + idle_exit = ACCESS_ONCE(idle->idle_exit); 171 + } while ((sequence & 1) || (idle->sequence != sequence)); 172 + return idle_enter ? ((idle_exit ? : now) - idle_enter) : 0; 274 173 } 275 174 276 175 /* ··· 220 319 /* 221 320 * Handler for the virtual CPU timer. 
222 321 */ 223 - static void do_cpu_timer_interrupt(unsigned int ext_int_code, 322 + static void do_cpu_timer_interrupt(struct ext_code ext_code, 224 323 unsigned int param32, unsigned long param64) 225 324 { 226 325 struct vtimer_queue *vq; ··· 247 346 } 248 347 spin_unlock(&vq->lock); 249 348 250 - vq->do_spt = list_empty(&cb_list); 251 349 do_callbacks(&cb_list); 252 350 253 351 /* next event is first in list */ ··· 255 355 if (!list_empty(&vq->list)) { 256 356 event = list_first_entry(&vq->list, struct vtimer_list, entry); 257 357 next = event->expires; 258 - } else 259 - vq->do_spt = 0; 358 + } 260 359 spin_unlock(&vq->lock); 261 360 /* 262 361 * To improve precision add the time spent by the ··· 469 570 470 571 /* enable cpu timer interrupts */ 471 572 __ctl_set_bit(0,10); 573 + 574 + /* set initial cpu timer */ 575 + set_vtimer(0x7fffffffffffffffULL); 472 576 } 473 577 474 578 static int __cpuinit s390_nohz_notify(struct notifier_block *self,
+3 -3
arch/s390/kvm/interrupt.c
··· 134 134 if (rc == -EFAULT) 135 135 exception = 1; 136 136 137 - rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, inti->emerg.code); 137 + rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->emerg.code); 138 138 if (rc == -EFAULT) 139 139 exception = 1; 140 140 ··· 156 156 if (rc == -EFAULT) 157 157 exception = 1; 158 158 159 - rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, inti->extcall.code); 159 + rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, inti->extcall.code); 160 160 if (rc == -EFAULT) 161 161 exception = 1; 162 162 ··· 202 202 if (rc == -EFAULT) 203 203 exception = 1; 204 204 205 - rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, 0x0d00); 205 + rc = put_guest_u16(vcpu, __LC_EXT_CPU_ADDR, 0x0d00); 206 206 if (rc == -EFAULT) 207 207 exception = 1; 208 208
+14 -17
arch/s390/lib/delay.c
··· 13 13 #include <linux/irqflags.h> 14 14 #include <linux/interrupt.h> 15 15 #include <asm/div64.h> 16 + #include <asm/timer.h> 16 17 17 18 void __delay(unsigned long loops) 18 19 { ··· 29 28 30 29 static void __udelay_disabled(unsigned long long usecs) 31 30 { 32 - unsigned long mask, cr0, cr0_saved; 33 - u64 clock_saved; 34 - u64 end; 31 + unsigned long cr0, cr6, new; 32 + u64 clock_saved, end; 35 33 36 - mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_WAIT | 37 - PSW_MASK_EXT | PSW_MASK_MCHECK; 38 34 end = get_clock() + (usecs << 12); 39 35 clock_saved = local_tick_disable(); 40 - __ctl_store(cr0_saved, 0, 0); 41 - cr0 = (cr0_saved & 0xffff00e0) | 0x00000800; 42 - __ctl_load(cr0 , 0, 0); 36 + __ctl_store(cr0, 0, 0); 37 + __ctl_store(cr6, 6, 6); 38 + new = (cr0 & 0xffff00e0) | 0x00000800; 39 + __ctl_load(new , 0, 0); 40 + new = 0; 41 + __ctl_load(new, 6, 6); 43 42 lockdep_off(); 44 43 do { 45 44 set_clock_comparator(end); 46 - trace_hardirqs_on(); 47 - __load_psw_mask(mask); 45 + vtime_stop_cpu(); 48 46 local_irq_disable(); 49 47 } while (get_clock() < end); 50 48 lockdep_on(); 51 - __ctl_load(cr0_saved, 0, 0); 49 + __ctl_load(cr0, 0, 0); 50 + __ctl_load(cr6, 6, 6); 52 51 local_tick_enable(clock_saved); 53 52 } 54 53 55 54 static void __udelay_enabled(unsigned long long usecs) 56 55 { 57 - unsigned long mask; 58 - u64 clock_saved; 59 - u64 end; 56 + u64 clock_saved, end; 60 57 61 - mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT | PSW_MASK_IO; 62 58 end = get_clock() + (usecs << 12); 63 59 do { 64 60 clock_saved = 0; ··· 63 65 clock_saved = local_tick_disable(); 64 66 set_clock_comparator(end); 65 67 } 66 - trace_hardirqs_on(); 67 - __load_psw_mask(mask); 68 + vtime_stop_cpu(); 68 69 local_irq_disable(); 69 70 if (clock_saved) 70 71 local_tick_enable(clock_saved);
+8 -22
arch/s390/lib/spinlock.c
··· 10 10 #include <linux/module.h> 11 11 #include <linux/spinlock.h> 12 12 #include <linux/init.h> 13 + #include <linux/smp.h> 13 14 #include <asm/io.h> 14 15 15 16 int spin_retry = 1000; ··· 24 23 return 1; 25 24 } 26 25 __setup("spin_retry=", spin_retry_setup); 27 - 28 - static inline void _raw_yield(void) 29 - { 30 - if (MACHINE_HAS_DIAG44) 31 - asm volatile("diag 0,0,0x44"); 32 - } 33 - 34 - static inline void _raw_yield_cpu(int cpu) 35 - { 36 - if (MACHINE_HAS_DIAG9C) 37 - asm volatile("diag %0,0,0x9c" 38 - : : "d" (cpu_logical_map(cpu))); 39 - else 40 - _raw_yield(); 41 - } 42 26 43 27 void arch_spin_lock_wait(arch_spinlock_t *lp) 44 28 { ··· 46 60 } 47 61 owner = lp->owner_cpu; 48 62 if (owner) 49 - _raw_yield_cpu(~owner); 63 + smp_yield_cpu(~owner); 50 64 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) 51 65 return; 52 66 } ··· 77 91 } 78 92 owner = lp->owner_cpu; 79 93 if (owner) 80 - _raw_yield_cpu(~owner); 94 + smp_yield_cpu(~owner); 81 95 local_irq_disable(); 82 96 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) 83 97 return; ··· 107 121 if (cpu != 0) { 108 122 if (MACHINE_IS_VM || MACHINE_IS_KVM || 109 123 !smp_vcpu_scheduled(~cpu)) 110 - _raw_yield_cpu(~cpu); 124 + smp_yield_cpu(~cpu); 111 125 } 112 126 } 113 127 EXPORT_SYMBOL(arch_spin_relax); ··· 119 133 120 134 while (1) { 121 135 if (count-- <= 0) { 122 - _raw_yield(); 136 + smp_yield(); 123 137 count = spin_retry; 124 138 } 125 139 if (!arch_read_can_lock(rw)) ··· 139 153 local_irq_restore(flags); 140 154 while (1) { 141 155 if (count-- <= 0) { 142 - _raw_yield(); 156 + smp_yield(); 143 157 count = spin_retry; 144 158 } 145 159 if (!arch_read_can_lock(rw)) ··· 174 188 175 189 while (1) { 176 190 if (count-- <= 0) { 177 - _raw_yield(); 191 + smp_yield(); 178 192 count = spin_retry; 179 193 } 180 194 if (!arch_write_can_lock(rw)) ··· 192 206 local_irq_restore(flags); 193 207 while (1) { 194 208 if (count-- <= 0) { 195 - _raw_yield(); 209 + smp_yield(); 196 210 count = spin_retry; 
197 211 } 198 212 if (!arch_write_can_lock(rw))
+2 -2
arch/s390/mm/fault.c
··· 532 532 static DEFINE_SPINLOCK(pfault_lock); 533 533 static LIST_HEAD(pfault_list); 534 534 535 - static void pfault_interrupt(unsigned int ext_int_code, 535 + static void pfault_interrupt(struct ext_code ext_code, 536 536 unsigned int param32, unsigned long param64) 537 537 { 538 538 struct task_struct *tsk; ··· 545 545 * in the 'cpu address' field associated with the 546 546 * external interrupt. 547 547 */ 548 - subcode = ext_int_code >> 16; 548 + subcode = ext_code.subcode; 549 549 if ((subcode & 0xff00) != __SUBCODE_MASK) 550 550 return; 551 551 kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
+3 -3
arch/s390/oprofile/hwsampler.c
··· 233 233 } 234 234 235 235 /* prototypes for external interrupt handler and worker */ 236 - static void hws_ext_handler(unsigned int ext_int_code, 237 - unsigned int param32, unsigned long param64); 236 + static void hws_ext_handler(struct ext_code ext_code, 237 + unsigned int param32, unsigned long param64); 238 238 239 239 static void worker(struct work_struct *work); 240 240 ··· 673 673 return rc; 674 674 } 675 675 676 - static void hws_ext_handler(unsigned int ext_int_code, 676 + static void hws_ext_handler(struct ext_code ext_code, 677 677 unsigned int param32, unsigned long param64) 678 678 { 679 679 struct hws_cpu_buffer *cb;
-9
drivers/crypto/Kconfig
··· 64 64 config ZCRYPT 65 65 tristate "Support for PCI-attached cryptographic adapters" 66 66 depends on S390 67 - select ZCRYPT_MONOLITHIC if ZCRYPT="y" 68 67 select HW_RANDOM 69 68 help 70 69 Select this option if you want to use a PCI-attached cryptographic ··· 75 76 + Crypto Express2 Accelerator (CEX2A) 76 77 + Crypto Express3 Coprocessor (CEX3C) 77 78 + Crypto Express3 Accelerator (CEX3A) 78 - 79 - config ZCRYPT_MONOLITHIC 80 - bool "Monolithic zcrypt module" 81 - depends on ZCRYPT 82 - help 83 - Select this option if you want to have a single module z90crypt, 84 - that contains all parts of the crypto device driver (ap bus, 85 - request router and all the card drivers). 86 79 87 80 config CRYPTO_SHA1_S390 88 81 tristate "SHA1 digest algorithm"
+4
drivers/s390/block/dasd.c
··· 640 640 dasd_set_target_state(device, DASD_STATE_NEW); 641 641 /* Now wait for the devices to come up. */ 642 642 wait_event(dasd_init_waitq, _wait_for_device(device)); 643 + 644 + dasd_reload_device(device); 645 + if (device->discipline->kick_validate) 646 + device->discipline->kick_validate(device); 643 647 } 644 648 645 649 /*
+4 -4
drivers/s390/block/dasd_diag.c
··· 229 229 } 230 230 231 231 /* Handle external interruption. */ 232 - static void dasd_ext_handler(unsigned int ext_int_code, 232 + static void dasd_ext_handler(struct ext_code ext_code, 233 233 unsigned int param32, unsigned long param64) 234 234 { 235 235 struct dasd_ccw_req *cqr, *next; ··· 239 239 addr_t ip; 240 240 int rc; 241 241 242 - switch (ext_int_code >> 24) { 242 + switch (ext_code.subcode >> 8) { 243 243 case DASD_DIAG_CODE_31BIT: 244 244 ip = (addr_t) param32; 245 245 break; ··· 280 280 cqr->stopclk = get_clock(); 281 281 282 282 expires = 0; 283 - if ((ext_int_code & 0xff0000) == 0) { 283 + if ((ext_code.subcode & 0xff) == 0) { 284 284 cqr->status = DASD_CQR_SUCCESS; 285 285 /* Start first request on queue if possible -> fast_io. */ 286 286 if (!list_empty(&device->ccw_queue)) { ··· 296 296 cqr->status = DASD_CQR_QUEUED; 297 297 DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for " 298 298 "request %p was %d (%d retries left)", cqr, 299 - (ext_int_code >> 16) & 0xff, cqr->retries); 299 + ext_code.subcode & 0xff, cqr->retries); 300 300 dasd_diag_erp(device); 301 301 } 302 302
+8
drivers/s390/block/dasd_eckd.c
··· 1564 1564 static void dasd_eckd_kick_validate_server(struct dasd_device *device) 1565 1565 { 1566 1566 dasd_get_device(device); 1567 + /* exit if device not online or in offline processing */ 1568 + if (test_bit(DASD_FLAG_OFFLINE, &device->flags) || 1569 + device->state < DASD_STATE_ONLINE) { 1570 + dasd_put_device(device); 1571 + return; 1572 + } 1567 1573 /* queue call to do_validate_server to the kernel event daemon. */ 1568 1574 schedule_work(&device->kick_validate); 1569 1575 } ··· 1999 1993 static int dasd_eckd_online_to_ready(struct dasd_device *device) 2000 1994 { 2001 1995 cancel_work_sync(&device->reload_device); 1996 + cancel_work_sync(&device->kick_validate); 2002 1997 return dasd_alias_remove_device(device); 2003 1998 }; 2004 1999 ··· 2270 2263 * and only if not suspended 2271 2264 */ 2272 2265 if (!device->block && private->lcu && 2266 + device->state == DASD_STATE_ONLINE && 2273 2267 !test_bit(DASD_FLAG_OFFLINE, &device->flags) && 2274 2268 !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) { 2275 2269 /*
+2 -2
drivers/s390/char/sclp.c
··· 393 393 /* Handler for external interruption. Perform request post-processing. 394 394 * Prepare read event data request if necessary. Start processing of next 395 395 * request on queue. */ 396 - static void sclp_interrupt_handler(unsigned int ext_int_code, 396 + static void sclp_interrupt_handler(struct ext_code ext_code, 397 397 unsigned int param32, unsigned long param64) 398 398 { 399 399 struct sclp_req *req; ··· 818 818 819 819 /* Handler for external interruption used during initialization. Modify 820 820 * request state to done. */ 821 - static void sclp_check_handler(unsigned int ext_int_code, 821 + static void sclp_check_handler(struct ext_code ext_code, 822 822 unsigned int param32, unsigned long param64) 823 823 { 824 824 u32 finished_sccb;
-1
drivers/s390/char/sclp_quiesce.c
··· 15 15 #include <linux/reboot.h> 16 16 #include <linux/atomic.h> 17 17 #include <asm/ptrace.h> 18 - #include <asm/sigp.h> 19 18 #include <asm/smp.h> 20 19 21 20 #include "sclp.h"
+80 -21
drivers/s390/char/sclp_sdias.c
··· 8 8 #define KMSG_COMPONENT "sclp_sdias" 9 9 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 10 11 + #include <linux/completion.h> 11 12 #include <linux/sched.h> 12 13 #include <asm/sclp.h> 13 14 #include <asm/debug.h> ··· 63 62 } __attribute__((packed)); 64 63 65 64 static struct sdias_sccb sccb __attribute__((aligned(4096))); 65 + static struct sdias_evbuf sdias_evbuf; 66 66 67 - static int sclp_req_done; 68 - static wait_queue_head_t sdias_wq; 67 + static DECLARE_COMPLETION(evbuf_accepted); 68 + static DECLARE_COMPLETION(evbuf_done); 69 69 static DEFINE_MUTEX(sdias_mutex); 70 70 71 + /* 72 + * Called by SCLP base when read event data has been completed (async mode only) 73 + */ 74 + static void sclp_sdias_receiver_fn(struct evbuf_header *evbuf) 75 + { 76 + memcpy(&sdias_evbuf, evbuf, 77 + min_t(unsigned long, sizeof(sdias_evbuf), evbuf->length)); 78 + complete(&evbuf_done); 79 + TRACE("sclp_sdias_receiver_fn done\n"); 80 + } 81 + 82 + /* 83 + * Called by SCLP base when sdias event has been accepted 84 + */ 71 85 static void sdias_callback(struct sclp_req *request, void *data) 72 86 { 73 - sclp_req_done = 1; 74 - wake_up(&sdias_wq); /* Inform caller, that request is complete */ 87 + complete(&evbuf_accepted); 75 88 TRACE("callback done\n"); 76 89 } 77 90 ··· 95 80 int rc; 96 81 97 82 for (retries = SDIAS_RETRIES; retries; retries--) { 98 - sclp_req_done = 0; 99 83 TRACE("add request\n"); 100 84 rc = sclp_add_request(req); 101 85 if (rc) { ··· 105 91 continue; 106 92 } 107 93 /* initiated, wait for completion of service call */ 108 - wait_event(sdias_wq, (sclp_req_done == 1)); 94 + wait_for_completion(&evbuf_accepted); 109 95 if (req->status == SCLP_REQ_FAILED) { 110 96 TRACE("sclp request failed\n"); 111 - rc = -EIO; 112 97 continue; 113 98 } 99 + /* if not accepted, retry */ 100 + if (!(sccb.evbuf.hdr.flags & 0x80)) { 101 + TRACE("sclp request failed: flags=%x\n", 102 + sccb.evbuf.hdr.flags); 103 + continue; 104 + } 105 + /* 106 + * for the sync interface the 
response is in the initial sccb 107 + */ 108 + if (!sclp_sdias_register.receiver_fn) { 109 + memcpy(&sdias_evbuf, &sccb.evbuf, sizeof(sdias_evbuf)); 110 + TRACE("sync request done\n"); 111 + return 0; 112 + } 113 + /* otherwise we wait for completion */ 114 + wait_for_completion(&evbuf_done); 114 115 TRACE("request done\n"); 115 - break; 116 + return 0; 116 117 } 117 - return rc; 118 + return -EIO; 118 119 } 119 120 120 121 /* ··· 169 140 goto out; 170 141 } 171 142 172 - switch (sccb.evbuf.event_status) { 143 + switch (sdias_evbuf.event_status) { 173 144 case 0: 174 - rc = sccb.evbuf.blk_cnt; 145 + rc = sdias_evbuf.blk_cnt; 175 146 break; 176 147 default: 177 - pr_err("SCLP error: %x\n", 178 - sccb.evbuf.event_status); 148 + pr_err("SCLP error: %x\n", sdias_evbuf.event_status); 179 149 rc = -EIO; 180 150 goto out; 181 151 } ··· 239 211 goto out; 240 212 } 241 213 242 - switch (sccb.evbuf.event_status) { 214 + switch (sdias_evbuf.event_status) { 243 215 case EVSTATE_ALL_STORED: 244 216 TRACE("all stored\n"); 245 217 case EVSTATE_PART_STORED: 246 - TRACE("part stored: %i\n", sccb.evbuf.blk_cnt); 218 + TRACE("part stored: %i\n", sdias_evbuf.blk_cnt); 247 219 break; 248 220 case EVSTATE_NO_DATA: 249 221 TRACE("no data\n"); 250 222 default: 251 223 pr_err("Error from SCLP while copying hsa. 
" 252 224 "Event status = %x\n", 253 - sccb.evbuf.event_status); 225 + sdias_evbuf.event_status); 254 226 rc = -EIO; 255 227 } 256 228 out: ··· 258 230 return rc; 259 231 } 260 232 261 - int __init sclp_sdias_init(void) 233 + static int __init sclp_sdias_register_check(void) 262 234 { 263 235 int rc; 264 236 237 + rc = sclp_register(&sclp_sdias_register); 238 + if (rc) 239 + return rc; 240 + if (sclp_sdias_blk_count() == 0) { 241 + sclp_unregister(&sclp_sdias_register); 242 + return -ENODEV; 243 + } 244 + return 0; 245 + } 246 + 247 + static int __init sclp_sdias_init_sync(void) 248 + { 249 + TRACE("Try synchronous mode\n"); 250 + sclp_sdias_register.receive_mask = 0; 251 + sclp_sdias_register.receiver_fn = NULL; 252 + return sclp_sdias_register_check(); 253 + } 254 + 255 + static int __init sclp_sdias_init_async(void) 256 + { 257 + TRACE("Try asynchronous mode\n"); 258 + sclp_sdias_register.receive_mask = EVTYP_SDIAS_MASK; 259 + sclp_sdias_register.receiver_fn = sclp_sdias_receiver_fn; 260 + return sclp_sdias_register_check(); 261 + } 262 + 263 + int __init sclp_sdias_init(void) 264 + { 265 265 if (ipl_info.type != IPL_TYPE_FCP_DUMP) 266 266 return 0; 267 267 sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long)); 268 268 debug_register_view(sdias_dbf, &debug_sprintf_view); 269 269 debug_set_level(sdias_dbf, 6); 270 - rc = sclp_register(&sclp_sdias_register); 271 - if (rc) 272 - return rc; 273 - init_waitqueue_head(&sdias_wq); 270 + if (sclp_sdias_init_sync() == 0) 271 + goto out; 272 + if (sclp_sdias_init_async() == 0) 273 + goto out; 274 + TRACE("init failed\n"); 275 + return -ENODEV; 276 + out: 274 277 TRACE("init done\n"); 275 278 return 0; 276 279 }
-1
drivers/s390/char/zcore.c
··· 21 21 #include <asm/ipl.h> 22 22 #include <asm/sclp.h> 23 23 #include <asm/setup.h> 24 - #include <asm/sigp.h> 25 24 #include <asm/uaccess.h> 26 25 #include <asm/debug.h> 27 26 #include <asm/processor.h>
-2
drivers/s390/cio/cio.c
··· 601 601 struct pt_regs *old_regs; 602 602 603 603 old_regs = set_irq_regs(regs); 604 - s390_idle_check(regs, S390_lowcore.int_clock, 605 - S390_lowcore.async_enter_timer); 606 604 irq_enter(); 607 605 __this_cpu_write(s390_idle.nohz_delay, 1); 608 606 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
+6
drivers/s390/cio/qdio_main.c
··· 18 18 #include <linux/atomic.h> 19 19 #include <asm/debug.h> 20 20 #include <asm/qdio.h> 21 + #include <asm/ipl.h> 21 22 22 23 #include "cio.h" 23 24 #include "css.h" ··· 1094 1093 q->nr, q->first_to_kick, count, irq_ptr->int_parm); 1095 1094 no_handler: 1096 1095 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); 1096 + /* 1097 + * In case of z/VM LGR (Live Guest Migration) QDIO recovery will happen. 1098 + * Therefore we call the LGR detection function here. 1099 + */ 1100 + lgr_info_log(); 1097 1101 } 1098 1102 1099 1103 static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
-10
drivers/s390/crypto/Makefile
··· 2 2 # S/390 crypto devices 3 3 # 4 4 5 - ifdef CONFIG_ZCRYPT_MONOLITHIC 6 - 7 - z90crypt-objs := zcrypt_mono.o ap_bus.o zcrypt_api.o \ 8 - zcrypt_pcica.o zcrypt_pcicc.o zcrypt_pcixcc.o zcrypt_cex2a.o 9 - obj-$(CONFIG_ZCRYPT) += z90crypt.o 10 - 11 - else 12 - 13 5 ap-objs := ap_bus.o 14 6 obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcicc.o zcrypt_pcixcc.o 15 7 obj-$(CONFIG_ZCRYPT) += zcrypt_pcica.o zcrypt_cex2a.o 16 - 17 - endif
-2
drivers/s390/crypto/ap_bus.c
··· 1862 1862 } 1863 1863 } 1864 1864 1865 - #ifndef CONFIG_ZCRYPT_MONOLITHIC 1866 1865 module_init(ap_module_init); 1867 1866 module_exit(ap_module_exit); 1868 - #endif
-2
drivers/s390/crypto/zcrypt_api.c
··· 1220 1220 misc_deregister(&zcrypt_misc_device); 1221 1221 } 1222 1222 1223 - #ifndef CONFIG_ZCRYPT_MONOLITHIC 1224 1223 module_init(zcrypt_api_init); 1225 1224 module_exit(zcrypt_api_exit); 1226 - #endif
-4
drivers/s390/crypto/zcrypt_cex2a.c
··· 63 63 { /* end of list */ }, 64 64 }; 65 65 66 - #ifndef CONFIG_ZCRYPT_MONOLITHIC 67 66 MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_ids); 68 67 MODULE_AUTHOR("IBM Corporation"); 69 68 MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, " 70 69 "Copyright 2001, 2006 IBM Corporation"); 71 70 MODULE_LICENSE("GPL"); 72 - #endif 73 71 74 72 static int zcrypt_cex2a_probe(struct ap_device *ap_dev); 75 73 static void zcrypt_cex2a_remove(struct ap_device *ap_dev); ··· 494 496 ap_driver_unregister(&zcrypt_cex2a_driver); 495 497 } 496 498 497 - #ifndef CONFIG_ZCRYPT_MONOLITHIC 498 499 module_init(zcrypt_cex2a_init); 499 500 module_exit(zcrypt_cex2a_exit); 500 - #endif
-100
drivers/s390/crypto/zcrypt_mono.c
··· 1 - /* 2 - * linux/drivers/s390/crypto/zcrypt_mono.c 3 - * 4 - * zcrypt 2.1.0 5 - * 6 - * Copyright (C) 2001, 2006 IBM Corporation 7 - * Author(s): Robert Burroughs 8 - * Eric Rossman (edrossma@us.ibm.com) 9 - * 10 - * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) 11 - * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> 12 - * 13 - * This program is free software; you can redistribute it and/or modify 14 - * it under the terms of the GNU General Public License as published by 15 - * the Free Software Foundation; either version 2, or (at your option) 16 - * any later version. 17 - * 18 - * This program is distributed in the hope that it will be useful, 19 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 20 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 - * GNU General Public License for more details. 22 - * 23 - * You should have received a copy of the GNU General Public License 24 - * along with this program; if not, write to the Free Software 25 - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 26 - */ 27 - 28 - #include <linux/module.h> 29 - #include <linux/init.h> 30 - #include <linux/interrupt.h> 31 - #include <linux/miscdevice.h> 32 - #include <linux/fs.h> 33 - #include <linux/proc_fs.h> 34 - #include <linux/compat.h> 35 - #include <linux/atomic.h> 36 - #include <asm/uaccess.h> 37 - 38 - #include "ap_bus.h" 39 - #include "zcrypt_api.h" 40 - #include "zcrypt_pcica.h" 41 - #include "zcrypt_pcicc.h" 42 - #include "zcrypt_pcixcc.h" 43 - #include "zcrypt_cex2a.h" 44 - 45 - /** 46 - * The module initialization code. 
47 - */ 48 - static int __init zcrypt_init(void) 49 - { 50 - int rc; 51 - 52 - rc = ap_module_init(); 53 - if (rc) 54 - goto out; 55 - rc = zcrypt_api_init(); 56 - if (rc) 57 - goto out_ap; 58 - rc = zcrypt_pcica_init(); 59 - if (rc) 60 - goto out_api; 61 - rc = zcrypt_pcicc_init(); 62 - if (rc) 63 - goto out_pcica; 64 - rc = zcrypt_pcixcc_init(); 65 - if (rc) 66 - goto out_pcicc; 67 - rc = zcrypt_cex2a_init(); 68 - if (rc) 69 - goto out_pcixcc; 70 - return 0; 71 - 72 - out_pcixcc: 73 - zcrypt_pcixcc_exit(); 74 - out_pcicc: 75 - zcrypt_pcicc_exit(); 76 - out_pcica: 77 - zcrypt_pcica_exit(); 78 - out_api: 79 - zcrypt_api_exit(); 80 - out_ap: 81 - ap_module_exit(); 82 - out: 83 - return rc; 84 - } 85 - 86 - /** 87 - * The module termination code. 88 - */ 89 - static void __exit zcrypt_exit(void) 90 - { 91 - zcrypt_cex2a_exit(); 92 - zcrypt_pcixcc_exit(); 93 - zcrypt_pcicc_exit(); 94 - zcrypt_pcica_exit(); 95 - zcrypt_api_exit(); 96 - ap_module_exit(); 97 - } 98 - 99 - module_init(zcrypt_init); 100 - module_exit(zcrypt_exit);
-4
drivers/s390/crypto/zcrypt_pcica.c
··· 53 53 { /* end of list */ }, 54 54 }; 55 55 56 - #ifndef CONFIG_ZCRYPT_MONOLITHIC 57 56 MODULE_DEVICE_TABLE(ap, zcrypt_pcica_ids); 58 57 MODULE_AUTHOR("IBM Corporation"); 59 58 MODULE_DESCRIPTION("PCICA Cryptographic Coprocessor device driver, " 60 59 "Copyright 2001, 2006 IBM Corporation"); 61 60 MODULE_LICENSE("GPL"); 62 - #endif 63 61 64 62 static int zcrypt_pcica_probe(struct ap_device *ap_dev); 65 63 static void zcrypt_pcica_remove(struct ap_device *ap_dev); ··· 406 408 ap_driver_unregister(&zcrypt_pcica_driver); 407 409 } 408 410 409 - #ifndef CONFIG_ZCRYPT_MONOLITHIC 410 411 module_init(zcrypt_pcica_init); 411 412 module_exit(zcrypt_pcica_exit); 412 - #endif
-4
drivers/s390/crypto/zcrypt_pcicc.c
··· 65 65 { /* end of list */ }, 66 66 }; 67 67 68 - #ifndef CONFIG_ZCRYPT_MONOLITHIC 69 68 MODULE_DEVICE_TABLE(ap, zcrypt_pcicc_ids); 70 69 MODULE_AUTHOR("IBM Corporation"); 71 70 MODULE_DESCRIPTION("PCICC Cryptographic Coprocessor device driver, " 72 71 "Copyright 2001, 2006 IBM Corporation"); 73 72 MODULE_LICENSE("GPL"); 74 - #endif 75 73 76 74 static int zcrypt_pcicc_probe(struct ap_device *ap_dev); 77 75 static void zcrypt_pcicc_remove(struct ap_device *ap_dev); ··· 612 614 ap_driver_unregister(&zcrypt_pcicc_driver); 613 615 } 614 616 615 - #ifndef CONFIG_ZCRYPT_MONOLITHIC 616 617 module_init(zcrypt_pcicc_init); 617 618 module_exit(zcrypt_pcicc_exit); 618 - #endif
-4
drivers/s390/crypto/zcrypt_pcixcc.c
··· 75 75 { /* end of list */ }, 76 76 }; 77 77 78 - #ifndef CONFIG_ZCRYPT_MONOLITHIC 79 78 MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids); 80 79 MODULE_AUTHOR("IBM Corporation"); 81 80 MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, " 82 81 "Copyright 2001, 2006 IBM Corporation"); 83 82 MODULE_LICENSE("GPL"); 84 - #endif 85 83 86 84 static int zcrypt_pcixcc_probe(struct ap_device *ap_dev); 87 85 static void zcrypt_pcixcc_remove(struct ap_device *ap_dev); ··· 1119 1121 ap_driver_unregister(&zcrypt_pcixcc_driver); 1120 1122 } 1121 1123 1122 - #ifndef CONFIG_ZCRYPT_MONOLITHIC 1123 1124 module_init(zcrypt_pcixcc_init); 1124 1125 module_exit(zcrypt_pcixcc_exit); 1125 - #endif
+2 -4
drivers/s390/kvm/kvm_virtio.c
··· 380 380 /* 381 381 * we emulate the request_irq behaviour on top of s390 extints 382 382 */ 383 - static void kvm_extint_handler(unsigned int ext_int_code, 383 + static void kvm_extint_handler(struct ext_code ext_code, 384 384 unsigned int param32, unsigned long param64) 385 385 { 386 386 struct virtqueue *vq; 387 - u16 subcode; 388 387 u32 param; 389 388 390 - subcode = ext_int_code >> 16; 391 - if ((subcode & 0xff00) != VIRTIO_SUBCODE_64) 389 + if ((ext_code.subcode & 0xff00) != VIRTIO_SUBCODE_64) 392 390 return; 393 391 kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++; 394 392
+1 -1
net/iucv/iucv.c
··· 1800 1800 * Handles external interrupts coming in from CP. 1801 1801 * Places the interrupt buffer on a queue and schedules iucv_tasklet_fn(). 1802 1802 */ 1803 - static void iucv_external_interrupt(unsigned int ext_int_code, 1803 + static void iucv_external_interrupt(struct ext_code ext_code, 1804 1804 unsigned int param32, unsigned long param64) 1805 1805 { 1806 1806 struct iucv_irq_data *p;