x86: msr: propagate errors from smp_call_function_single()

Propagate the error (-ENXIO) from smp_call_function_single(). This
error can occur when a CPU is unplugged while the MSR driver is
open.

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
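
For illustration only (not part of the patch): from user space the change
is visible through /dev/cpu/<N>/msr. A read for a CPU that was offlined
while the device was open now fails with ENXIO instead of a generic EIO.
A minimal sketch, assuming the msr driver is loaded, the program runs as
root, and CPU 1 has been taken offline:

        #include <errno.h>
        #include <fcntl.h>
        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>

        int main(void)
        {
                uint64_t val;
                int fd = open("/dev/cpu/1/msr", O_RDONLY);

                if (fd < 0) {
                        perror("open");
                        return 1;
                }

                /* The file offset selects the MSR; 0x10 is the TSC. */
                if (pread(fd, &val, sizeof(val), 0x10) != (ssize_t)sizeof(val)) {
                        if (errno == ENXIO)
                                printf("CPU 1 is offline\n");
                        else
                                printf("read failed: %s\n", strerror(errno));
                } else {
                        printf("TSC on CPU 1: %#llx\n",
                               (unsigned long long)val);
                }

                close(fd);
                return 0;
        }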

 3 files changed, 28 insertions(+), 19 deletions(-)

arch/x86/kernel/msr.c (+10 -4):
···
 
         for (; count; count -= 8) {
                 err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]);
-                if (err)
-                        return -EIO;
+                if (err) {
+                        if (err == -EFAULT) /* Fix idiotic error code */
+                                err = -EIO;
+                        return err;
+                }
                 if (copy_to_user(tmp, &data, 8))
                         return -EFAULT;
                 tmp += 2;
···
                 if (copy_from_user(&data, tmp, 8))
                         return -EFAULT;
                 err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]);
-                if (err)
-                        return -EIO;
+                if (err) {
+                        if (err == -EFAULT) /* Fix idiotic error code */
+                                err = -EIO;
+                        return err;
+                }
                 tmp += 2;
         }
 
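
The -EFAULT special case in the two hunks above exists because
rdmsr_safe()/wrmsr_safe() report a faulting MSR access as -EFAULT (hence
the "Fix idiotic error code" comment), but from a read(2)/write(2) handler
-EFAULT means "bad user buffer". The driver therefore keeps translating
that case to -EIO while letting new codes such as -ENXIO through. The rule,
written as a standalone sketch (the helper name is hypothetical; the driver
open-codes it):

        #include <linux/errno.h>

        /* Remap the MSR-fault code (-EFAULT) to -EIO; pass everything
         * else (e.g. -ENXIO) through unchanged. */
        static inline int msr_fix_err(int err)
        {
                return err == -EFAULT ? -EIO : err;
        }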

arch/x86/lib/msr-on-cpu.c (+12 -10):
···
 
         rv.msr_no = msr_no;
         if (safe) {
-                smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
-                err = rv.err;
+                err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu,
+                                               &rv, 1);
+                err = err ? err : rv.err;
         } else {
-                smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
+                err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
         }
         *l = rv.l;
         *h = rv.h;
···
         rv.l = l;
         rv.h = h;
         if (safe) {
-                smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
-                err = rv.err;
+                err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu,
+                                               &rv, 1);
+                err = err ? err : rv.err;
         } else {
-                smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
+                err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
         }
 
         return err;
 }
 
-void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
-        _wrmsr_on_cpu(cpu, msr_no, l, h, 0);
+        return _wrmsr_on_cpu(cpu, msr_no, l, h, 0);
 }
 
-void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 {
-        _rdmsr_on_cpu(cpu, msr_no, l, h, 0);
+        return _rdmsr_on_cpu(cpu, msr_no, l, h, 0);
 }
 
 /* These "safe" variants are slower and should be used when the target MSR
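
The "err = err ? err : rv.err;" line gives the smp_call_function_single()
return value (e.g. -ENXIO for an offline CPU) precedence over the result of
the MSR access performed on the target CPU (rv.err). A hedged sketch of
what an in-kernel caller can now do with that distinction (the function
name is illustrative only, not from the patch):

        #include <linux/errno.h>
        #include <linux/types.h>
        #include <asm/msr.h>

        static int query_optional_msr(unsigned int cpu, u32 msr)
        {
                u32 lo, hi;
                int err = rdmsr_safe_on_cpu(cpu, msr, &lo, &hi);

                if (err == -ENXIO)        /* target CPU went away */
                        return err;
                if (err)                  /* MSR access failed on that CPU */
                        return 0;         /* treat as "feature not present" */

                return 1;                 /* MSR exists; value in lo/hi */
        }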

include/asm-x86/msr.h (+6 -5):
···
 #define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)
 
 #ifdef CONFIG_SMP
-void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
-void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
-
 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
 #else  /* CONFIG_SMP */
-static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 {
         rdmsr(msr_no, *l, *h);
+        return 0;
 }
-static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
         wrmsr(msr_no, l, h);
+        return 0;
 }
 static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
                                     u32 *l, u32 *h)
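
With the header change, the non-"safe" helpers return an int in both
configurations, and the UP stubs above always return 0, so callers can be
written once and check the result unconditionally. A minimal sketch (the
function is hypothetical, not from the patch):

        #include <linux/errno.h>
        #include <linux/types.h>
        #include <asm/msr.h>

        /* Set a bit in an MSR on a given CPU; returns 0 or a -errno such
         * as -ENXIO if that CPU is offline (always 0 on UP builds). */
        static int set_msr_bit_on_cpu(unsigned int cpu, u32 msr, u32 bit)
        {
                u32 lo, hi;
                int err = rdmsr_on_cpu(cpu, msr, &lo, &hi);

                if (err)
                        return err;

                return wrmsr_on_cpu(cpu, msr, lo | bit, hi);
        }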