x86: msr: propagate errors from smp_call_function_single()

Propagate errors (e.g. -ENXIO) from smp_call_function_single(). Such
errors can happen when a CPU is unplugged while the MSR driver is
open.

Signed-off-by: H. Peter Anvin <hpa@zytor.com>

+28 -19
+10 -4
arch/x86/kernel/msr.c
··· 79 80 for (; count; count -= 8) { 81 err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]); 82 - if (err) 83 - return -EIO; 84 if (copy_to_user(tmp, &data, 8)) 85 return -EFAULT; 86 tmp += 2; ··· 108 if (copy_from_user(&data, tmp, 8)) 109 return -EFAULT; 110 err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]); 111 - if (err) 112 - return -EIO; 113 tmp += 2; 114 } 115
··· 79 80 for (; count; count -= 8) { 81 err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]); 82 + if (err) { 83 + if (err == -EFAULT) /* Fix idiotic error code */ 84 + err = -EIO; 85 + return err; 86 + } 87 if (copy_to_user(tmp, &data, 8)) 88 return -EFAULT; 89 tmp += 2; ··· 105 if (copy_from_user(&data, tmp, 8)) 106 return -EFAULT; 107 err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]); 108 + if (err) { 109 + if (err == -EFAULT) /* Fix idiotic error code */ 110 + err = -EIO; 111 + return err; 112 + } 113 tmp += 2; 114 } 115
+12 -10
arch/x86/lib/msr-on-cpu.c
··· 30 31 rv.msr_no = msr_no; 32 if (safe) { 33 - smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1); 34 - err = rv.err; 35 } else { 36 - smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1); 37 } 38 *l = rv.l; 39 *h = rv.h; ··· 65 rv.l = l; 66 rv.h = h; 67 if (safe) { 68 - smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1); 69 - err = rv.err; 70 } else { 71 - smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1); 72 } 73 74 return err; 75 } 76 77 - void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) 78 { 79 - _wrmsr_on_cpu(cpu, msr_no, l, h, 0); 80 } 81 82 - void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) 83 { 84 - _rdmsr_on_cpu(cpu, msr_no, l, h, 0); 85 } 86 87 /* These "safe" variants are slower and should be used when the target MSR
··· 30 31 rv.msr_no = msr_no; 32 if (safe) { 33 + err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, 34 + &rv, 1); 35 + err = err ? err : rv.err; 36 } else { 37 + err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1); 38 } 39 *l = rv.l; 40 *h = rv.h; ··· 64 rv.l = l; 65 rv.h = h; 66 if (safe) { 67 + err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, 68 + &rv, 1); 69 + err = err ? err : rv.err; 70 } else { 71 + err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1); 72 } 73 74 return err; 75 } 76 77 + int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) 78 { 79 + return _wrmsr_on_cpu(cpu, msr_no, l, h, 0); 80 } 81 82 + int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) 83 { 84 + return _rdmsr_on_cpu(cpu, msr_no, l, h, 0); 85 } 86 87 /* These "safe" variants are slower and should be used when the target MSR
+6 -5
include/asm-x86/msr.h
··· 192 #define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0) 193 194 #ifdef CONFIG_SMP 195 - void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); 196 - void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); 197 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); 198 - 199 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); 200 #else /* CONFIG_SMP */ 201 - static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) 202 { 203 rdmsr(msr_no, *l, *h); 204 } 205 - static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) 206 { 207 wrmsr(msr_no, l, h); 208 } 209 static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, 210 u32 *l, u32 *h)
··· 192 #define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0) 193 194 #ifdef CONFIG_SMP 195 + int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); 196 + int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); 197 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); 198 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); 199 #else /* CONFIG_SMP */ 200 + static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) 201 { 202 rdmsr(msr_no, *l, *h); 203 + return 0; 204 } 205 + static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) 206 { 207 wrmsr(msr_no, l, h); 208 + return 0; 209 } 210 static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, 211 u32 *l, u32 *h)