Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: Disable relocation on exceptions whenever PR KVM is active

For PR KVM we allow userspace to map 0xc000000000000000. Because
transitioning from userspace to the guest kernel may use the relocated
exception vectors we have to disable relocation on exceptions whenever
PR KVM is active as we cannot trust that address.

This issue does not apply to HV KVM, since changing from a guest to the
hypervisor will never use the relocated exception vectors.

Currently the hypervisor interface only allows us to toggle relocation
on exceptions on a partition wide scope, so we need to globally disable
relocation on exceptions when the first PR KVM instance is started and
only re-enable them when all PR KVM instances have been destroyed.

It's a bit heavy-handed, but until the hypervisor gives us a lightweight
way to toggle relocation on exceptions on a single thread, it's our only
real option.

Signed-off-by: Ian Munsie <imunsie@au1.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

authored by

Ian Munsie and committed by
Benjamin Herrenschmidt
a413f474 96f013fe

+32 -3
+9
arch/powerpc/include/asm/hvcall.h
··· 395 395 { 396 396 return CMO_PageSize; 397 397 } 398 + 399 + extern long pSeries_enable_reloc_on_exc(void); 400 + extern long pSeries_disable_reloc_on_exc(void); 401 + 402 + #else 403 + 404 + #define pSeries_enable_reloc_on_exc() do {} while (0) 405 + #define pSeries_disable_reloc_on_exc() do {} while (0) 406 + 398 407 #endif /* CONFIG_PPC_PSERIES */ 399 408 400 409 #endif /* __ASSEMBLY__ */
+18
arch/powerpc/kvm/book3s_pr.c
··· 34 34 #include <asm/kvm_book3s.h> 35 35 #include <asm/mmu_context.h> 36 36 #include <asm/switch_to.h> 37 + #include <asm/firmware.h> 37 38 #include <linux/gfp.h> 38 39 #include <linux/sched.h> 39 40 #include <linux/vmalloc.h> ··· 1285 1284 { 1286 1285 } 1287 1286 1287 + static unsigned int kvm_global_user_count = 0; 1288 + static DEFINE_SPINLOCK(kvm_global_user_count_lock); 1289 + 1288 1290 int kvmppc_core_init_vm(struct kvm *kvm) 1289 1291 { 1290 1292 #ifdef CONFIG_PPC64 1291 1293 INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables); 1292 1294 #endif 1293 1295 1296 + if (firmware_has_feature(FW_FEATURE_SET_MODE)) { 1297 + spin_lock(&kvm_global_user_count_lock); 1298 + if (++kvm_global_user_count == 1) 1299 + pSeries_disable_reloc_on_exc(); 1300 + spin_unlock(&kvm_global_user_count_lock); 1301 + } 1294 1302 return 0; 1295 1303 } 1296 1304 ··· 1308 1298 #ifdef CONFIG_PPC64 1309 1299 WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); 1310 1300 #endif 1301 + 1302 + if (firmware_has_feature(FW_FEATURE_SET_MODE)) { 1303 + spin_lock(&kvm_global_user_count_lock); 1304 + BUG_ON(kvm_global_user_count == 0); 1305 + if (--kvm_global_user_count == 0) 1306 + pSeries_enable_reloc_on_exc(); 1307 + spin_unlock(&kvm_global_user_count_lock); 1308 + } 1311 1309 } 1312 1310 1313 1311 static int kvmppc_book3s_init(void)
+5 -3
arch/powerpc/platforms/pseries/setup.c
··· 375 375 * to ever be a problem in practice we can move this into a kernel thread to 376 376 * finish off the process later in boot. 377 377 */ 378 - static int __init pSeries_enable_reloc_on_exc(void) 378 + long pSeries_enable_reloc_on_exc(void) 379 379 { 380 380 long rc; 381 381 unsigned int delay, total_delay = 0; ··· 397 397 mdelay(delay); 398 398 } 399 399 } 400 + EXPORT_SYMBOL(pSeries_enable_reloc_on_exc); 400 401 401 - #ifdef CONFIG_KEXEC 402 - static long pSeries_disable_reloc_on_exc(void) 402 + long pSeries_disable_reloc_on_exc(void) 403 403 { 404 404 long rc; 405 405 ··· 410 410 mdelay(get_longbusy_msecs(rc)); 411 411 } 412 412 } 413 + EXPORT_SYMBOL(pSeries_disable_reloc_on_exc); 413 414 415 + #ifdef CONFIG_KEXEC 414 416 static void pSeries_machine_kexec(struct kimage *image) 415 417 { 416 418 long rc;