Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/kasan: Don't instrument non-maskable or raw interrupts

Disable address sanitization for raw and non-maskable interrupt
handlers, because they can run in real mode, where we cannot access
the shadow memory. (Note that kasan_arch_is_ready() doesn't test for
real mode, since it is a static branch for speed, and in any case not
all the entry points to the generic KASAN code are protected by
kasan_arch_is_ready guards.)

The changes to interrupt_nmi_enter/exit_prepare() look larger than
they actually are. The changes are equivalent to adding
!IS_ENABLED(CONFIG_KASAN) to the conditions for calling nmi_enter() or
nmi_exit() in real mode. That is, the code is equivalent to using the
following condition for calling nmi_enter/exit:

if (((!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
      !firmware_has_feature(FW_FEATURE_LPAR) ||
      radix_enabled()) &&
     !IS_ENABLED(CONFIG_KASAN)) ||
    (mfmsr() & MSR_DR))

That unwieldy condition has been split into several statements with
comments, for easier reading.

The nmi_ipi_lock functions that call atomic functions (i.e.,
nmi_ipi_lock_start(), nmi_ipi_lock() and nmi_ipi_unlock()), besides
being marked noinstr, now call arch_atomic_* functions instead of
atomic_* functions because with KASAN enabled, the atomic_* functions
are wrappers which explicitly do address sanitization on their
arguments. Since we are trying to avoid address sanitization, we have
to use the lower-level arch_atomic_* versions.

In hv_nmi_check_nonrecoverable(), the regs_set_unrecoverable() call
has been open-coded so as to avoid having to either trust the inlining
or mark regs_set_unrecoverable() as noinstr.

[paulus@ozlabs.org: combined a few work-in-progress commits of
Daniel's and wrote the commit message.]

Signed-off-by: Daniel Axtens <dja@axtens.net>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/YoTFGaKM8Pd46PIK@cleo

authored by

Daniel Axtens and committed by
Michael Ellerman
5352090a f08aed52

+59 -26
+40 -12
arch/powerpc/include/asm/interrupt.h
··· 324 324 } 325 325 #endif 326 326 327 + /* If data relocations are enabled, it's safe to use nmi_enter() */ 328 + if (mfmsr() & MSR_DR) { 329 + nmi_enter(); 330 + return; 331 + } 332 + 327 333 /* 328 - * Do not use nmi_enter() for pseries hash guest taking a real-mode 334 + * But do not use nmi_enter() for pseries hash guest taking a real-mode 329 335 * NMI because not everything it touches is within the RMA limit. 330 336 */ 331 - if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) || 332 - !firmware_has_feature(FW_FEATURE_LPAR) || 333 - radix_enabled() || (mfmsr() & MSR_DR)) 334 - nmi_enter(); 337 + if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && 338 + firmware_has_feature(FW_FEATURE_LPAR) && 339 + !radix_enabled()) 340 + return; 341 + 342 + /* 343 + * Likewise, don't use it if we have some form of instrumentation (like 344 + * KASAN shadow) that is not safe to access in real mode (even on radix) 345 + */ 346 + if (IS_ENABLED(CONFIG_KASAN)) 347 + return; 348 + 349 + /* Otherwise, it should be safe to call it */ 350 + nmi_enter(); 335 351 } 336 352 337 353 static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state) 338 354 { 339 - if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) || 340 - !firmware_has_feature(FW_FEATURE_LPAR) || 341 - radix_enabled() || (mfmsr() & MSR_DR)) 355 + if (mfmsr() & MSR_DR) { 356 + // nmi_exit if relocations are on 342 357 nmi_exit(); 358 + } else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && 359 + firmware_has_feature(FW_FEATURE_LPAR) && 360 + !radix_enabled()) { 361 + // no nmi_exit for a pseries hash guest taking a real mode exception 362 + } else if (IS_ENABLED(CONFIG_KASAN)) { 363 + // no nmi_exit for KASAN in real mode 364 + } else { 365 + nmi_exit(); 366 + } 343 367 344 368 /* 345 369 * nmi does not call nap_adjust_return because nmi should not create ··· 431 407 * Specific handlers may have additional restrictions. 
432 408 */ 433 409 #define DEFINE_INTERRUPT_HANDLER_RAW(func) \ 434 - static __always_inline long ____##func(struct pt_regs *regs); \ 410 + static __always_inline __no_sanitize_address __no_kcsan long \ 411 + ____##func(struct pt_regs *regs); \ 435 412 \ 436 413 interrupt_handler long func(struct pt_regs *regs) \ 437 414 { \ ··· 446 421 } \ 447 422 NOKPROBE_SYMBOL(func); \ 448 423 \ 449 - static __always_inline long ____##func(struct pt_regs *regs) 424 + static __always_inline __no_sanitize_address __no_kcsan long \ 425 + ____##func(struct pt_regs *regs) 450 426 451 427 /** 452 428 * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function ··· 567 541 * body with a pair of curly brackets. 568 542 */ 569 543 #define DEFINE_INTERRUPT_HANDLER_NMI(func) \ 570 - static __always_inline long ____##func(struct pt_regs *regs); \ 544 + static __always_inline __no_sanitize_address __no_kcsan long \ 545 + ____##func(struct pt_regs *regs); \ 571 546 \ 572 547 interrupt_handler long func(struct pt_regs *regs) \ 573 548 { \ ··· 585 558 } \ 586 559 NOKPROBE_SYMBOL(func); \ 587 560 \ 588 - static __always_inline long ____##func(struct pt_regs *regs) 561 + static __always_inline __no_sanitize_address __no_kcsan long \ 562 + ____##func(struct pt_regs *regs) 589 563 590 564 591 565 /* Interrupt handlers */
+11 -11
arch/powerpc/kernel/smp.c
··· 411 411 static bool nmi_ipi_busy = false; 412 412 static void (*nmi_ipi_function)(struct pt_regs *) = NULL; 413 413 414 - static void nmi_ipi_lock_start(unsigned long *flags) 414 + noinstr static void nmi_ipi_lock_start(unsigned long *flags) 415 415 { 416 416 raw_local_irq_save(*flags); 417 417 hard_irq_disable(); 418 - while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) { 418 + while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) { 419 419 raw_local_irq_restore(*flags); 420 - spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0); 420 + spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0); 421 421 raw_local_irq_save(*flags); 422 422 hard_irq_disable(); 423 423 } 424 424 } 425 425 426 - static void nmi_ipi_lock(void) 426 + noinstr static void nmi_ipi_lock(void) 427 427 { 428 - while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) 429 - spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0); 428 + while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) 429 + spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0); 430 430 } 431 431 432 - static void nmi_ipi_unlock(void) 432 + noinstr static void nmi_ipi_unlock(void) 433 433 { 434 434 smp_mb(); 435 - WARN_ON(atomic_read(&__nmi_ipi_lock) != 1); 436 - atomic_set(&__nmi_ipi_lock, 0); 435 + WARN_ON(arch_atomic_read(&__nmi_ipi_lock) != 1); 436 + arch_atomic_set(&__nmi_ipi_lock, 0); 437 437 } 438 438 439 - static void nmi_ipi_unlock_end(unsigned long *flags) 439 + noinstr static void nmi_ipi_unlock_end(unsigned long *flags) 440 440 { 441 441 nmi_ipi_unlock(); 442 442 raw_local_irq_restore(*flags); ··· 445 445 /* 446 446 * Platform NMI handler calls this to ack 447 447 */ 448 - int smp_handle_nmi_ipi(struct pt_regs *regs) 448 + noinstr int smp_handle_nmi_ipi(struct pt_regs *regs) 449 449 { 450 450 void (*fn)(struct pt_regs *) = NULL; 451 451 unsigned long flags;
+4 -2
arch/powerpc/kernel/traps.c
··· 393 393 * Builds that do not support KVM could take this second option to increase 394 394 * the recoverability of NMIs. 395 395 */ 396 - void hv_nmi_check_nonrecoverable(struct pt_regs *regs) 396 + noinstr void hv_nmi_check_nonrecoverable(struct pt_regs *regs) 397 397 { 398 398 #ifdef CONFIG_PPC_POWERNV 399 399 unsigned long kbase = (unsigned long)_stext; ··· 433 433 return; 434 434 435 435 nonrecoverable: 436 - regs_set_unrecoverable(regs); 436 + regs->msr &= ~MSR_RI; 437 + local_paca->hsrr_valid = 0; 438 + local_paca->srr_valid = 0; 437 439 #endif 438 440 } 439 441 DEFINE_INTERRUPT_HANDLER_NMI(system_reset_exception)
+3
arch/powerpc/lib/Makefile
··· 13 13 14 14 KASAN_SANITIZE_code-patching.o := n 15 15 KASAN_SANITIZE_feature-fixups.o := n 16 + # restart_table.o contains functions called in the NMI interrupt path 17 + # which can be in real mode. Disable KASAN. 18 + KASAN_SANITIZE_restart_table.o := n 16 19 17 20 ifdef CONFIG_KASAN 18 21 CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING
+1 -1
arch/powerpc/platforms/powernv/smp.c
··· 345 345 } 346 346 } 347 347 348 - static int pnv_system_reset_exception(struct pt_regs *regs) 348 + noinstr static int pnv_system_reset_exception(struct pt_regs *regs) 349 349 { 350 350 if (smp_handle_nmi_ipi(regs)) 351 351 return 1;