Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/irq: Remove arch_local_irq_restore() for !CONFIG_CC_HAS_ASM_GOTO

All supported versions of GCC & clang support asm goto.

Remove the !CONFIG_CC_HAS_ASM_GOTO version of arch_local_irq_restore().

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/58df50c9e77e2ed945bacdead30412770578886b.1652715336.git.christophe.leroy@csgroup.eu

Authored by Christophe Leroy and committed by Michael Ellerman
5fe85516 48482f4d

-77
-77
arch/powerpc/kernel/irq.c
··· 217 217 #define replay_soft_interrupts_irqrestore() replay_soft_interrupts() 218 218 #endif 219 219 220 - #ifdef CONFIG_CC_HAS_ASM_GOTO 221 220 notrace void arch_local_irq_restore(unsigned long mask) 222 221 { 223 222 unsigned char irq_happened; ··· 312 313 __hard_irq_enable(); 313 314 preempt_enable(); 314 315 } 315 - #else 316 - notrace void arch_local_irq_restore(unsigned long mask) 317 - { 318 - unsigned char irq_happened; 319 - 320 - /* Write the new soft-enabled value */ 321 - irq_soft_mask_set(mask); 322 - if (mask) 323 - return; 324 - 325 - if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) 326 - WARN_ON_ONCE(in_nmi() || in_hardirq()); 327 - 328 - /* 329 - * From this point onward, we can take interrupts, preempt, 330 - * etc... unless we got hard-disabled. We check if an event 331 - * happened. If none happened, we know we can just return. 332 - * 333 - * We may have preempted before the check below, in which case 334 - * we are checking the "new" CPU instead of the old one. This 335 - * is only a problem if an event happened on the "old" CPU. 336 - * 337 - * External interrupt events will have caused interrupts to 338 - * be hard-disabled, so there is no problem, we 339 - * cannot have preempted. 340 - */ 341 - irq_happened = get_irq_happened(); 342 - if (!irq_happened) { 343 - if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) 344 - WARN_ON_ONCE(!(mfmsr() & MSR_EE)); 345 - return; 346 - } 347 - 348 - /* We need to hard disable to replay. */ 349 - if (!(irq_happened & PACA_IRQ_HARD_DIS)) { 350 - if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) 351 - WARN_ON_ONCE(!(mfmsr() & MSR_EE)); 352 - __hard_irq_disable(); 353 - local_paca->irq_happened |= PACA_IRQ_HARD_DIS; 354 - } else { 355 - /* 356 - * We should already be hard disabled here. We had bugs 357 - * where that wasn't the case so let's dbl check it and 358 - * warn if we are wrong. Only do that when IRQ tracing 359 - * is enabled as mfmsr() can be costly. 
360 - */ 361 - if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) { 362 - if (WARN_ON_ONCE(mfmsr() & MSR_EE)) 363 - __hard_irq_disable(); 364 - } 365 - 366 - if (irq_happened == PACA_IRQ_HARD_DIS) { 367 - local_paca->irq_happened = 0; 368 - __hard_irq_enable(); 369 - return; 370 - } 371 - } 372 - 373 - /* 374 - * Disable preempt here, so that the below preempt_enable will 375 - * perform resched if required (a replayed interrupt may set 376 - * need_resched). 377 - */ 378 - preempt_disable(); 379 - irq_soft_mask_set(IRQS_ALL_DISABLED); 380 - trace_hardirqs_off(); 381 - 382 - replay_soft_interrupts_irqrestore(); 383 - local_paca->irq_happened = 0; 384 - 385 - trace_hardirqs_on(); 386 - irq_soft_mask_set(IRQS_ENABLED); 387 - __hard_irq_enable(); 388 - preempt_enable(); 389 - } 390 - #endif 391 316 EXPORT_SYMBOL(arch_local_irq_restore); 392 317 393 318 /*