Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] lockdep: irqtrace subsystem, x86_64 support

Add irqflags-tracing support to x86_64.

[akpm@osdl.org: build fix]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Ingo Molnar; committed by Linus Torvalds.
2601e64d c8558fcd

+223 -91
+4
arch/x86_64/Kconfig.debug
··· 1 1 menu "Kernel hacking" 2 2 3 + config TRACE_IRQFLAGS_SUPPORT 4 + bool 5 + default y 6 + 3 7 source "lib/Kconfig.debug" 4 8 5 9 config DEBUG_RODATA
+18 -1
arch/x86_64/ia32/ia32entry.S
··· 13 13 #include <asm/thread_info.h> 14 14 #include <asm/segment.h> 15 15 #include <asm/vsyscall32.h> 16 + #include <asm/irqflags.h> 16 17 #include <linux/linkage.h> 17 18 18 19 #define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8) ··· 76 75 swapgs 77 76 movq %gs:pda_kernelstack, %rsp 78 77 addq $(PDA_STACKOFFSET),%rsp 78 + /* 79 + * No need to follow this irqs on/off section: the syscall 80 + * disabled irqs, here we enable it straight after entry: 81 + */ 79 82 sti 80 83 movl %ebp,%ebp /* zero extension */ 81 84 pushq $__USER32_DS ··· 123 118 movq %rax,RAX-ARGOFFSET(%rsp) 124 119 GET_THREAD_INFO(%r10) 125 120 cli 121 + TRACE_IRQS_OFF 126 122 testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10) 127 123 jnz int_ret_from_sys_call 128 124 andl $~TS_COMPAT,threadinfo_status(%r10) ··· 138 132 CFI_REGISTER rsp,rcx 139 133 movl $VSYSCALL32_SYSEXIT,%edx /* User %eip */ 140 134 CFI_REGISTER rip,rdx 135 + TRACE_IRQS_ON 141 136 swapgs 142 137 sti /* sti only takes effect after the next instruction */ 143 138 /* sysexit */ ··· 193 186 movl %esp,%r8d 194 187 CFI_REGISTER rsp,r8 195 188 movq %gs:pda_kernelstack,%rsp 189 + /* 190 + * No need to follow this irqs on/off section: the syscall 191 + * disabled irqs and here we enable it straight after entry: 192 + */ 196 193 sti 197 194 SAVE_ARGS 8,1,1 198 195 movl %eax,%eax /* zero extension */ ··· 231 220 movq %rax,RAX-ARGOFFSET(%rsp) 232 221 GET_THREAD_INFO(%r10) 233 222 cli 223 + TRACE_IRQS_OFF 234 224 testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10) 235 225 jnz int_ret_from_sys_call 236 226 andl $~TS_COMPAT,threadinfo_status(%r10) ··· 240 228 CFI_REGISTER rip,rcx 241 229 movl EFLAGS-ARGOFFSET(%rsp),%r11d 242 230 /*CFI_REGISTER rflags,r11*/ 231 + TRACE_IRQS_ON 243 232 movl RSP-ARGOFFSET(%rsp),%esp 244 233 CFI_RESTORE rsp 245 234 swapgs ··· 299 286 /*CFI_REL_OFFSET rflags,EFLAGS-RIP*/ 300 287 /*CFI_REL_OFFSET cs,CS-RIP*/ 301 288 CFI_REL_OFFSET rip,RIP-RIP 302 - swapgs 289 + swapgs 290 + /* 291 + * No need to follow 
this irqs on/off section: the syscall 292 + * disabled irqs and here we enable it straight after entry: 293 + */ 303 294 sti 304 295 movl %eax,%eax 305 296 pushq %rax
+136 -52
arch/x86_64/kernel/entry.S
··· 41 41 #include <asm/thread_info.h> 42 42 #include <asm/hw_irq.h> 43 43 #include <asm/page.h> 44 + #include <asm/irqflags.h> 44 45 45 46 .code64 46 47 47 48 #ifndef CONFIG_PREEMPT 48 49 #define retint_kernel retint_restore_args 49 50 #endif 50 - 51 + 52 + 53 + .macro TRACE_IRQS_IRETQ offset=ARGOFFSET 54 + #ifdef CONFIG_TRACE_IRQFLAGS 55 + bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */ 56 + jnc 1f 57 + TRACE_IRQS_ON 58 + 1: 59 + #endif 60 + .endm 61 + 51 62 /* 52 63 * C code is not supposed to know about undefined top of stack. Every time 53 64 * a C function with an pt_regs argument is called from the SYSCALL based ··· 205 194 swapgs 206 195 movq %rsp,%gs:pda_oldrsp 207 196 movq %gs:pda_kernelstack,%rsp 197 + /* 198 + * No need to follow this irqs off/on section - it's straight 199 + * and short: 200 + */ 208 201 sti 209 202 SAVE_ARGS 8,1 210 203 movq %rax,ORIG_RAX-ARGOFFSET(%rsp) ··· 234 219 sysret_check: 235 220 GET_THREAD_INFO(%rcx) 236 221 cli 222 + TRACE_IRQS_OFF 237 223 movl threadinfo_flags(%rcx),%edx 238 224 andl %edi,%edx 239 225 CFI_REMEMBER_STATE 240 226 jnz sysret_careful 227 + /* 228 + * sysretq will re-enable interrupts: 229 + */ 230 + TRACE_IRQS_ON 241 231 movq RIP-ARGOFFSET(%rsp),%rcx 242 232 CFI_REGISTER rip,rcx 243 233 RESTORE_ARGS 0,-ARG_SKIP,1 ··· 257 237 CFI_RESTORE_STATE 258 238 bt $TIF_NEED_RESCHED,%edx 259 239 jnc sysret_signal 240 + TRACE_IRQS_ON 260 241 sti 261 242 pushq %rdi 262 243 CFI_ADJUST_CFA_OFFSET 8 ··· 268 247 269 248 /* Handle a signal */ 270 249 sysret_signal: 250 + TRACE_IRQS_ON 271 251 sti 272 252 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx 273 253 jz 1f ··· 283 261 /* Use IRET because user could have changed frame. This 284 262 works because ptregscall_common has called FIXUP_TOP_OF_STACK. 
*/ 285 263 cli 264 + TRACE_IRQS_OFF 286 265 jmp int_with_check 287 266 288 267 badsys: ··· 332 309 CFI_REL_OFFSET r10,R10-ARGOFFSET 333 310 CFI_REL_OFFSET r11,R11-ARGOFFSET 334 311 cli 312 + TRACE_IRQS_OFF 335 313 testl $3,CS-ARGOFFSET(%rsp) 336 314 je retint_restore_args 337 315 movl $_TIF_ALLWORK_MASK,%edi ··· 351 327 int_careful: 352 328 bt $TIF_NEED_RESCHED,%edx 353 329 jnc int_very_careful 330 + TRACE_IRQS_ON 354 331 sti 355 332 pushq %rdi 356 333 CFI_ADJUST_CFA_OFFSET 8 ··· 359 334 popq %rdi 360 335 CFI_ADJUST_CFA_OFFSET -8 361 336 cli 337 + TRACE_IRQS_OFF 362 338 jmp int_with_check 363 339 364 340 /* handle signals and tracing -- both require a full stack frame */ 365 341 int_very_careful: 342 + TRACE_IRQS_ON 366 343 sti 367 344 SAVE_REST 368 345 /* Check for syscall exit trace */ ··· 378 351 CFI_ADJUST_CFA_OFFSET -8 379 352 andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi 380 353 cli 354 + TRACE_IRQS_OFF 381 355 jmp int_restore_rest 382 356 383 357 int_signal: ··· 391 363 int_restore_rest: 392 364 RESTORE_REST 393 365 cli 366 + TRACE_IRQS_OFF 394 367 jmp int_with_check 395 368 CFI_ENDPROC 396 369 END(int_ret_from_sys_call) ··· 513 484 swapgs 514 485 1: incl %gs:pda_irqcount # RED-PEN should check preempt count 515 486 cmoveq %gs:pda_irqstackptr,%rsp 487 + /* 488 + * We entered an interrupt context - irqs are off: 489 + */ 490 + TRACE_IRQS_OFF 516 491 call \func 517 492 .endm 518 493 ··· 526 493 /* 0(%rsp): oldrsp-ARGOFFSET */ 527 494 ret_from_intr: 528 495 cli 496 + TRACE_IRQS_OFF 529 497 decl %gs:pda_irqcount 530 498 leaveq 531 499 CFI_DEF_CFA_REGISTER rsp ··· 549 515 CFI_REMEMBER_STATE 550 516 jnz retint_careful 551 517 retint_swapgs: 518 + /* 519 + * The iretq could re-enable interrupts: 520 + */ 521 + cli 522 + TRACE_IRQS_IRETQ 552 523 swapgs 524 + jmp restore_args 525 + 553 526 retint_restore_args: 554 527 cli 528 + /* 529 + * The iretq could re-enable interrupts: 530 + */ 531 + TRACE_IRQS_IRETQ 532 + restore_args: 555 533 
RESTORE_ARGS 0,8,0 556 534 iret_label: 557 535 iretq ··· 576 530 /* running with kernel gs */ 577 531 bad_iret: 578 532 movq $11,%rdi /* SIGSEGV */ 533 + TRACE_IRQS_ON 579 534 sti 580 535 jmp do_exit 581 536 .previous ··· 586 539 CFI_RESTORE_STATE 587 540 bt $TIF_NEED_RESCHED,%edx 588 541 jnc retint_signal 542 + TRACE_IRQS_ON 589 543 sti 590 544 pushq %rdi 591 545 CFI_ADJUST_CFA_OFFSET 8 ··· 595 547 CFI_ADJUST_CFA_OFFSET -8 596 548 GET_THREAD_INFO(%rcx) 597 549 cli 550 + TRACE_IRQS_OFF 598 551 jmp retint_check 599 552 600 553 retint_signal: 601 554 testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx 602 555 jz retint_swapgs 556 + TRACE_IRQS_ON 603 557 sti 604 558 SAVE_REST 605 559 movq $-1,ORIG_RAX(%rsp) ··· 610 560 call do_notify_resume 611 561 RESTORE_REST 612 562 cli 563 + TRACE_IRQS_OFF 613 564 movl $_TIF_NEED_RESCHED,%edi 614 565 GET_THREAD_INFO(%rcx) 615 566 jmp retint_check ··· 717 666 718 667 /* error code is on the stack already */ 719 668 /* handle NMI like exceptions that can happen everywhere */ 720 - .macro paranoidentry sym, ist=0 669 + .macro paranoidentry sym, ist=0, irqtrace=1 721 670 SAVE_ALL 722 671 cld 723 672 movl $1,%ebx ··· 742 691 addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp) 743 692 .endif 744 693 cli 694 + .if \irqtrace 695 + TRACE_IRQS_OFF 696 + .endif 745 697 .endm 746 - 698 + 699 + /* 700 + * "Paranoid" exit path from exception stack. 701 + * Paranoid because this is used by NMIs and cannot take 702 + * any kernel state for granted. 703 + * We don't do kernel preemption checks here, because only 704 + * NMI should be common and it does not enable IRQs and 705 + * cannot get reschedule ticks. 706 + * 707 + * "trace" is 0 for the NMI handler only, because irq-tracing 708 + * is fundamentally NMI-unsafe. 
(we cannot change the soft and 709 + * hard flags at once, atomically) 710 + */ 711 + .macro paranoidexit trace=1 712 + /* ebx: no swapgs flag */ 713 + paranoid_exit\trace: 714 + testl %ebx,%ebx /* swapgs needed? */ 715 + jnz paranoid_restore\trace 716 + testl $3,CS(%rsp) 717 + jnz paranoid_userspace\trace 718 + paranoid_swapgs\trace: 719 + TRACE_IRQS_IRETQ 0 720 + swapgs 721 + paranoid_restore\trace: 722 + RESTORE_ALL 8 723 + iretq 724 + paranoid_userspace\trace: 725 + GET_THREAD_INFO(%rcx) 726 + movl threadinfo_flags(%rcx),%ebx 727 + andl $_TIF_WORK_MASK,%ebx 728 + jz paranoid_swapgs\trace 729 + movq %rsp,%rdi /* &pt_regs */ 730 + call sync_regs 731 + movq %rax,%rsp /* switch stack for scheduling */ 732 + testl $_TIF_NEED_RESCHED,%ebx 733 + jnz paranoid_schedule\trace 734 + movl %ebx,%edx /* arg3: thread flags */ 735 + .if \trace 736 + TRACE_IRQS_ON 737 + .endif 738 + sti 739 + xorl %esi,%esi /* arg2: oldset */ 740 + movq %rsp,%rdi /* arg1: &pt_regs */ 741 + call do_notify_resume 742 + cli 743 + .if \trace 744 + TRACE_IRQS_OFF 745 + .endif 746 + jmp paranoid_userspace\trace 747 + paranoid_schedule\trace: 748 + .if \trace 749 + TRACE_IRQS_ON 750 + .endif 751 + sti 752 + call schedule 753 + cli 754 + .if \trace 755 + TRACE_IRQS_OFF 756 + .endif 757 + jmp paranoid_userspace\trace 758 + CFI_ENDPROC 759 + .endm 760 + 747 761 /* 748 762 * Exception entry point. This expects an error code/orig_rax on the stack 749 763 * and the exception handler in %rax. 
··· 864 748 movl %ebx,%eax 865 749 RESTORE_REST 866 750 cli 751 + TRACE_IRQS_OFF 867 752 GET_THREAD_INFO(%rcx) 868 753 testl %eax,%eax 869 754 jne retint_kernel ··· 872 755 movl $_TIF_WORK_MASK,%edi 873 756 andl %edi,%edx 874 757 jnz retint_careful 758 + /* 759 + * The iret might restore flags: 760 + */ 761 + TRACE_IRQS_IRETQ 875 762 swapgs 876 763 RESTORE_ARGS 0,8,0 877 764 jmp iret_label ··· 1037 916 pushq $0 1038 917 CFI_ADJUST_CFA_OFFSET 8 1039 918 paranoidentry do_debug, DEBUG_STACK 1040 - jmp paranoid_exit 1041 - CFI_ENDPROC 919 + paranoidexit 1042 920 END(debug) 1043 921 .previous .text 1044 922 ··· 1046 926 INTR_FRAME 1047 927 pushq $-1 1048 928 CFI_ADJUST_CFA_OFFSET 8 1049 - paranoidentry do_nmi 1050 - /* 1051 - * "Paranoid" exit path from exception stack. 1052 - * Paranoid because this is used by NMIs and cannot take 1053 - * any kernel state for granted. 1054 - * We don't do kernel preemption checks here, because only 1055 - * NMI should be common and it does not enable IRQs and 1056 - * cannot get reschedule ticks. 1057 - */ 1058 - /* ebx: no swapgs flag */ 1059 - paranoid_exit: 1060 - testl %ebx,%ebx /* swapgs needed? 
*/ 1061 - jnz paranoid_restore 1062 - testl $3,CS(%rsp) 1063 - jnz paranoid_userspace 1064 - paranoid_swapgs: 1065 - swapgs 1066 - paranoid_restore: 1067 - RESTORE_ALL 8 1068 - iretq 1069 - paranoid_userspace: 1070 - GET_THREAD_INFO(%rcx) 1071 - movl threadinfo_flags(%rcx),%ebx 1072 - andl $_TIF_WORK_MASK,%ebx 1073 - jz paranoid_swapgs 1074 - movq %rsp,%rdi /* &pt_regs */ 1075 - call sync_regs 1076 - movq %rax,%rsp /* switch stack for scheduling */ 1077 - testl $_TIF_NEED_RESCHED,%ebx 1078 - jnz paranoid_schedule 1079 - movl %ebx,%edx /* arg3: thread flags */ 1080 - sti 1081 - xorl %esi,%esi /* arg2: oldset */ 1082 - movq %rsp,%rdi /* arg1: &pt_regs */ 1083 - call do_notify_resume 1084 - cli 1085 - jmp paranoid_userspace 1086 - paranoid_schedule: 1087 - sti 1088 - call schedule 1089 - cli 1090 - jmp paranoid_userspace 1091 - CFI_ENDPROC 929 + paranoidentry do_nmi, 0, 0 930 + #ifdef CONFIG_TRACE_IRQFLAGS 931 + paranoidexit 0 932 + #else 933 + jmp paranoid_exit1 934 + CFI_ENDPROC 935 + #endif 1092 936 END(nmi) 1093 937 .previous .text 1094 938 ··· 1061 977 pushq $0 1062 978 CFI_ADJUST_CFA_OFFSET 8 1063 979 paranoidentry do_int3, DEBUG_STACK 1064 - jmp paranoid_exit 980 + jmp paranoid_exit1 1065 981 CFI_ENDPROC 1066 982 END(int3) 1067 983 .previous .text ··· 1090 1006 ENTRY(double_fault) 1091 1007 XCPT_FRAME 1092 1008 paranoidentry do_double_fault 1093 - jmp paranoid_exit 1009 + jmp paranoid_exit1 1094 1010 CFI_ENDPROC 1095 1011 END(double_fault) 1096 1012 ··· 1106 1022 ENTRY(stack_segment) 1107 1023 XCPT_FRAME 1108 1024 paranoidentry do_stack_segment 1109 - jmp paranoid_exit 1025 + jmp paranoid_exit1 1110 1026 CFI_ENDPROC 1111 1027 END(stack_segment) 1112 1028 ··· 1134 1050 pushq $0 1135 1051 CFI_ADJUST_CFA_OFFSET 8 1136 1052 paranoidentry do_machine_check 1137 - jmp paranoid_exit 1053 + jmp paranoid_exit1 1138 1054 CFI_ENDPROC 1139 1055 END(machine_check) 1140 1056 #endif
+3 -1
arch/x86_64/kernel/irq.c
··· 177 177 local_irq_save(flags); 178 178 pending = local_softirq_pending(); 179 179 /* Switch to interrupt stack */ 180 - if (pending) 180 + if (pending) { 181 181 call_softirq(); 182 + WARN_ON_ONCE(softirq_count()); 183 + } 182 184 local_irq_restore(flags); 183 185 } 184 186 EXPORT_SYMBOL(do_softirq);
+61
include/asm-x86_64/irqflags.h
··· 1 + /* 2 + * include/asm-x86_64/irqflags.h 3 + * 4 + * IRQ flags handling 5 + * 6 + * This file gets included from lowlevel asm headers too, to provide 7 + * wrapped versions of the local_irq_*() APIs, based on the 8 + * raw_local_irq_*() macros from the lowlevel headers. 9 + */ 10 + #ifndef _ASM_IRQFLAGS_H 11 + #define _ASM_IRQFLAGS_H 12 + 13 + #ifndef __ASSEMBLY__ 14 + 15 + /* interrupt control.. */ 16 + #define raw_local_save_flags(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0) 17 + #define raw_local_irq_restore(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc") 18 + 19 + #ifdef CONFIG_X86_VSMP 20 + /* Interrupt control for VSMP architecture */ 21 + #define raw_local_irq_disable() do { unsigned long flags; raw_local_save_flags(flags); raw_local_irq_restore((flags & ~(1 << 9)) | (1 << 18)); } while (0) 22 + #define raw_local_irq_enable() do { unsigned long flags; raw_local_save_flags(flags); raw_local_irq_restore((flags | (1 << 9)) & ~(1 << 18)); } while (0) 23 + 24 + #define raw_irqs_disabled_flags(flags) \ 25 + ({ \ 26 + (flags & (1<<18)) || !(flags & (1<<9)); \ 27 + }) 28 + 29 + /* For spinlocks etc */ 30 + #define raw_local_irq_save(x) do { raw_local_save_flags(x); raw_local_irq_restore((x & ~(1 << 9)) | (1 << 18)); } while (0) 31 + #else /* CONFIG_X86_VSMP */ 32 + #define raw_local_irq_disable() __asm__ __volatile__("cli": : :"memory") 33 + #define raw_local_irq_enable() __asm__ __volatile__("sti": : :"memory") 34 + 35 + #define raw_irqs_disabled_flags(flags) \ 36 + ({ \ 37 + !(flags & (1<<9)); \ 38 + }) 39 + 40 + /* For spinlocks etc */ 41 + #define raw_local_irq_save(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# raw_local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0) 42 + #endif 43 + 44 + #define raw_irqs_disabled() \ 45 + ({ \ 46 + unsigned long flags; \ 47 + 
raw_local_save_flags(flags); \ 48 + raw_irqs_disabled_flags(flags); \ 49 + }) 50 + 51 + /* used in the idle loop; sti takes one instruction cycle to complete */ 52 + #define raw_safe_halt() __asm__ __volatile__("sti; hlt": : :"memory") 53 + /* used when interrupts are already enabled or to shutdown the processor */ 54 + #define halt() __asm__ __volatile__("hlt": : :"memory") 55 + 56 + #else /* __ASSEMBLY__: */ 57 + # define TRACE_IRQS_ON 58 + # define TRACE_IRQS_OFF 59 + #endif 60 + 61 + #endif
+1 -37
include/asm-x86_64/system.h
··· 244 244 245 245 #define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0) 246 246 247 - /* interrupt control.. */ 248 - #define local_save_flags(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0) 249 - #define local_irq_restore(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc") 250 - 251 - #ifdef CONFIG_X86_VSMP 252 - /* Interrupt control for VSMP architecture */ 253 - #define local_irq_disable() do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags & ~(1 << 9)) | (1 << 18)); } while (0) 254 - #define local_irq_enable() do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags | (1 << 9)) & ~(1 << 18)); } while (0) 255 - 256 - #define irqs_disabled() \ 257 - ({ \ 258 - unsigned long flags; \ 259 - local_save_flags(flags); \ 260 - (flags & (1<<18)) || !(flags & (1<<9)); \ 261 - }) 262 - 263 - /* For spinlocks etc */ 264 - #define local_irq_save(x) do { local_save_flags(x); local_irq_restore((x & ~(1 << 9)) | (1 << 18)); } while (0) 265 - #else /* CONFIG_X86_VSMP */ 266 - #define local_irq_disable() __asm__ __volatile__("cli": : :"memory") 267 - #define local_irq_enable() __asm__ __volatile__("sti": : :"memory") 268 - 269 - #define irqs_disabled() \ 270 - ({ \ 271 - unsigned long flags; \ 272 - local_save_flags(flags); \ 273 - !(flags & (1<<9)); \ 274 - }) 275 - 276 - /* For spinlocks etc */ 277 - #define local_irq_save(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0) 278 - #endif 279 - 280 - /* used in the idle loop; sti takes one instruction cycle to complete */ 281 - #define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory") 282 - /* used when interrupts are already enabled or to shutdown the processor */ 283 - #define halt() __asm__ 
__volatile__("hlt": : :"memory") 247 + #include <linux/irqflags.h> 284 248 285 249 void cpu_idle_wait(void); 286 250