Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86: Use enum instead of literals for trap values

The traps are referred to by their numbers and it can be difficult to
understand them while reading the code without context. This patch adds
enumeration of the trap numbers and replaces the numbers with the correct
enum for x86.

Signed-off-by: Kees Cook <keescook@chromium.org>
Link: http://lkml.kernel.org/r/20120310000710.GA32667@www.outflux.net
Signed-off-by: H. Peter Anvin <hpa@zytor.com>

Authored by Kees Cook and committed by H. Peter Anvin
c9408265 192cfd58

+91 -59
+25
arch/x86/include/asm/traps.h
··· 89 89 asmlinkage void mce_threshold_interrupt(void); 90 90 #endif 91 91 92 + /* Interrupts/Exceptions */ 93 + enum { 94 + X86_TRAP_DE = 0, /* 0, Divide-by-zero */ 95 + X86_TRAP_DB, /* 1, Debug */ 96 + X86_TRAP_NMI, /* 2, Non-maskable Interrupt */ 97 + X86_TRAP_BP, /* 3, Breakpoint */ 98 + X86_TRAP_OF, /* 4, Overflow */ 99 + X86_TRAP_BR, /* 5, Bound Range Exceeded */ 100 + X86_TRAP_UD, /* 6, Invalid Opcode */ 101 + X86_TRAP_NM, /* 7, Device Not Available */ 102 + X86_TRAP_DF, /* 8, Double Fault */ 103 + X86_TRAP_OLD_MF, /* 9, Coprocessor Segment Overrun */ 104 + X86_TRAP_TS, /* 10, Invalid TSS */ 105 + X86_TRAP_NP, /* 11, Segment Not Present */ 106 + X86_TRAP_SS, /* 12, Stack Segment Fault */ 107 + X86_TRAP_GP, /* 13, General Protection Fault */ 108 + X86_TRAP_PF, /* 14, Page Fault */ 109 + X86_TRAP_SPURIOUS, /* 15, Spurious Interrupt */ 110 + X86_TRAP_MF, /* 16, x87 Floating-Point Exception */ 111 + X86_TRAP_AC, /* 17, Alignment Check */ 112 + X86_TRAP_MC, /* 18, Machine Check */ 113 + X86_TRAP_XF, /* 19, SIMD Floating-Point Exception */ 114 + X86_TRAP_IRET = 32, /* 32, IRET Exception */ 115 + }; 116 + 92 117 #endif /* _ASM_X86_TRAPS_H */
+1 -1
arch/x86/kernel/irqinit.c
··· 61 61 outb(0, 0xF0); 62 62 if (ignore_fpu_irq || !boot_cpu_data.hard_math) 63 63 return IRQ_NONE; 64 - math_error(get_irq_regs(), 0, 16); 64 + math_error(get_irq_regs(), 0, X86_TRAP_MF); 65 65 return IRQ_HANDLED; 66 66 } 67 67
+65 -58
arch/x86/kernel/traps.c
··· 119 119 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86. 120 120 * On nmi (interrupt 2), do_trap should not be called. 121 121 */ 122 - if (trapnr < 6) 122 + if (trapnr < X86_TRAP_UD) 123 123 goto vm86_trap; 124 124 goto trap_signal; 125 125 } ··· 203 203 do_trap(trapnr, signr, str, regs, error_code, &info); \ 204 204 } 205 205 206 - DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip) 207 - DO_ERROR(4, SIGSEGV, "overflow", overflow) 208 - DO_ERROR(5, SIGSEGV, "bounds", bounds) 209 - DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip) 210 - DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) 211 - DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) 212 - DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) 206 + DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV, 207 + regs->ip) 208 + DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow) 209 + DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds) 210 + DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, 211 + regs->ip) 212 + DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", 213 + coprocessor_segment_overrun) 214 + DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS) 215 + DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present) 213 216 #ifdef CONFIG_X86_32 214 - DO_ERROR(12, SIGBUS, "stack segment", stack_segment) 217 + DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment) 215 218 #endif 216 - DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0) 219 + DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check, 220 + BUS_ADRALN, 0) 217 221 218 222 #ifdef CONFIG_X86_64 219 223 /* Runs on IST stack */ 220 224 dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code) 221 225 { 222 226 if (notify_die(DIE_TRAP, "stack segment", regs, error_code, 223 - 12, SIGBUS) == NOTIFY_STOP) 227 + 
X86_TRAP_SS, SIGBUS) == NOTIFY_STOP) 224 228 return; 225 229 preempt_conditional_sti(regs); 226 - do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL); 230 + do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL); 227 231 preempt_conditional_cli(regs); 228 232 } 229 233 ··· 237 233 struct task_struct *tsk = current; 238 234 239 235 /* Return not checked because double check cannot be ignored */ 240 - notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV); 236 + notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV); 241 237 242 238 tsk->thread.error_code = error_code; 243 - tsk->thread.trap_no = 8; 239 + tsk->thread.trap_no = X86_TRAP_DF; 244 240 245 241 /* 246 242 * This is always a kernel trap and never fixable (and thus must ··· 268 264 goto gp_in_kernel; 269 265 270 266 tsk->thread.error_code = error_code; 271 - tsk->thread.trap_no = 13; 267 + tsk->thread.trap_no = X86_TRAP_GP; 272 268 273 269 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && 274 270 printk_ratelimit()) { ··· 295 291 return; 296 292 297 293 tsk->thread.error_code = error_code; 298 - tsk->thread.trap_no = 13; 299 - if (notify_die(DIE_GPF, "general protection fault", regs, 300 - error_code, 13, SIGSEGV) == NOTIFY_STOP) 294 + tsk->thread.trap_no = X86_TRAP_GP; 295 + if (notify_die(DIE_GPF, "general protection fault", regs, error_code, 296 + X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP) 301 297 return; 302 298 die("general protection fault", regs, error_code); 303 299 } ··· 306 302 dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code) 307 303 { 308 304 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP 309 - if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) 310 - == NOTIFY_STOP) 305 + if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, 306 + SIGTRAP) == NOTIFY_STOP) 311 307 return; 312 308 #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ 313 309 314 - if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) 315 - == 
NOTIFY_STOP) 310 + if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, 311 + SIGTRAP) == NOTIFY_STOP) 316 312 return; 317 313 318 314 /* ··· 321 317 */ 322 318 debug_stack_usage_inc(); 323 319 preempt_conditional_sti(regs); 324 - do_trap(3, SIGTRAP, "int3", regs, error_code, NULL); 320 + do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL); 325 321 preempt_conditional_cli(regs); 326 322 debug_stack_usage_dec(); 327 323 } ··· 426 422 preempt_conditional_sti(regs); 427 423 428 424 if (regs->flags & X86_VM_MASK) { 429 - handle_vm86_trap((struct kernel_vm86_regs *) regs, 430 - error_code, 1); 425 + handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 426 + X86_TRAP_DB); 431 427 preempt_conditional_cli(regs); 432 428 debug_stack_usage_dec(); 433 429 return; ··· 464 460 struct task_struct *task = current; 465 461 siginfo_t info; 466 462 unsigned short err; 467 - char *str = (trapnr == 16) ? "fpu exception" : "simd exception"; 463 + char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" : 464 + "simd exception"; 468 465 469 466 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP) 470 467 return; ··· 490 485 info.si_signo = SIGFPE; 491 486 info.si_errno = 0; 492 487 info.si_addr = (void __user *)regs->ip; 493 - if (trapnr == 16) { 488 + if (trapnr == X86_TRAP_MF) { 494 489 unsigned short cwd, swd; 495 490 /* 496 491 * (~cwd & swd) will mask out exceptions that are not set to unmasked ··· 534 529 info.si_code = FPE_FLTRES; 535 530 } else { 536 531 /* 537 - * If we're using IRQ 13, or supposedly even some trap 16 538 - * implementations, it's possible we get a spurious trap... 532 + * If we're using IRQ 13, or supposedly even some trap 533 + * X86_TRAP_MF implementations, it's possible 534 + * we get a spurious trap, which is not an error. 
539 535 */ 540 - return; /* Spurious trap, no error */ 536 + return; 541 537 } 542 538 force_sig_info(SIGFPE, &info, task); 543 539 } ··· 549 543 ignore_fpu_irq = 1; 550 544 #endif 551 545 552 - math_error(regs, error_code, 16); 546 + math_error(regs, error_code, X86_TRAP_MF); 553 547 } 554 548 555 549 dotraplinkage void 556 550 do_simd_coprocessor_error(struct pt_regs *regs, long error_code) 557 551 { 558 - math_error(regs, error_code, 19); 552 + math_error(regs, error_code, X86_TRAP_XF); 559 553 } 560 554 561 555 dotraplinkage void ··· 649 643 info.si_errno = 0; 650 644 info.si_code = ILL_BADSTK; 651 645 info.si_addr = NULL; 652 - if (notify_die(DIE_TRAP, "iret exception", 653 - regs, error_code, 32, SIGILL) == NOTIFY_STOP) 646 + if (notify_die(DIE_TRAP, "iret exception", regs, error_code, 647 + X86_TRAP_IRET, SIGILL) == NOTIFY_STOP) 654 648 return; 655 - do_trap(32, SIGILL, "iret exception", regs, error_code, &info); 649 + do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code, 650 + &info); 656 651 } 657 652 #endif 658 653 659 654 /* Set of traps needed for early debugging. 
*/ 660 655 void __init early_trap_init(void) 661 656 { 662 - set_intr_gate_ist(1, &debug, DEBUG_STACK); 657 + set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK); 663 658 /* int3 can be called from all */ 664 - set_system_intr_gate_ist(3, &int3, DEBUG_STACK); 665 - set_intr_gate(14, &page_fault); 659 + set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK); 660 + set_intr_gate(X86_TRAP_PF, &page_fault); 666 661 load_idt(&idt_descr); 667 662 } 668 663 ··· 679 672 early_iounmap(p, 4); 680 673 #endif 681 674 682 - set_intr_gate(0, &divide_error); 683 - set_intr_gate_ist(2, &nmi, NMI_STACK); 675 + set_intr_gate(X86_TRAP_DE, &divide_error); 676 + set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK); 684 677 /* int4 can be called from all */ 685 - set_system_intr_gate(4, &overflow); 686 - set_intr_gate(5, &bounds); 687 - set_intr_gate(6, &invalid_op); 688 - set_intr_gate(7, &device_not_available); 678 + set_system_intr_gate(X86_TRAP_OF, &overflow); 679 + set_intr_gate(X86_TRAP_BR, &bounds); 680 + set_intr_gate(X86_TRAP_UD, &invalid_op); 681 + set_intr_gate(X86_TRAP_NM, &device_not_available); 689 682 #ifdef CONFIG_X86_32 690 - set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS); 683 + set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS); 691 684 #else 692 - set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK); 685 + set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK); 693 686 #endif 694 - set_intr_gate(9, &coprocessor_segment_overrun); 695 - set_intr_gate(10, &invalid_TSS); 696 - set_intr_gate(11, &segment_not_present); 697 - set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK); 698 - set_intr_gate(13, &general_protection); 699 - set_intr_gate(15, &spurious_interrupt_bug); 700 - set_intr_gate(16, &coprocessor_error); 701 - set_intr_gate(17, &alignment_check); 687 + set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun); 688 + set_intr_gate(X86_TRAP_TS, &invalid_TSS); 689 + set_intr_gate(X86_TRAP_NP, &segment_not_present); 690 + 
set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK); 691 + set_intr_gate(X86_TRAP_GP, &general_protection); 692 + set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug); 693 + set_intr_gate(X86_TRAP_MF, &coprocessor_error); 694 + set_intr_gate(X86_TRAP_AC, &alignment_check); 702 695 #ifdef CONFIG_X86_MCE 703 - set_intr_gate_ist(18, &machine_check, MCE_STACK); 696 + set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK); 704 697 #endif 705 - set_intr_gate(19, &simd_coprocessor_error); 698 + set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error); 706 699 707 700 /* Reserve all the builtin and the syscall vector: */ 708 701 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) ··· 727 720 728 721 #ifdef CONFIG_X86_64 729 722 memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16); 730 - set_nmi_gate(1, &debug); 731 - set_nmi_gate(3, &int3); 723 + set_nmi_gate(X86_TRAP_DB, &debug); 724 + set_nmi_gate(X86_TRAP_BP, &int3); 732 725 #endif 733 726 }