Merge branch 'release' of master.kernel.org:/pub/scm/linux/kernel/git/aegl/linux-2.6

+1695 -1382
+25 -6
arch/ia64/hp/sim/boot/boot_head.S
···
  */
 
 #include <asm/asmmacro.h>
+#include <asm/pal.h>
 
 	.bss
 	.align 16
···
 	br.sptk.few b7
 END(jmp_to_kernel)
 
-
+/*
+ * r28 contains the index of the PAL function
+ * r29--31 the args
+ * Return values in ret0--3 (r8--11)
+ */
 GLOBAL_ENTRY(pal_emulator_static)
 	mov r8=-1
 	mov r9=256
···
 	cmp.gtu p6,p7=r9,r28
 (p6)	br.cond.sptk.few stacked
 	;;
-static:	cmp.eq p6,p7=6,r28		/* PAL_PTCE_INFO */
+static:	cmp.eq p6,p7=PAL_PTCE_INFO,r28
 (p7)	br.cond.sptk.few 1f
 	;;
 	mov r8=0			/* status = 0 */
···
 	movl r10=0x0000000200000003	/* count[0], count[1] */
 	movl r11=0x1000000000002000	/* stride[0], stride[1] */
 	br.cond.sptk.few rp
-1:	cmp.eq p6,p7=14,r28		/* PAL_FREQ_RATIOS */
+1:	cmp.eq p6,p7=PAL_FREQ_RATIOS,r28
 (p7)	br.cond.sptk.few 1f
 	mov r8=0			/* status = 0 */
 	movl r9 =0x100000064		/* proc_ratio (1/100) */
 	movl r10=0x100000100		/* bus_ratio<<32 (1/256) */
 	movl r11=0x100000064		/* itc_ratio<<32 (1/100) */
 	;;
-1:	cmp.eq p6,p7=19,r28		/* PAL_RSE_INFO */
+1:	cmp.eq p6,p7=PAL_RSE_INFO,r28
 (p7)	br.cond.sptk.few 1f
 	mov r8=0			/* status = 0 */
 	mov r9=96			/* num phys stacked */
 	mov r10=0			/* hints */
 	mov r11=0
 	br.cond.sptk.few rp
-1:	cmp.eq p6,p7=1,r28		/* PAL_CACHE_FLUSH */
+1:	cmp.eq p6,p7=PAL_CACHE_FLUSH,r28	/* PAL_CACHE_FLUSH */
 (p7)	br.cond.sptk.few 1f
 	mov r9=ar.lc
 	movl r8=524288			/* flush 512k million cache lines (16MB) */
···
 	mov ar.lc=r9
 	mov r8=r0
 	;;
-1:	cmp.eq p6,p7=15,r28		/* PAL_PERF_MON_INFO */
+1:	cmp.eq p6,p7=PAL_PERF_MON_INFO,r28
 (p7)	br.cond.sptk.few 1f
 	mov r8=0			/* status = 0 */
 	movl r9 =0x08122f04		/* generic=4 width=47 retired=8 cycles=18 */
···
 	;;
 	st8 [r29]=r0,16			/* clear remaining bits */
 	st8 [r18]=r0,16			/* clear remaining bits */
+	;;
+1:	cmp.eq p6,p7=PAL_VM_SUMMARY,r28
+(p7)	br.cond.sptk.few 1f
+	mov r8=0			/* status = 0 */
+	movl r9=0x2044040020F1865	/* num_tc_levels=2, num_unique_tcs=4 */
+					/* max_itr_entry=64, max_dtr_entry=64 */
+					/* hash_tag_id=2, max_pkr=15 */
+					/* key_size=24, phys_add_size=50, vw=1 */
+	movl r10=0x183C			/* rid_size=24, impl_va_msb=60 */
+	;;
+1:	cmp.eq p6,p7=PAL_MEM_ATTRIB,r28
+(p7)	br.cond.sptk.few 1f
+	mov r8=0			/* status = 0 */
+	mov r9=0x80|0x01		/* NatPage|WB */
 	;;
 1:	br.cond.sptk.few rp
 stacked:
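An aside on the PAL_FREQ_RATIOS values above: each ratio is packed as numerator<<32 | denominator, which is why 0x100000064 reads as 1/100 and 0x100000100 as 1/256. Below is a minimal, standalone C sketch of that decoding, purely for illustration; print_ratio and the main() driver are hypothetical names, not part of this patch.

#include <stdint.h>
#include <stdio.h>

/* Decode one packed ratio: numerator in the high 32 bits,
 * denominator in the low 32 bits. */
static void print_ratio(const char *name, uint64_t packed)
{
	uint32_t num = (uint32_t)(packed >> 32);
	uint32_t den = (uint32_t)(packed & 0xffffffffu);

	printf("%s = %u/%u\n", name, num, den);
}

int main(void)
{
	print_ratio("proc_ratio", 0x100000064ULL);	/* 1/100 */
	print_ratio("bus_ratio",  0x100000100ULL);	/* 1/256 */
	print_ratio("itc_ratio",  0x100000064ULL);	/* 1/100 */
	return 0;
}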
+32 -8
arch/ia64/kernel/asm-offsets.c
···
 #endif
 
 	BLANK();
-	DEFINE(IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET,
-	       offsetof (struct ia64_mca_cpu, proc_state_dump));
-	DEFINE(IA64_MCA_CPU_STACK_OFFSET,
-	       offsetof (struct ia64_mca_cpu, stack));
-	DEFINE(IA64_MCA_CPU_STACKFRAME_OFFSET,
-	       offsetof (struct ia64_mca_cpu, stackframe));
-	DEFINE(IA64_MCA_CPU_RBSTORE_OFFSET,
-	       offsetof (struct ia64_mca_cpu, rbstore));
+	DEFINE(IA64_MCA_CPU_MCA_STACK_OFFSET,
+	       offsetof (struct ia64_mca_cpu, mca_stack));
 	DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET,
 	       offsetof (struct ia64_mca_cpu, init_stack));
 	BLANK();
+	DEFINE(IA64_SAL_OS_STATE_COMMON_OFFSET,
+	       offsetof (struct ia64_sal_os_state, sal_ra));
+	DEFINE(IA64_SAL_OS_STATE_OS_GP_OFFSET,
+	       offsetof (struct ia64_sal_os_state, os_gp));
+	DEFINE(IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET,
+	       offsetof (struct ia64_sal_os_state, pal_min_state));
+	DEFINE(IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET,
+	       offsetof (struct ia64_sal_os_state, proc_state_param));
+	DEFINE(IA64_SAL_OS_STATE_SIZE,
+	       sizeof (struct ia64_sal_os_state));
+	DEFINE(IA64_PMSA_GR_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_gr));
+	DEFINE(IA64_PMSA_BANK1_GR_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_bank1_gr));
+	DEFINE(IA64_PMSA_PR_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_pr));
+	DEFINE(IA64_PMSA_BR0_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_br0));
+	DEFINE(IA64_PMSA_RSC_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_rsc));
+	DEFINE(IA64_PMSA_IIP_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_iip));
+	DEFINE(IA64_PMSA_IPSR_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_ipsr));
+	DEFINE(IA64_PMSA_IFS_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_ifs));
+	DEFINE(IA64_PMSA_XIP_OFFSET,
+	       offsetof (struct pal_min_state_area_s, pmsa_xip));
+	BLANK();
+
 	/* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
 	DEFINE(IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET, offsetof (struct time_interpolator, addr));
 	DEFINE(IA64_TIME_INTERPOLATOR_SOURCE_OFFSET, offsetof (struct time_interpolator, source));
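For readers unfamiliar with asm-offsets.c: the file is never linked into the kernel. It is compiled to assembly and a build script extracts the marked constants into a generated header, which is how mca_asm.S below can use IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET and the IA64_PMSA_* offsets without parsing C headers. A rough sketch of the mechanism, with an abbreviated stand-in struct; the exact macro spelling and the real pal_min_state_area_s layout live in the kernel build system and asm/pal.h, so treat the details here as assumptions.

#include <stddef.h>

/* Emit a recognizable "->NAME value" marker into the generated assembly;
 * the build script greps these markers to produce asm-offsets.h. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define BLANK() asm volatile("\n->" : : )

struct pal_min_state_area {		/* abbreviated stand-in, not the real layout */
	unsigned long pmsa_nat_bits;
	unsigned long pmsa_gr[31];
	unsigned long pmsa_bank1_gr[16];
};

void foo(void)
{
	/* Compiling this function with -S yields lines like
	 * "->IA64_PMSA_GR_OFFSET 8 offsetof(...)" in the .s output. */
	DEFINE(IA64_PMSA_GR_OFFSET,
	       offsetof(struct pal_min_state_area, pmsa_gr));
	BLANK();
}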
-1
arch/ia64/kernel/ivt.S
···
 # define DBG_FAULT(i)
 #endif
 
-#define MINSTATE_VIRT	/* needed by minstate.h */
 #include "minstate.h"
 
 #define FAULT(n)						\
+507 -318
arch/ia64/kernel/mca.c
···
  *            Delete dead variables and functions.
  *            Reorder to remove the need for forward declarations and to consolidate
  *            related code.
+ *
+ * 2005-08-12 Keith Owens <kaos@sgi.com>
+ *	      Convert MCA/INIT handlers to use per event stacks and SAL/OS state.
  */
 #include <linux/config.h>
 #include <linux/types.h>
···
 #include <asm/irq.h>
 #include <asm/hw_irq.h>
 
+#include "entry.h"
+
 #if defined(IA64_MCA_DEBUG_INFO)
 # define IA64_MCA_DEBUG(fmt...)	printk(fmt)
 #else
···
 #endif
 
 /* Used by mca_asm.S */
-ia64_mca_sal_to_os_state_t	ia64_sal_to_os_handoff_state;
-ia64_mca_os_to_sal_state_t	ia64_os_to_sal_handoff_state;
-u64				ia64_mca_serialize;
+u32				ia64_mca_serialize;
 DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
 DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
 DEFINE_PER_CPU(u64, ia64_mca_pal_pte);	    /* PTE to map PAL code */
···
 unsigned long __per_cpu_mca[NR_CPUS];
 
 /* In mca_asm.S */
-extern void			ia64_monarch_init_handler (void);
-extern void			ia64_slave_init_handler (void);
+extern void			ia64_os_init_dispatch_monarch (void);
+extern void			ia64_os_init_dispatch_slave (void);
+
+static int monarch_cpu = -1;
 
 static ia64_mc_info_t		ia64_mc_info;
···
  *  This function retrieves a specified error record type from SAL
  *  and wakes up any processes waiting for error records.
  *
- *  Inputs  :   sal_info_type   (Type of error record MCA/CMC/CPE/INIT)
+ *  Inputs  :   sal_info_type   (Type of error record MCA/CMC/CPE)
+ *              FIXME: remove MCA and irq_safe.
  */
 static void
 ia64_mca_log_sal_error_record(int sal_info_type)
···
 	u8 *buffer;
 	sal_log_record_header_t *rh;
 	u64 size;
-	int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA && sal_info_type != SAL_INFO_TYPE_INIT;
+	int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA;
 #ifdef IA64_MCA_DEBUG_INFO
 	static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
 #endif
···
 }
 
 #endif /* CONFIG_ACPI */
-
-static void
-show_min_state (pal_min_state_area_t *minstate)
-{
-	u64 iip = minstate->pmsa_iip + ((struct ia64_psr *)(&minstate->pmsa_ipsr))->ri;
-	u64 xip = minstate->pmsa_xip + ((struct ia64_psr *)(&minstate->pmsa_xpsr))->ri;
-
-	printk("NaT bits\t%016lx\n", minstate->pmsa_nat_bits);
-	printk("pr\t\t%016lx\n", minstate->pmsa_pr);
-	printk("b0\t\t%016lx ", minstate->pmsa_br0); print_symbol("%s\n", minstate->pmsa_br0);
-	printk("ar.rsc\t\t%016lx\n", minstate->pmsa_rsc);
-	printk("cr.iip\t\t%016lx ", iip); print_symbol("%s\n", iip);
-	printk("cr.ipsr\t\t%016lx\n", minstate->pmsa_ipsr);
-	printk("cr.ifs\t\t%016lx\n", minstate->pmsa_ifs);
-	printk("xip\t\t%016lx ", xip); print_symbol("%s\n", xip);
-	printk("xpsr\t\t%016lx\n", minstate->pmsa_xpsr);
-	printk("xfs\t\t%016lx\n", minstate->pmsa_xfs);
-	printk("b1\t\t%016lx ", minstate->pmsa_br1);
-	print_symbol("%s\n", minstate->pmsa_br1);
-
-	printk("\nstatic registers r0-r15:\n");
-	printk(" r0- 3 %016lx %016lx %016lx %016lx\n",
-	       0UL, minstate->pmsa_gr[0], minstate->pmsa_gr[1], minstate->pmsa_gr[2]);
-	printk(" r4- 7 %016lx %016lx %016lx %016lx\n",
-	       minstate->pmsa_gr[3], minstate->pmsa_gr[4],
-	       minstate->pmsa_gr[5], minstate->pmsa_gr[6]);
-	printk(" r8-11 %016lx %016lx %016lx %016lx\n",
-	       minstate->pmsa_gr[7], minstate->pmsa_gr[8],
-	       minstate->pmsa_gr[9], minstate->pmsa_gr[10]);
-	printk("r12-15 %016lx %016lx %016lx %016lx\n",
-	       minstate->pmsa_gr[11], minstate->pmsa_gr[12],
-	       minstate->pmsa_gr[13], minstate->pmsa_gr[14]);
-
-	printk("\nbank 0:\n");
-	printk("r16-19 %016lx %016lx %016lx %016lx\n",
-	       minstate->pmsa_bank0_gr[0], minstate->pmsa_bank0_gr[1],
-	       minstate->pmsa_bank0_gr[2], minstate->pmsa_bank0_gr[3]);
-	printk("r20-23 %016lx %016lx %016lx %016lx\n",
-	       minstate->pmsa_bank0_gr[4], minstate->pmsa_bank0_gr[5],
-	       minstate->pmsa_bank0_gr[6], minstate->pmsa_bank0_gr[7]);
-	printk("r24-27 %016lx %016lx %016lx %016lx\n",
-	       minstate->pmsa_bank0_gr[8], minstate->pmsa_bank0_gr[9],
-	       minstate->pmsa_bank0_gr[10], minstate->pmsa_bank0_gr[11]);
-	printk("r28-31 %016lx %016lx %016lx %016lx\n",
-	       minstate->pmsa_bank0_gr[12], minstate->pmsa_bank0_gr[13],
-	       minstate->pmsa_bank0_gr[14], minstate->pmsa_bank0_gr[15]);
-
-	printk("\nbank 1:\n");
-	printk("r16-19 %016lx %016lx %016lx %016lx\n",
-	       minstate->pmsa_bank1_gr[0], minstate->pmsa_bank1_gr[1],
-	       minstate->pmsa_bank1_gr[2], minstate->pmsa_bank1_gr[3]);
-	printk("r20-23 %016lx %016lx %016lx %016lx\n",
-	       minstate->pmsa_bank1_gr[4], minstate->pmsa_bank1_gr[5],
-	       minstate->pmsa_bank1_gr[6], minstate->pmsa_bank1_gr[7]);
-	printk("r24-27 %016lx %016lx %016lx %016lx\n",
-	       minstate->pmsa_bank1_gr[8], minstate->pmsa_bank1_gr[9],
-	       minstate->pmsa_bank1_gr[10], minstate->pmsa_bank1_gr[11]);
-	printk("r28-31 %016lx %016lx %016lx %016lx\n",
-	       minstate->pmsa_bank1_gr[12], minstate->pmsa_bank1_gr[13],
-	       minstate->pmsa_bank1_gr[14], minstate->pmsa_bank1_gr[15]);
-}
-
-static void
-fetch_min_state (pal_min_state_area_t *ms, struct pt_regs *pt, struct switch_stack *sw)
-{
-	u64 *dst_banked, *src_banked, bit, shift, nat_bits;
-	int i;
-
-	/*
-	 * First, update the pt-regs and switch-stack structures with the contents stored
-	 * in the min-state area:
-	 */
-	if (((struct ia64_psr *) &ms->pmsa_ipsr)->ic == 0) {
-		pt->cr_ipsr = ms->pmsa_xpsr;
-		pt->cr_iip = ms->pmsa_xip;
-		pt->cr_ifs = ms->pmsa_xfs;
-	} else {
-		pt->cr_ipsr = ms->pmsa_ipsr;
-		pt->cr_iip = ms->pmsa_iip;
-		pt->cr_ifs = ms->pmsa_ifs;
-	}
-	pt->ar_rsc = ms->pmsa_rsc;
-	pt->pr = ms->pmsa_pr;
-	pt->r1 = ms->pmsa_gr[0];
-	pt->r2 = ms->pmsa_gr[1];
-	pt->r3 = ms->pmsa_gr[2];
-	sw->r4 = ms->pmsa_gr[3];
-	sw->r5 = ms->pmsa_gr[4];
-	sw->r6 = ms->pmsa_gr[5];
-	sw->r7 = ms->pmsa_gr[6];
-	pt->r8 = ms->pmsa_gr[7];
-	pt->r9 = ms->pmsa_gr[8];
-	pt->r10 = ms->pmsa_gr[9];
-	pt->r11 = ms->pmsa_gr[10];
-	pt->r12 = ms->pmsa_gr[11];
-	pt->r13 = ms->pmsa_gr[12];
-	pt->r14 = ms->pmsa_gr[13];
-	pt->r15 = ms->pmsa_gr[14];
-	dst_banked = &pt->r16;	/* r16-r31 are contiguous in struct pt_regs */
-	src_banked = ms->pmsa_bank1_gr;
-	for (i = 0; i < 16; ++i)
-		dst_banked[i] = src_banked[i];
-	pt->b0 = ms->pmsa_br0;
-	sw->b1 = ms->pmsa_br1;
-
-	/* construct the NaT bits for the pt-regs structure: */
-#	define PUT_NAT_BIT(dst, addr)					\
-	do {								\
-		bit = nat_bits & 1; nat_bits >>= 1;			\
-		shift = ((unsigned long) addr >> 3) & 0x3f;		\
-		dst = ((dst) & ~(1UL << shift)) | (bit << shift);	\
-	} while (0)
-
-	/* Rotate the saved NaT bits such that bit 0 corresponds to pmsa_gr[0]: */
-	shift = ((unsigned long) &ms->pmsa_gr[0] >> 3) & 0x3f;
-	nat_bits = (ms->pmsa_nat_bits >> shift) | (ms->pmsa_nat_bits << (64 - shift));
-
-	PUT_NAT_BIT(sw->caller_unat, &pt->r1);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r2);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r3);
-	PUT_NAT_BIT(sw->ar_unat, &sw->r4);
-	PUT_NAT_BIT(sw->ar_unat, &sw->r5);
-	PUT_NAT_BIT(sw->ar_unat, &sw->r6);
-	PUT_NAT_BIT(sw->ar_unat, &sw->r7);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r8);	PUT_NAT_BIT(sw->caller_unat, &pt->r9);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r10);	PUT_NAT_BIT(sw->caller_unat, &pt->r11);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r12);	PUT_NAT_BIT(sw->caller_unat, &pt->r13);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r14);	PUT_NAT_BIT(sw->caller_unat, &pt->r15);
-	nat_bits >>= 16;	/* skip over bank0 NaT bits */
-	PUT_NAT_BIT(sw->caller_unat, &pt->r16);	PUT_NAT_BIT(sw->caller_unat, &pt->r17);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r18);	PUT_NAT_BIT(sw->caller_unat, &pt->r19);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r20);	PUT_NAT_BIT(sw->caller_unat, &pt->r21);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r22);	PUT_NAT_BIT(sw->caller_unat, &pt->r23);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r24);	PUT_NAT_BIT(sw->caller_unat, &pt->r25);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r26);	PUT_NAT_BIT(sw->caller_unat, &pt->r27);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r28);	PUT_NAT_BIT(sw->caller_unat, &pt->r29);
-	PUT_NAT_BIT(sw->caller_unat, &pt->r30);	PUT_NAT_BIT(sw->caller_unat, &pt->r31);
-}
-
-static void
-init_handler_platform (pal_min_state_area_t *ms,
-		       struct pt_regs *pt, struct switch_stack *sw)
-{
-	struct unw_frame_info info;
-
-	/* if a kernel debugger is available call it here else just dump the registers */
-
-	/*
-	 * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000, INIT can be
-	 * generated via the BMC's command-line interface, but since the console is on the
-	 * same serial line, the user will need some time to switch out of the BMC before
-	 * the dump begins.
-	 */
-	printk("Delaying for 5 seconds...\n");
-	udelay(5*1000000);
-	show_min_state(ms);
-
-	printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm);
-	fetch_min_state(ms, pt, sw);
-	unw_init_from_interruption(&info, current, pt, sw);
-	ia64_do_show_stack(&info, NULL);
-
-	if (read_trylock(&tasklist_lock)) {
-		struct task_struct *g, *t;
-		do_each_thread (g, t) {
-			if (t == current)
-				continue;
-
-			printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
-			show_stack(t, NULL);
-		} while_each_thread (g, t);
-	}
-
-	printk("\nINIT dump complete.  Please reboot now.\n");
-	while (1);			/* hang city if no debugger */
-}
 
 #ifdef CONFIG_ACPI
 /*
···
 }
 
 /*
- * ia64_mca_wakeup_ipi_wait
- *
- *	Wait for the inter-cpu interrupt to be sent by the
- *	monarch processor once it is done with handling the
- *	MCA.
- *
- *	Inputs  : None
- *	Outputs : None
- */
-static void
-ia64_mca_wakeup_ipi_wait(void)
-{
-	int	irr_num = (IA64_MCA_WAKEUP_VECTOR >> 6);
-	int	irr_bit = (IA64_MCA_WAKEUP_VECTOR & 0x3f);
-	u64	irr = 0;
-
-	do {
-		switch(irr_num) {
-		      case 0:
-			irr = ia64_getreg(_IA64_REG_CR_IRR0);
-			break;
-		      case 1:
-			irr = ia64_getreg(_IA64_REG_CR_IRR1);
-			break;
-		      case 2:
-			irr = ia64_getreg(_IA64_REG_CR_IRR2);
-			break;
-		      case 3:
-			irr = ia64_getreg(_IA64_REG_CR_IRR3);
-			break;
-		}
-		cpu_relax();
-	} while (!(irr & (1UL << irr_bit))) ;
-}
-
-/*
  * ia64_mca_wakeup
  *
  *	Send an inter-cpu interrupt to wake-up a particular cpu
···
  */
 	ia64_sal_mc_rendez();
 
-	/* Wait for the wakeup IPI from the monarch
-	 * This waiting is done by polling on the wakeup-interrupt
-	 * vector bit in the processor's IRRs
-	 */
-	ia64_mca_wakeup_ipi_wait();
+	/* Wait for the monarch cpu to exit. */
+	while (monarch_cpu != -1)
+	      cpu_relax();	/* spin until monarch leaves */
 
 	/* Enable all interrupts */
 	local_irq_restore(flags);
···
 	return IRQ_HANDLED;
 }
 
-/*
- * ia64_return_to_sal_check
- *
- *	This is function called before going back from the OS_MCA handler
- *	to the OS_MCA dispatch code which finally takes the control back
- *	to the SAL.
- *	The main purpose of this routine is to setup the OS_MCA to SAL
- *	return state which can be used by the OS_MCA dispatch code
- *	just before going back to SAL.
- *
- *	Inputs  : None
- *	Outputs : None
- */
-
-static void
-ia64_return_to_sal_check(int recover)
-{
-
-	/* Copy over some relevant stuff from the sal_to_os_mca_handoff
-	 * so that it can be used at the time of os_mca_to_sal_handoff
-	 */
-	ia64_os_to_sal_handoff_state.imots_sal_gp =
-		ia64_sal_to_os_handoff_state.imsto_sal_gp;
-
-	ia64_os_to_sal_handoff_state.imots_sal_check_ra =
-		ia64_sal_to_os_handoff_state.imsto_sal_check_ra;
-
-	if (recover)
-		ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED;
-	else
-		ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_COLD_BOOT;
-
-	/* Default = tell SAL to return to same context */
-	ia64_os_to_sal_handoff_state.imots_context = IA64_MCA_SAME_CONTEXT;
-
-	ia64_os_to_sal_handoff_state.imots_new_min_state =
-		(u64 *)ia64_sal_to_os_handoff_state.pal_min_state;
-
-}
-
 /* Function pointer for extra MCA recovery */
 int (*ia64_mca_ucmc_extension)
-	(void*,ia64_mca_sal_to_os_state_t*,ia64_mca_os_to_sal_state_t*)
+	(void*,struct ia64_sal_os_state*)
 	= NULL;
 
 int
-ia64_reg_MCA_extension(void *fn)
+ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *))
 {
 	if (ia64_mca_ucmc_extension)
 		return 1;
···
 EXPORT_SYMBOL(ia64_reg_MCA_extension);
 EXPORT_SYMBOL(ia64_unreg_MCA_extension);
 
+
+static inline void
+copy_reg(const u64 *fr, u64 fnat, u64 *tr, u64 *tnat)
+{
+	u64 fslot, tslot, nat;
+	*tr = *fr;
+	fslot = ((unsigned long)fr >> 3) & 63;
+	tslot = ((unsigned long)tr >> 3) & 63;
+	*tnat &= ~(1UL << tslot);
+	nat = (fnat >> fslot) & 1;
+	*tnat |= (nat << tslot);
+}
+
+/* On entry to this routine, we are running on the per cpu stack, see
+ * mca_asm.h.  The original stack has not been touched by this event.  Some of
+ * the original stack's registers will be in the RBS on this stack.  This stack
+ * also contains a partial pt_regs and switch_stack, the rest of the data is in
+ * PAL minstate.
+ *
+ * The first thing to do is modify the original stack to look like a blocked
+ * task so we can run backtrace on the original task.  Also mark the per cpu
+ * stack as current to ensure that we use the correct task state, it also means
+ * that we can do backtrace on the MCA/INIT handler code itself.
+ */
+
+static task_t *
+ia64_mca_modify_original_stack(struct pt_regs *regs,
+		const struct switch_stack *sw,
+		struct ia64_sal_os_state *sos,
+		const char *type)
+{
+	char *p, comm[sizeof(current->comm)];
+	ia64_va va;
+	extern char ia64_leave_kernel[];	/* Need asm address, not function descriptor */
+	const pal_min_state_area_t *ms = sos->pal_min_state;
+	task_t *previous_current;
+	struct pt_regs *old_regs;
+	struct switch_stack *old_sw;
+	unsigned size = sizeof(struct pt_regs) +
+			sizeof(struct switch_stack) + 16;
+	u64 *old_bspstore, *old_bsp;
+	u64 *new_bspstore, *new_bsp;
+	u64 old_unat, old_rnat, new_rnat, nat;
+	u64 slots, loadrs = regs->loadrs;
+	u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1];
+	u64 ar_bspstore = regs->ar_bspstore;
+	u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16);
+	const u64 *bank;
+	const char *msg;
+	int cpu = smp_processor_id();
+
+	previous_current = curr_task(cpu);
+	set_curr_task(cpu, current);
+	if ((p = strchr(current->comm, ' ')))
+		*p = '\0';
+
+	/* Best effort attempt to cope with MCA/INIT delivered while in
+	 * physical mode.
+	 */
+	regs->cr_ipsr = ms->pmsa_ipsr;
+	if (ia64_psr(regs)->dt == 0) {
+		va.l = r12;
+		if (va.f.reg == 0) {
+			va.f.reg = 7;
+			r12 = va.l;
+		}
+		va.l = r13;
+		if (va.f.reg == 0) {
+			va.f.reg = 7;
+			r13 = va.l;
+		}
+	}
+	if (ia64_psr(regs)->rt == 0) {
+		va.l = ar_bspstore;
+		if (va.f.reg == 0) {
+			va.f.reg = 7;
+			ar_bspstore = va.l;
+		}
+		va.l = ar_bsp;
+		if (va.f.reg == 0) {
+			va.f.reg = 7;
+			ar_bsp = va.l;
+		}
+	}
+
+	/* mca_asm.S ia64_old_stack() cannot assume that the dirty registers
+	 * have been copied to the old stack, the old stack may fail the
+	 * validation tests below.  So ia64_old_stack() must restore the dirty
+	 * registers from the new stack.  The old and new bspstore probably
+	 * have different alignments, so loadrs calculated on the old bsp
+	 * cannot be used to restore from the new bsp.  Calculate a suitable
+	 * loadrs for the new stack and save it in the new pt_regs, where
+	 * ia64_old_stack() can get it.
+	 */
+	old_bspstore = (u64 *)ar_bspstore;
+	old_bsp = (u64 *)ar_bsp;
+	slots = ia64_rse_num_regs(old_bspstore, old_bsp);
+	new_bspstore = (u64 *)((u64)current + IA64_RBS_OFFSET);
+	new_bsp = ia64_rse_skip_regs(new_bspstore, slots);
+	regs->loadrs = (new_bsp - new_bspstore) * 8 << 16;
+
+	/* Verify the previous stack state before we change it */
+	if (user_mode(regs)) {
+		msg = "occurred in user space";
+		goto no_mod;
+	}
+	if (r13 != sos->prev_IA64_KR_CURRENT) {
+		msg = "inconsistent previous current and r13";
+		goto no_mod;
+	}
+	if ((r12 - r13) >= KERNEL_STACK_SIZE) {
+		msg = "inconsistent r12 and r13";
+		goto no_mod;
+	}
+	if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) {
+		msg = "inconsistent ar.bspstore and r13";
+		goto no_mod;
+	}
+	va.p = old_bspstore;
+	if (va.f.reg < 5) {
+		msg = "old_bspstore is in the wrong region";
+		goto no_mod;
+	}
+	if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) {
+		msg = "inconsistent ar.bsp and r13";
+		goto no_mod;
+	}
+	size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
+	if (ar_bspstore + size > r12) {
+		msg = "no room for blocked state";
+		goto no_mod;
+	}
+
+	/* Change the comm field on the MCA/INT task to include the pid that
+	 * was interrupted, it makes for easier debugging.  If that pid was 0
+	 * (swapper or nested MCA/INIT) then use the start of the previous comm
+	 * field suffixed with its cpu.
+	 */
+	if (previous_current->pid)
+		snprintf(comm, sizeof(comm), "%s %d",
+			current->comm, previous_current->pid);
+	else {
+		int l;
+		if ((p = strchr(previous_current->comm, ' ')))
+			l = p - previous_current->comm;
+		else
+			l = strlen(previous_current->comm);
+		snprintf(comm, sizeof(comm), "%s %*s %d",
+			current->comm, l, previous_current->comm,
+			previous_current->thread_info->cpu);
+	}
+	memcpy(current->comm, comm, sizeof(current->comm));
+
+	/* Make the original task look blocked.  First stack a struct pt_regs,
+	 * describing the state at the time of interrupt.  mca_asm.S built a
+	 * partial pt_regs, copy it and fill in the blanks using minstate.
+	 */
+	p = (char *)r12 - sizeof(*regs);
+	old_regs = (struct pt_regs *)p;
+	memcpy(old_regs, regs, sizeof(*regs));
+	/* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
+	 * pmsa_{xip,xpsr,xfs}
+	 */
+	if (ia64_psr(regs)->ic) {
+		old_regs->cr_iip = ms->pmsa_iip;
+		old_regs->cr_ipsr = ms->pmsa_ipsr;
+		old_regs->cr_ifs = ms->pmsa_ifs;
+	} else {
+		old_regs->cr_iip = ms->pmsa_xip;
+		old_regs->cr_ipsr = ms->pmsa_xpsr;
+		old_regs->cr_ifs = ms->pmsa_xfs;
+	}
+	old_regs->pr = ms->pmsa_pr;
+	old_regs->b0 = ms->pmsa_br0;
+	old_regs->loadrs = loadrs;
+	old_regs->ar_rsc = ms->pmsa_rsc;
+	old_unat = old_regs->ar_unat;
+	copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &old_regs->r1, &old_unat);
+	copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &old_regs->r2, &old_unat);
+	copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &old_regs->r3, &old_unat);
+	copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &old_regs->r8, &old_unat);
+	copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &old_regs->r9, &old_unat);
+	copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &old_regs->r10, &old_unat);
+	copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &old_regs->r11, &old_unat);
+	copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &old_regs->r12, &old_unat);
+	copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &old_regs->r13, &old_unat);
+	copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &old_regs->r14, &old_unat);
+	copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &old_regs->r15, &old_unat);
+	if (ia64_psr(old_regs)->bn)
+		bank = ms->pmsa_bank1_gr;
+	else
+		bank = ms->pmsa_bank0_gr;
+	copy_reg(&bank[16-16], ms->pmsa_nat_bits, &old_regs->r16, &old_unat);
+	copy_reg(&bank[17-16], ms->pmsa_nat_bits, &old_regs->r17, &old_unat);
+	copy_reg(&bank[18-16], ms->pmsa_nat_bits, &old_regs->r18, &old_unat);
+	copy_reg(&bank[19-16], ms->pmsa_nat_bits, &old_regs->r19, &old_unat);
+	copy_reg(&bank[20-16], ms->pmsa_nat_bits, &old_regs->r20, &old_unat);
+	copy_reg(&bank[21-16], ms->pmsa_nat_bits, &old_regs->r21, &old_unat);
+	copy_reg(&bank[22-16], ms->pmsa_nat_bits, &old_regs->r22, &old_unat);
+	copy_reg(&bank[23-16], ms->pmsa_nat_bits, &old_regs->r23, &old_unat);
+	copy_reg(&bank[24-16], ms->pmsa_nat_bits, &old_regs->r24, &old_unat);
+	copy_reg(&bank[25-16], ms->pmsa_nat_bits, &old_regs->r25, &old_unat);
+	copy_reg(&bank[26-16], ms->pmsa_nat_bits, &old_regs->r26, &old_unat);
+	copy_reg(&bank[27-16], ms->pmsa_nat_bits, &old_regs->r27, &old_unat);
+	copy_reg(&bank[28-16], ms->pmsa_nat_bits, &old_regs->r28, &old_unat);
+	copy_reg(&bank[29-16], ms->pmsa_nat_bits, &old_regs->r29, &old_unat);
+	copy_reg(&bank[30-16], ms->pmsa_nat_bits, &old_regs->r30, &old_unat);
+	copy_reg(&bank[31-16], ms->pmsa_nat_bits, &old_regs->r31, &old_unat);
+
+	/* Next stack a struct switch_stack.  mca_asm.S built a partial
+	 * switch_stack, copy it and fill in the blanks using pt_regs and
+	 * minstate.
+	 *
+	 * In the synthesized switch_stack, b0 points to ia64_leave_kernel,
+	 * ar.pfs is set to 0.
+	 *
+	 * unwind.c::unw_unwind() does special processing for interrupt frames.
+	 * It checks if the PRED_NON_SYSCALL predicate is set, if the predicate
+	 * is clear then unw_unwind() does _not_ adjust bsp over pt_regs.  Not
+	 * that this is documented, of course.  Set PRED_NON_SYSCALL in the
+	 * switch_stack on the original stack so it will unwind correctly when
+	 * unwind.c reads pt_regs.
+	 *
+	 * thread.ksp is updated to point to the synthesized switch_stack.
+	 */
+	p -= sizeof(struct switch_stack);
+	old_sw = (struct switch_stack *)p;
+	memcpy(old_sw, sw, sizeof(*sw));
+	old_sw->caller_unat = old_unat;
+	old_sw->ar_fpsr = old_regs->ar_fpsr;
+	copy_reg(&ms->pmsa_gr[4-1], ms->pmsa_nat_bits, &old_sw->r4, &old_unat);
+	copy_reg(&ms->pmsa_gr[5-1], ms->pmsa_nat_bits, &old_sw->r5, &old_unat);
+	copy_reg(&ms->pmsa_gr[6-1], ms->pmsa_nat_bits, &old_sw->r6, &old_unat);
+	copy_reg(&ms->pmsa_gr[7-1], ms->pmsa_nat_bits, &old_sw->r7, &old_unat);
+	old_sw->b0 = (u64)ia64_leave_kernel;
+	old_sw->b1 = ms->pmsa_br1;
+	old_sw->ar_pfs = 0;
+	old_sw->ar_unat = old_unat;
+	old_sw->pr = old_regs->pr | (1UL << PRED_NON_SYSCALL);
+	previous_current->thread.ksp = (u64)p - 16;
+
+	/* Finally copy the original stack's registers back to its RBS.
+	 * Registers from ar.bspstore through ar.bsp at the time of the event
+	 * are in the current RBS, copy them back to the original stack.  The
+	 * copy must be done register by register because the original bspstore
+	 * and the current one have different alignments, so the saved RNAT
+	 * data occurs at different places.
+	 *
+	 * mca_asm does cover, so the old_bsp already includes all registers at
+	 * the time of MCA/INIT.  It also does flushrs, so all registers before
+	 * this function have been written to backing store on the MCA/INIT
+	 * stack.
+	 */
+	new_rnat = ia64_get_rnat(ia64_rse_rnat_addr(new_bspstore));
+	old_rnat = regs->ar_rnat;
+	while (slots--) {
+		if (ia64_rse_is_rnat_slot(new_bspstore)) {
+			new_rnat = ia64_get_rnat(new_bspstore++);
+		}
+		if (ia64_rse_is_rnat_slot(old_bspstore)) {
+			*old_bspstore++ = old_rnat;
+			old_rnat = 0;
+		}
+		nat = (new_rnat >> ia64_rse_slot_num(new_bspstore)) & 1UL;
+		old_rnat &= ~(1UL << ia64_rse_slot_num(old_bspstore));
+		old_rnat |= (nat << ia64_rse_slot_num(old_bspstore));
+		*old_bspstore++ = *new_bspstore++;
+	}
+	old_sw->ar_bspstore = (unsigned long)old_bspstore;
+	old_sw->ar_rnat = old_rnat;
+
+	sos->prev_task = previous_current;
+	return previous_current;
+
+no_mod:
+	printk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
+			smp_processor_id(), type, msg);
+	return previous_current;
+}
+
+/* The monarch/slave interaction is based on monarch_cpu and requires that all
+ * slaves have entered rendezvous before the monarch leaves.  If any cpu has
+ * not entered rendezvous yet then wait a bit.  The assumption is that any
+ * slave that has not rendezvoused after a reasonable time is never going to do
+ * so.  In this context, slave includes cpus that respond to the MCA rendezvous
+ * interrupt, as well as cpus that receive the INIT slave event.
+ */
+
+static void
+ia64_wait_for_slaves(int monarch)
+{
+	int c, wait = 0;
+	for_each_online_cpu(c) {
+		if (c == monarch)
+			continue;
+		if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
+			udelay(1000);		/* short wait first */
+			wait = 1;
+			break;
+		}
+	}
+	if (!wait)
+		return;
+	for_each_online_cpu(c) {
+		if (c == monarch)
+			continue;
+		if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
+			udelay(5*1000000);	/* wait 5 seconds for slaves (arbitrary) */
+			break;
+		}
+	}
+}
+
 /*
- * ia64_mca_ucmc_handler
+ * ia64_mca_handler
  *
  *	This is uncorrectable machine check handler called from OS_MCA
  *	dispatch code which is in turn called from SAL_CHECK().
···
  *	further MCA logging is enabled by clearing logs.
  *	Monarch also has the duty of sending wakeup-IPIs to pull the
  *	slave processors out of rendezvous spinloop.
- *
- *	Inputs  : None
- *	Outputs : None
  */
 void
-ia64_mca_ucmc_handler(void)
+ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
+		 struct ia64_sal_os_state *sos)
 {
 	pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
-		&ia64_sal_to_os_handoff_state.proc_state_param;
-	int recover;
+		&sos->proc_state_param;
+	int recover, cpu = smp_processor_id();
+	task_t *previous_current;
+
+	oops_in_progress = 1;	/* FIXME: make printk NMI/MCA/INIT safe */
+	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
+	monarch_cpu = cpu;
+	ia64_wait_for_slaves(cpu);
+
+	/* Wakeup all the processors which are spinning in the rendezvous loop.
+	 * They will leave SAL, then spin in the OS with interrupts disabled
+	 * until this monarch cpu leaves the MCA handler.  That gets control
+	 * back to the OS so we can backtrace the other cpus, backtrace when
+	 * spinning in SAL does not work.
+	 */
+	ia64_mca_wakeup_all();
 
 	/* Get the MCA error record and log it */
 	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
···
 	/* TLB error is only exist in this SAL error record */
 	recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
 	/* other error recovery */
-	   || (ia64_mca_ucmc_extension 
+	   || (ia64_mca_ucmc_extension
 		&& ia64_mca_ucmc_extension(
 			IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
-			&ia64_sal_to_os_handoff_state,
-			&ia64_os_to_sal_handoff_state));
+			sos));
 
 	if (recover) {
 		sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
 		rh->severity = sal_log_severity_corrected;
 		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
+		sos->os_status = IA64_MCA_CORRECTED;
 	}
-	/*
-	 * Wakeup all the processors which are spinning in the rendezvous
-	 * loop.
-	 */
-	ia64_mca_wakeup_all();
 
-	/* Return to SAL */
-	ia64_return_to_sal_check(recover);
+	set_curr_task(cpu, previous_current);
+	monarch_cpu = -1;
 }
 
 static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
···
 /*
  * C portion of the OS INIT handler
  *
- * Called from ia64_monarch_init_handler
+ * Called from ia64_os_init_dispatch
  *
- * Inputs: pointer to pt_regs where processor info was saved.
+ * Inputs: pointer to pt_regs where processor info was saved.  SAL/OS state for
+ * this event.  This code is used for both monarch and slave INIT events, see
+ * sos->monarch.
  *
- * Returns:
- *   0 if SAL must warm boot the System
- *   1 if SAL must return to interrupted context using PAL_MC_RESUME
- *
+ * All INIT events switch to the INIT stack and change the previous process to
+ * blocked status.  If one of the INIT events is the monarch then we are
+ * probably processing the nmi button/command.  Use the monarch cpu to dump all
+ * the processes.  The slave INIT events all spin until the monarch cpu
+ * returns.  We can also get INIT slave events for MCA, in which case the MCA
+ * process is the monarch.
  */
-void
-ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw)
-{
-	pal_min_state_area_t *ms;
 
-	oops_in_progress = 1;	/* avoid deadlock in printk, but it makes recovery dodgy */
+void
+ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
+		  struct ia64_sal_os_state *sos)
+{
+	static atomic_t slaves;
+	static atomic_t monarchs;
+	task_t *previous_current;
+	int cpu = smp_processor_id(), c;
+	struct task_struct *g, *t;
+
+	oops_in_progress = 1;	/* FIXME: make printk NMI/MCA/INIT safe */
 	console_loglevel = 15;	/* make sure printks make it to console */
 
-	printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
-		ia64_sal_to_os_handoff_state.proc_state_param);
+	printk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
+		sos->proc_state_param, cpu, sos->monarch);
+	salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0);
+
+	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "INIT");
+	sos->os_status = IA64_INIT_RESUME;
+
+	/* FIXME: Workaround for broken proms that drive all INIT events as
+	 * slaves.  The last slave that enters is promoted to be a monarch.
+	 * Remove this code in September 2006, that gives platforms a year to
+	 * fix their proms and get their customers updated.
+	 */
+	if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) {
+		printk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n",
+		       __FUNCTION__, cpu);
+		atomic_dec(&slaves);
+		sos->monarch = 1;
+	}
+
+	/* FIXME: Workaround for broken proms that drive all INIT events as
+	 * monarchs.  Second and subsequent monarchs are demoted to slaves.
+	 * Remove this code in September 2006, that gives platforms a year to
+	 * fix their proms and get their customers updated.
+	 */
+	if (sos->monarch && atomic_add_return(1, &monarchs) > 1) {
+		printk(KERN_WARNING "%s: Demoting cpu %d to slave.\n",
+		       __FUNCTION__, cpu);
+		atomic_dec(&monarchs);
+		sos->monarch = 0;
+	}
+
+	if (!sos->monarch) {
+		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
+		while (monarch_cpu == -1)
+		       cpu_relax();	/* spin until monarch enters */
+		while (monarch_cpu != -1)
+		       cpu_relax();	/* spin until monarch leaves */
+		printk("Slave on cpu %d returning to normal service.\n", cpu);
+		set_curr_task(cpu, previous_current);
+		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
+		atomic_dec(&slaves);
+		return;
+	}
+
+	monarch_cpu = cpu;
 
 	/*
-	 * Address of minstate area provided by PAL is physical,
-	 * uncacheable (bit 63 set). Convert to Linux virtual
-	 * address in region 6.
+	 * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000, INIT can be
+	 * generated via the BMC's command-line interface, but since the console is on the
+	 * same serial line, the user will need some time to switch out of the BMC before
+	 * the dump begins.
 	 */
-	ms = (pal_min_state_area_t *)(ia64_sal_to_os_handoff_state.pal_min_state | (6ul<<61));
-
-	init_handler_platform(ms, pt, sw);	/* call platform specific routines */
+	printk("Delaying for 5 seconds...\n");
+	udelay(5*1000000);
+	ia64_wait_for_slaves(cpu);
+	printk(KERN_ERR "Processes interrupted by INIT -");
+	for_each_online_cpu(c) {
+		struct ia64_sal_os_state *s;
+		t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET);
+		s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET);
+		g = s->prev_task;
+		if (g) {
+			if (g->pid)
+				printk(" %d", g->pid);
+			else
+				printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g);
+		}
+	}
+	printk("\n\n");
+	if (read_trylock(&tasklist_lock)) {
+		do_each_thread (g, t) {
+			printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
+			show_stack(t, NULL);
+		} while_each_thread (g, t);
+		read_unlock(&tasklist_lock);
+	}
+	printk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
+	atomic_dec(&monarchs);
+	set_curr_task(cpu, previous_current);
+	monarch_cpu = -1;
+	return;
 }
 
 static int __init
···
 };
 #endif /* CONFIG_ACPI */
 
+/* Minimal format of the MCA/INIT stacks.  The pseudo processes that run on
+ * these stacks can never sleep, they cannot return from the kernel to user
+ * space, they do not appear in a normal ps listing.  So there is no need to
+ * format most of the fields.
+ */
+
+static void
+format_mca_init_stack(void *mca_data, unsigned long offset,
+		const char *type, int cpu)
+{
+	struct task_struct *p = (struct task_struct *)((char *)mca_data + offset);
+	struct thread_info *ti;
+	memset(p, 0, KERNEL_STACK_SIZE);
+	ti = (struct thread_info *)((char *)p + IA64_TASK_SIZE);
+	ti->flags = _TIF_MCA_INIT;
+	ti->preempt_count = 1;
+	ti->task = p;
+	ti->cpu = cpu;
+	p->thread_info = ti;
+	p->state = TASK_UNINTERRUPTIBLE;
+	__set_bit(cpu, &p->cpus_allowed);
+	INIT_LIST_HEAD(&p->tasks);
+	p->parent = p->real_parent = p->group_leader = p;
+	INIT_LIST_HEAD(&p->children);
+	INIT_LIST_HEAD(&p->sibling);
+	strncpy(p->comm, type, sizeof(p->comm)-1);
+}
+
 /* Do per-CPU MCA-related initialization. */
 
 void __devinit
···
 		int cpu;
 
 		mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu)
-					 * NR_CPUS);
+					 * NR_CPUS + KERNEL_STACK_SIZE);
+		mca_data = (void *)(((unsigned long)mca_data +
+					KERNEL_STACK_SIZE - 1) &
+				(-KERNEL_STACK_SIZE));
 		for (cpu = 0; cpu < NR_CPUS; cpu++) {
+			format_mca_init_stack(mca_data,
+					offsetof(struct ia64_mca_cpu, mca_stack),
+					"MCA", cpu);
+			format_mca_init_stack(mca_data,
+					offsetof(struct ia64_mca_cpu, init_stack),
+					"INIT", cpu);
 			__per_cpu_mca[cpu] = __pa(mca_data);
 			mca_data += sizeof(struct ia64_mca_cpu);
 		}
 	}
 
-        /*
-         * The MCA info structure was allocated earlier and its
-         * physical address saved in __per_cpu_mca[cpu].  Copy that
-         * address * to ia64_mca_data so we can access it as a per-CPU
-         * variable.
-         */
+	/*
+	 * The MCA info structure was allocated earlier and its
+	 * physical address saved in __per_cpu_mca[cpu].  Copy that
+	 * address * to ia64_mca_data so we can access it as a per-CPU
+	 * variable.
+	 */
 	__get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()];
 
 	/*
···
 	__get_cpu_var(ia64_mca_per_cpu_pte) =
 		pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL));
 
-        /*
-         * Also, stash away a copy of the PAL address and the PTE
-         * needed to map it.
-         */
-        pal_vaddr = efi_get_pal_addr();
+	/*
+	 * Also, stash away a copy of the PAL address and the PTE
+	 * needed to map it.
+	 */
+	pal_vaddr = efi_get_pal_addr();
 	if (!pal_vaddr)
 		return;
 	__get_cpu_var(ia64_mca_pal_base) =
···
 void __init
 ia64_mca_init(void)
 {
-	ia64_fptr_t *mon_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
-	ia64_fptr_t *slave_init_ptr = (ia64_fptr_t *)ia64_slave_init_handler;
+	ia64_fptr_t *init_hldlr_ptr_monarch = (ia64_fptr_t *)ia64_os_init_dispatch_monarch;
+	ia64_fptr_t *init_hldlr_ptr_slave = (ia64_fptr_t *)ia64_os_init_dispatch_slave;
 	ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
 	int i;
 	s64 rc;
···
 	 * XXX - disable SAL checksum by setting size to 0, should be
 	 * size of the actual init handler in mca_asm.S.
 	 */
-	ia64_mc_info.imi_monarch_init_handler		= ia64_tpa(mon_init_ptr->fp);
+	ia64_mc_info.imi_monarch_init_handler		= ia64_tpa(init_hldlr_ptr_monarch->fp);
 	ia64_mc_info.imi_monarch_init_handler_size	= 0;
-	ia64_mc_info.imi_slave_init_handler		= ia64_tpa(slave_init_ptr->fp);
+	ia64_mc_info.imi_slave_init_handler		= ia64_tpa(init_hldlr_ptr_slave->fp);
 	ia64_mc_info.imi_slave_init_handler_size	= 0;
 
 	IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__,
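A note on the register backing store (RBS) arithmetic that ia64_mca_modify_original_stack leans on: every 64th doubleword of an IA-64 backing store is an RNAT collection slot rather than a register, which is why the patch copies the old stack's registers one at a time when the two stores have different alignments. The sketch below is modelled on the kernel's include/asm-ia64/rse.h helpers (ia64_rse_slot_num, ia64_rse_is_rnat_slot, ia64_rse_skip_regs, ia64_rse_num_regs); the rse_-prefixed names and simplifications here are illustrative, not the kernel's code.

#include <stdint.h>

/* Slot index (0..63) of a backing-store doubleword within its RNAT group. */
static inline uint64_t rse_slot_num(uint64_t *addr)
{
	return ((uintptr_t)addr >> 3) & 0x3f;
}

/* Slot 63 of each group holds RNAT collection bits, not a register. */
static inline int rse_is_rnat_slot(uint64_t *addr)
{
	return rse_slot_num(addr) == 0x3f;
}

/* Address of the num_regs'th register after addr, skipping RNAT slots. */
static inline uint64_t *rse_skip_regs(uint64_t *addr, long num_regs)
{
	long delta = (long)rse_slot_num(addr) + num_regs;

	if (num_regs < 0)
		delta -= 0x3e;
	return addr + num_regs + delta / 0x3f;
}

/* Number of registers (excluding RNAT slots) between bspstore and bsp;
 * this is the "slots" count the patch copies register by register. */
static inline uint64_t rse_num_regs(uint64_t *bspstore, uint64_t *bsp)
{
	uint64_t slots = (uint64_t)(bsp - bspstore);

	return slots - (rse_slot_num(bspstore) + slots) / 0x40;
}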
+759 -681
arch/ia64/kernel/mca_asm.S
···
 //	      04/11/12 Russ Anderson <rja@sgi.com>
 //		      Added per cpu MCA/INIT stack save areas.
 //
+//	      12/08/05 Keith Owens <kaos@sgi.com>
+//		      Use per cpu MCA/INIT stacks for all data.
+//
 #include <linux/config.h>
 #include <linux/threads.h>
···
 #include <asm/mca_asm.h>
 #include <asm/mca.h>
 
-/*
- * When we get a machine check, the kernel stack pointer is no longer
- * valid, so we need to set a new stack pointer.
- */
-#define	MINSTATE_PHYS	/* Make sure stack access is physical for MINSTATE */
-
-/*
- * Needed for return context to SAL
- */
-#define IA64_MCA_SAME_CONTEXT	0
-#define IA64_MCA_COLD_BOOT	-2
-
-#include "minstate.h"
-
-/*
- * SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec)
- *		1. GR1 = OS GP
- *		2. GR8 = PAL_PROC physical address
- *		3. GR9 = SAL_PROC physical address
- *		4. GR10 = SAL GP (physical)
- *		5. GR11 = Rendez state
- *		6. GR12 = Return address to location within SAL_CHECK
- */
-#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp)		\
-	LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);;	\
-	st8	[_tmp]=r1,0x08;;			\
-	st8	[_tmp]=r8,0x08;;			\
-	st8	[_tmp]=r9,0x08;;			\
-	st8	[_tmp]=r10,0x08;;			\
-	st8	[_tmp]=r11,0x08;;			\
-	st8	[_tmp]=r12,0x08;;			\
-	st8	[_tmp]=r17,0x08;;			\
-	st8	[_tmp]=r18,0x08
-
-/*
- * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec)
- * (p6) is executed if we never entered virtual mode (TLB error)
- * (p7) is executed if we entered virtual mode as expected (normal case)
- *	1. GR8 = OS_MCA return status
- *	2. GR9 = SAL GP (physical)
- *	3. GR10 = 0/1 returning same/new context
- *	4. GR22 = New min state save area pointer
- *	returns ptr to SAL rtn save loc in _tmp
- */
-#define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp)	\
-	movl	_tmp=ia64_os_to_sal_handoff_state;;	\
-	DATA_VA_TO_PA(_tmp);;				\
-	ld8	r8=[_tmp],0x08;;			\
-	ld8	r9=[_tmp],0x08;;			\
-	ld8	r10=[_tmp],0x08;;			\
-	ld8	r22=[_tmp],0x08;;
-	// now _tmp is pointing to SAL rtn save location
-
-/*
- * COLD_BOOT_HANDOFF_STATE() sets ia64_mca_os_to_sal_state
- *	imots_os_status=IA64_MCA_COLD_BOOT
- *	imots_sal_gp=SAL GP
- *	imots_context=IA64_MCA_SAME_CONTEXT
- *	imots_new_min_state=Min state save area pointer
- *	imots_sal_check_ra=Return address to location within SAL_CHECK
- *
- */
-#define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
-	movl	tmp=IA64_MCA_COLD_BOOT;					\
-	movl	sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state);	\
-	movl	os_to_sal_handoff=__pa(ia64_os_to_sal_handoff_state);;	\
-	st8	[os_to_sal_handoff]=tmp,8;;				\
-	ld8	tmp=[sal_to_os_handoff],48;;				\
-	st8	[os_to_sal_handoff]=tmp,8;;				\
-	movl	tmp=IA64_MCA_SAME_CONTEXT;;				\
-	st8	[os_to_sal_handoff]=tmp,8;;				\
-	ld8	tmp=[sal_to_os_handoff],-8;;				\
-	st8	[os_to_sal_handoff]=tmp,8;;				\
-	ld8	tmp=[sal_to_os_handoff];;				\
-	st8	[os_to_sal_handoff]=tmp;;
+#include "entry.h"
 
 #define GET_IA64_MCA_DATA(reg)						\
 	GET_THIS_PADDR(reg, ia64_mca_data)				\
 	;;								\
 	ld8 reg=[reg]
 
-	.global ia64_os_mca_dispatch
-	.global ia64_os_mca_dispatch_end
-	.global ia64_sal_to_os_handoff_state
-	.global ia64_os_to_sal_handoff_state
 	.global ia64_do_tlb_purge
+	.global ia64_os_mca_dispatch
+	.global ia64_os_init_dispatch_monarch
+	.global ia64_os_init_dispatch_slave
 
 	.text
 	.align 16
+
+//StartMain////////////////////////////////////////////////////////////////////
 
 /*
  * Just the TLB purge part is moved to a separate function
···
 	br.sptk.many b1
 	;;
 
-ia64_os_mca_dispatch:
+//EndMain//////////////////////////////////////////////////////////////////////
 
+//StartMain////////////////////////////////////////////////////////////////////
+
+ia64_os_mca_dispatch:
 	// Serialize all MCA processing
 	mov	r3=1;;
 	LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
 ia64_os_mca_spin:
-	xchg8	r4=[r2],r3;;
+	xchg4	r4=[r2],r3;;
 	cmp.ne	p6,p0=r4,r0
 (p6)	br	ia64_os_mca_spin
 
-	// Save the SAL to OS MCA handoff state as defined
-	// by SAL SPEC 3.0
-	// NOTE : The order in which the state gets saved
-	//	  is dependent on the way the C-structure
-	//	  for ia64_mca_sal_to_os_state_t has been
-	//	  defined in include/asm/mca.h
-	SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
+	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
+	LOAD_PHYSICAL(p0,r2,1f)			// return address
+	mov r19=1				// All MCA events are treated as monarch (for now)
+	br.sptk ia64_state_save			// save the state that is not in minstate
+1:
+
+	GET_IA64_MCA_DATA(r2)
+	// Using MCA stack, struct ia64_sal_os_state, variable proc_state_param
 	;;
-
-	// LOG PROCESSOR STATE INFO FROM HERE ON..
-begin_os_mca_dump:
-	br	ia64_os_mca_proc_state_dump;;
-
-ia64_os_mca_done_dump:
-
-	LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
+	add r3=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET, r2
 	;;
-	ld8	r18=[r16]	// Get processor state parameter on existing PALE_CHECK.
+	ld8 r18=[r3]				// Get processor state parameter on existing PALE_CHECK.
 	;;
 	tbit.nz	p6,p7=r18,60
 (p7)	br.spnt	done_tlb_purge_and_reload
···
 	itr.d	dtr[r20]=r16
 	;;
 	srlz.d
-	;;
-	br.sptk.many done_tlb_purge_and_reload
-err:
-	COLD_BOOT_HANDOFF_STATE(r20,r21,r22)
-	br.sptk.many ia64_os_mca_done_restore
 
 done_tlb_purge_and_reload:
 
-	// Setup new stack frame for OS_MCA handling
-	GET_IA64_MCA_DATA(r2)
-	;;
-	add	r3 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
-	add	r2 = IA64_MCA_CPU_RBSTORE_OFFSET, r2
-	;;
-	rse_switch_context(r6,r3,r2);;	// RSC management in this new context
+	// switch to per cpu MCA stack
+	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
+	LOAD_PHYSICAL(p0,r2,1f)			// return address
+	br.sptk ia64_new_stack
+1:
 
+	// everything saved, now we can set the kernel registers
+	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
+	LOAD_PHYSICAL(p0,r2,1f)			// return address
+	br.sptk ia64_set_kernel_registers
+1:
+
+	// This must be done in physical mode
 	GET_IA64_MCA_DATA(r2)
 	;;
-	add	r2 = IA64_MCA_CPU_STACK_OFFSET+IA64_MCA_STACK_SIZE-16, r2
-	;;
-	mov	r12=r2		// establish new stack-pointer
+	mov r7=r2
 
 	// Enter virtual mode from physical mode
 	VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
-ia64_os_mca_virtual_begin:
+
+	// This code returns to SAL via SOS r2, in general SAL has no unwind
+	// data.  To get a clean termination when backtracing the C MCA/INIT
+	// handler, set a dummy return address of 0 in this routine.  That
+	// requires that ia64_os_mca_virtual_begin be a global function.
+ENTRY(ia64_os_mca_virtual_begin)
+	.prologue
+	.save rp,r0
+	.body
+
+	mov ar.rsc=3				// set eager mode for C handler
+	mov r2=r7				// see GET_IA64_MCA_DATA above
+	;;
 
 	// Call virtual mode handler
-	movl		r2=ia64_mca_ucmc_handler;;
-	mov		b6=r2;;
-	br.call.sptk.many    b0=b6;;
-.ret0:
+	alloc r14=ar.pfs,0,0,3,0
+	;;
+	DATA_PA_TO_VA(r2,r7)
+	;;
+	add out0=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2
+	add out1=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2
+	add out2=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET, r2
+	br.call.sptk.many b0=ia64_mca_handler
+
 	// Revert back to physical mode before going back to SAL
 	PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
 ia64_os_mca_virtual_end:
 
-	// restore the original stack frame here
-	GET_IA64_MCA_DATA(r2)
-	;;
-	add	r2 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
-	;;
-	movl    r4=IA64_PSR_MC
-	;;
-	rse_return_context(r4,r3,r2)	// switch from interrupt context for RSE
+END(ia64_os_mca_virtual_begin)
 
-	// let us restore all the registers from our PSI structure
-	mov	r8=gp
-	;;
-begin_os_mca_restore:
-	br	ia64_os_mca_proc_state_restore;;
+	// switch back to previous stack
+	alloc r14=ar.pfs,0,0,0,0		// remove the MCA handler frame
+	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
+	LOAD_PHYSICAL(p0,r2,1f)			// return address
+	br.sptk ia64_old_stack
+1:
 
-ia64_os_mca_done_restore:
-	OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(r2);;
-	// branch back to SALE_CHECK
-	ld8		r3=[r2];;
-	mov		b0=r3;;		// SAL_CHECK return address
+	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
+	LOAD_PHYSICAL(p0,r2,1f)			// return address
+	br.sptk ia64_state_restore		// restore the SAL state
+1:
+
+	mov		b0=r12			// SAL_CHECK return address
 
 	// release lock
-	movl		r3=ia64_mca_serialize;;
-	DATA_VA_TO_PA(r3);;
-	st8.rel		[r3]=r0
+	LOAD_PHYSICAL(p0,r3,ia64_mca_serialize);;
+	st4.rel		[r3]=r0
 
 	br		b0
-	;;
-ia64_os_mca_dispatch_end:
+
 //EndMain//////////////////////////////////////////////////////////////////////
 
+//StartMain////////////////////////////////////////////////////////////////////
 
-//++
-// Name:
-//	ia64_os_mca_proc_state_dump()
 //
-// Stub Description:
+// SAL to OS entry point for INIT on all processors.  This has been defined for
+// registration purposes with SAL as a part of ia64_mca_init.  Monarch and
+// slave INIT have identical processing, except for the value of the
+// sos->monarch flag in r19.
 //
-//	This stub dumps the processor state during MCHK to a data area
-//
-//--
 
-ia64_os_mca_proc_state_dump:
-	// Save bank 1 GRs 16-31 which will be used by c-language code when we switch
-	// to virtual addressing mode.
+ia64_os_init_dispatch_monarch:
+	mov r19=1				// Bow, bow, ye lower middle classes!
+	br.sptk ia64_os_init_dispatch
+
+ia64_os_init_dispatch_slave:
+	mov r19=0				// <igor>yeth, mathter</igor>
+
+ia64_os_init_dispatch:
+
+	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
+	LOAD_PHYSICAL(p0,r2,1f)			// return address
+	br.sptk ia64_state_save			// save the state that is not in minstate
+1:
+
+	// switch to per cpu INIT stack
+	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
+	LOAD_PHYSICAL(p0,r2,1f)			// return address
+	br.sptk ia64_new_stack
+1:
+
+	// everything saved, now we can set the kernel registers
+	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
+	LOAD_PHYSICAL(p0,r2,1f)			// return address
+	br.sptk ia64_set_kernel_registers
+1:
+
+	// This must be done in physical mode
 	GET_IA64_MCA_DATA(r2)
 	;;
-	add	r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
-	;;
-// save ar.NaT
-	mov		r5=ar.unat		// ar.unat
-
-// save banked GRs 16-31 along with NaT bits
-	bsw.1;;
-	st8.spill	[r2]=r16,8;;
-	st8.spill	[r2]=r17,8;;
-	st8.spill	[r2]=r18,8;;
-	st8.spill	[r2]=r19,8;;
-	st8.spill	[r2]=r20,8;;
-	st8.spill	[r2]=r21,8;;
-	st8.spill	[r2]=r22,8;;
-	st8.spill	[r2]=r23,8;;
-	st8.spill	[r2]=r24,8;;
-	st8.spill	[r2]=r25,8;;
-	st8.spill	[r2]=r26,8;;
-	st8.spill	[r2]=r27,8;;
-	st8.spill	[r2]=r28,8;;
-	st8.spill	[r2]=r29,8;;
-	st8.spill	[r2]=r30,8;;
-	st8.spill	[r2]=r31,8;;
-
-	mov		r4=ar.unat;;
-	st8		[r2]=r4,8	// save User NaT bits for r16-r31
-	mov		ar.unat=r5	// restore original unat
-	bsw.0;;
-
-//save BRs
-	add		r4=8,r2		// duplicate r2 in r4
-	add		r6=2*8,r2	// duplicate r2 in r4
-
-	mov		r3=b0
-	mov		r5=b1
-	mov		r7=b2;;
-	st8		[r2]=r3,3*8
-	st8		[r4]=r5,3*8
-	st8		[r6]=r7,3*8;;
-
-	mov		r3=b3
-	mov		r5=b4
-	mov		r7=b5;;
-	st8		[r2]=r3,3*8
-	st8		[r4]=r5,3*8
-	st8		[r6]=r7,3*8;;
-
-	mov		r3=b6
-	mov		r5=b7;;
-	st8		[r2]=r3,2*8
-	st8		[r4]=r5,2*8;;
-
-cSaveCRs:
-// save CRs
-	add		r4=8,r2		// duplicate r2 in r4
-	add		r6=2*8,r2	// duplicate r2 in r4
-
-	mov		r3=cr.dcr
-	mov		r5=cr.itm
-	mov		r7=cr.iva;;
-
-	st8		[r2]=r3,8*8
-	st8		[r4]=r5,3*8
-	st8		[r6]=r7,3*8;;	// 48 byte rements
-
-	mov		r3=cr.pta;;
-	st8		[r2]=r3,8*8;;	// 64 byte rements
-
-// if PSR.ic=0, reading interruption registers causes an illegal operation fault
-	mov		r3=psr;;
-	tbit.nz.unc	p6,p0=r3,PSR_IC;;	// PSI Valid Log bit pos. test
-(p6)    st8		[r2]=r0,9*8+160	// increment by 232 byte inc.
-begin_skip_intr_regs:
-(p6)	br		SkipIntrRegs;;
-
-	add		r4=8,r2		// duplicate r2 in r4
-	add		r6=2*8,r2	// duplicate r2 in r6
-
-	mov		r3=cr.ipsr
-	mov		r5=cr.isr
-	mov		r7=r0;;
-	st8		[r2]=r3,3*8
-	st8		[r4]=r5,3*8
-	st8		[r6]=r7,3*8;;
-
-	mov		r3=cr.iip
-	mov		r5=cr.ifa
-	mov		r7=cr.itir;;
-	st8		[r2]=r3,3*8
-	st8		[r4]=r5,3*8
-	st8		[r6]=r7,3*8;;
-
-	mov		r3=cr.iipa
-	mov		r5=cr.ifs
-	mov		r7=cr.iim;;
-	st8		[r2]=r3,3*8
-	st8		[r4]=r5,3*8
-	st8		[r6]=r7,3*8;;
-
-	mov		r3=cr25;;	// cr.iha
-	st8		[r2]=r3,160;;	// 160 byte rement
-
-SkipIntrRegs:
-	st8		[r2]=r0,152;;	// another 152 byte .
-
-	add		r4=8,r2		// duplicate r2 in r4
-	add		r6=2*8,r2	// duplicate r2 in r6
-
-	mov		r3=cr.lid
-//	mov		r5=cr.ivr	// cr.ivr, don't read it
-	mov		r7=cr.tpr;;
-	st8		[r2]=r3,3*8
-	st8		[r4]=r5,3*8
-	st8		[r6]=r7,3*8;;
-
-	mov		r3=r0		// cr.eoi => cr67
-	mov		r5=r0		// cr.irr0 => cr68
-	mov		r7=r0;;		// cr.irr1 => cr69
-	st8		[r2]=r3,3*8
-	st8		[r4]=r5,3*8
-	st8		[r6]=r7,3*8;;
-
-	mov		r3=r0		// cr.irr2 => cr70
-	mov		r5=r0		// cr.irr3 => cr71
-	mov		r7=cr.itv;;
-	st8		[r2]=r3,3*8
-	st8		[r4]=r5,3*8
-	st8		[r6]=r7,3*8;;
-
-	mov		r3=cr.pmv
-	mov		r5=cr.cmcv;;
-	st8		[r2]=r3,7*8
-	st8		[r4]=r5,7*8;;
-
-	mov		r3=r0		// cr.lrr0 => cr80
-	mov		r5=r0;;		// cr.lrr1 => cr81
-	st8		[r2]=r3,23*8
-	st8		[r4]=r5,23*8;;
-
-	adds		r2=25*8,r2;;
-
-cSaveARs:
-// save ARs
-	add		r4=8,r2		// duplicate r2 in r4
-	add		r6=2*8,r2	// duplicate r2 in r6
-
-	mov		r3=ar.k0
-	mov		r5=ar.k1
-	mov		r7=ar.k2;;
-	st8		[r2]=r3,3*8
-	st8		[r4]=r5,3*8
-	st8		[r6]=r7,3*8;;
-
-	mov		r3=ar.k3
-	mov		r5=ar.k4
-	mov		r7=ar.k5;;
-	st8		[r2]=r3,3*8
-	st8		[r4]=r5,3*8
-	st8		[r6]=r7,3*8;;
-
-	mov		r3=ar.k6
-	mov		r5=ar.k7
-	mov		r7=r0;;		// ar.kr8
-	st8		[r2]=r3,10*8
-	st8		[r4]=r5,10*8
-	st8		[r6]=r7,10*8;;	// rement by 72 bytes
-
-	mov		r3=ar.rsc
-	mov		ar.rsc=r0	// put RSE in enforced lazy mode
-	mov		r5=ar.bsp
-	;;
-	mov		r7=ar.bspstore;;
-	st8		[r2]=r3,3*8
-	st8		[r4]=r5,3*8
-	st8		[r6]=r7,3*8;;
-
-	mov		r3=ar.rnat;;
-	st8		[r2]=r3,8*13	// increment by 13x8 bytes
-
-	mov		r3=ar.ccv;;
-	st8		[r2]=r3,8*4
-
-	mov		r3=ar.unat;;
-	st8		[r2]=r3,8*4
-
-	mov		r3=ar.fpsr;;
-	st8		[r2]=r3,8*4
-
-	mov		r3=ar.itc;;
-	st8		[r2]=r3,160	// 160
-
-	mov		r3=ar.pfs;;
-	st8		[r2]=r3,8
-
-	mov		r3=ar.lc;;
-	st8		[r2]=r3,8
-
-	mov		r3=ar.ec;;
-	st8		[r2]=r3
-	add		r2=8*62,r2	//padding
-
-// save RRs
-	mov		ar.lc=0x08-1
-	movl		r4=0x00;;
-
-cStRR:
-	dep.z		r5=r4,61,3;;
-	mov		r3=rr[r5];;
-	st8		[r2]=r3,8
-	add		r4=1,r4
-	br.cloop.sptk.few	cStRR
-	;;
-end_os_mca_dump:
-	br	ia64_os_mca_done_dump;;
-
-//EndStub//////////////////////////////////////////////////////////////////////
-
-
-//++
-// Name:
-//	ia64_os_mca_proc_state_restore()
-//
-// Stub Description:
-//
-//	This is a stub to restore the saved processor state during MCHK
-//
-//--
-
-ia64_os_mca_proc_state_restore:
-
-// Restore bank1 GR16-31
-	GET_IA64_MCA_DATA(r2)
-	;;
-	add	r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
-
-restore_GRs:	// restore bank-1 GRs 16-31
-	bsw.1;;
-	add		r3=16*8,r2;;	// to get to NaT of GR 16-31
-	ld8		r3=[r3];;
-	mov		ar.unat=r3;;	// first restore NaT
-
-	ld8.fill	r16=[r2],8;;
-	ld8.fill	r17=[r2],8;;
-	ld8.fill	r18=[r2],8;;
-	ld8.fill	r19=[r2],8;;
-	ld8.fill	r20=[r2],8;;
-	ld8.fill	r21=[r2],8;;
-	ld8.fill	r22=[r2],8;;
-	ld8.fill	r23=[r2],8;;
-	ld8.fill	r24=[r2],8;;
-	ld8.fill	r25=[r2],8;;
-	ld8.fill	r26=[r2],8;;
-	ld8.fill	r27=[r2],8;;
-	ld8.fill	r28=[r2],8;;
-	ld8.fill	r29=[r2],8;;
-	ld8.fill	r30=[r2],8;;
-	ld8.fill	r31=[r2],8;;
-
-	ld8		r3=[r2],8;;	// increment to skip NaT
-	bsw.0;;
-
-restore_BRs:
-	add		r4=8,r2		// duplicate r2 in r4
-	add		r6=2*8,r2;;	// duplicate r2 in r4
-
-	ld8		r3=[r2],3*8
-	ld8		r5=[r4],3*8
-	ld8		r7=[r6],3*8;;
-	mov		b0=r3
-	mov		b1=r5
-	mov		b2=r7;;
-
-	ld8		r3=[r2],3*8
-	ld8		r5=[r4],3*8
-	ld8		r7=[r6],3*8;;
-	mov		b3=r3
-	mov		b4=r5
-	mov		b5=r7;;
-
-	ld8		r3=[r2],2*8
-	ld8		r5=[r4],2*8;;
-	mov		b6=r3
-	mov		b7=r5;;
-
-restore_CRs:
-	add		r4=8,r2		// duplicate r2 in r4
-	add		r6=2*8,r2;;	// duplicate r2 in r4
-
-	ld8		r3=[r2],8*8
-	ld8		r5=[r4],3*8
-	ld8		r7=[r6],3*8;;	// 48 byte increments
-	mov		cr.dcr=r3
-	mov		cr.itm=r5
-	mov		cr.iva=r7;;
-
-	ld8		r3=[r2],8*8;;	// 64 byte increments
-//	mov		cr.pta=r3
-
-// if PSR.ic=1, reading interruption registers causes an illegal operation fault
-	mov		r3=psr;;
-	tbit.nz.unc	p6,p0=r3,PSR_IC;;	// PSI Valid Log bit pos. test
-(p6)    st8		[r2]=r0,9*8+160	// increment by 232 byte inc.
-
-begin_rskip_intr_regs:
-(p6)	br		rSkipIntrRegs;;
-
-	add		r4=8,r2		// duplicate r2 in r4
-	add		r6=2*8,r2;;	// duplicate r2 in r4
-
-	ld8		r3=[r2],3*8
-	ld8		r5=[r4],3*8
-	ld8		r7=[r6],3*8;;
-	mov		cr.ipsr=r3
-//	mov		cr.isr=r5	// cr.isr is read only
-
-	ld8		r3=[r2],3*8
-	ld8		r5=[r4],3*8
-	ld8		r7=[r6],3*8;;
-	mov		cr.iip=r3
-	mov		cr.ifa=r5
-	mov		cr.itir=r7;;
-
-	ld8		r3=[r2],3*8
-	ld8		r5=[r4],3*8
-	ld8		r7=[r6],3*8;;
-	mov		cr.iipa=r3
-	mov		cr.ifs=r5
-	mov		cr.iim=r7
-
-	ld8		r3=[r2],160;;	// 160 byte increment
-	mov		cr.iha=r3
-
-rSkipIntrRegs:
-	ld8		r3=[r2],152;;	// another 152 byte inc.
-
-	add		r4=8,r2		// duplicate r2 in r4
-	add		r6=2*8,r2;;	// duplicate r2 in r6
-
-	ld8		r3=[r2],8*3
-	ld8		r5=[r4],8*3
-	ld8		r7=[r6],8*3;;
-	mov		cr.lid=r3
-//	mov		cr.ivr=r5	// cr.ivr is read only
-	mov		cr.tpr=r7;;
-
-	ld8		r3=[r2],8*3
-	ld8		r5=[r4],8*3
-	ld8		r7=[r6],8*3;;
-//	mov		cr.eoi=r3
-//	mov		cr.irr0=r5	// cr.irr0 is read only
-//	mov		cr.irr1=r7;;	// cr.irr1 is read only
-
-	ld8		r3=[r2],8*3
-	ld8		r5=[r4],8*3
-	ld8		r7=[r6],8*3;;
-//	mov		cr.irr2=r3	// cr.irr2 is read only
-//	mov		cr.irr3=r5	// cr.irr3 is read only
-	mov		cr.itv=r7;;
-
-	ld8		r3=[r2],8*7
-	ld8		r5=[r4],8*7;;
-	mov		cr.pmv=r3
-	mov		cr.cmcv=r5;;
-
-	ld8		r3=[r2],8*23
-	ld8		r5=[r4],8*23;;
-	adds		r2=8*23,r2
-	adds		r4=8*23,r4;;
-//	mov		cr.lrr0=r3
-//	mov		cr.lrr1=r5
-
-	adds		r2=8*2,r2;;
-
-restore_ARs:
-	add		r4=8,r2		// duplicate r2 in r4
-	add		r6=2*8,r2;;	// duplicate r2 in r4
-
-	ld8		r3=[r2],3*8
-	ld8		r5=[r4],3*8
-	ld8		r7=[r6],3*8;;
-	mov		ar.k0=r3
-	mov		ar.k1=r5
-	mov		ar.k2=r7;;
-
-	ld8		r3=[r2],3*8
-	ld8		r5=[r4],3*8
-	ld8		r7=[r6],3*8;;
-	mov		ar.k3=r3
-	mov		ar.k4=r5
-	mov		ar.k5=r7;;
-
-	ld8		r3=[r2],10*8
-	ld8		r5=[r4],10*8
-	ld8		r7=[r6],10*8;;
-	mov		ar.k6=r3
-	mov		ar.k7=r5
-	;;
-
-	ld8		r3=[r2],3*8
-	ld8		r5=[r4],3*8
-	ld8		r7=[r6],3*8;;
-//	mov		ar.rsc=r3
-//	mov		ar.bsp=r5	// ar.bsp is read only
-	mov		ar.rsc=r0	// make sure that RSE is in enforced lazy mode
-	;;
-	mov		ar.bspstore=r7;;
-
-	ld8		r9=[r2],8*13;;
-	mov		ar.rnat=r9
-
-	mov		ar.rsc=r3
-	ld8		r3=[r2],8*4;;
-	mov		ar.ccv=r3
-
-	ld8		r3=[r2],8*4;;
-	mov		ar.unat=r3
-
ld8 r3=[r2],8*4;; 754 - mov ar.fpsr=r3 755 - 756 - ld8 r3=[r2],160;; // 160 757 - // mov ar.itc=r3 758 - 759 - ld8 r3=[r2],8;; 760 - mov ar.pfs=r3 761 - 762 - ld8 r3=[r2],8;; 763 - mov ar.lc=r3 764 - 765 - ld8 r3=[r2];; 766 - mov ar.ec=r3 767 - add r2=8*62,r2;; // padding 768 - 769 - restore_RRs: 770 - mov r5=ar.lc 771 - mov ar.lc=0x08-1 772 - movl r4=0x00;; 773 - cStRRr: 774 - dep.z r7=r4,61,3 775 - ld8 r3=[r2],8;; 776 - mov rr[r7]=r3 // what are its access previledges? 777 - add r4=1,r4 778 - br.cloop.sptk.few cStRRr 779 - ;; 780 - mov ar.lc=r5 781 - ;; 782 - end_os_mca_restore: 783 - br ia64_os_mca_done_restore;; 784 - 785 - //EndStub////////////////////////////////////////////////////////////////////// 786 - 787 - 788 - // ok, the issue here is that we need to save state information so 789 - // it can be useable by the kernel debugger and show regs routines. 790 - // In order to do this, our best bet is save the current state (plus 791 - // the state information obtain from the MIN_STATE_AREA) into a pt_regs 792 - // format. This way we can pass it on in a useable format. 793 - // 794 - 795 - // 796 - // SAL to OS entry point for INIT on the monarch processor 797 - // This has been defined for registration purposes with SAL 798 - // as a part of ia64_mca_init. 799 - // 800 - // When we get here, the following registers have been 801 - // set by the SAL for our use 802 - // 803 - // 1. GR1 = OS INIT GP 804 - // 2. GR8 = PAL_PROC physical address 805 - // 3. GR9 = SAL_PROC physical address 806 - // 4. GR10 = SAL GP (physical) 807 - // 5. GR11 = Init Reason 808 - // 0 = Received INIT for event other than crash dump switch 809 - // 1 = Received wakeup at the end of an OS_MCA corrected machine check 810 - // 2 = Received INIT dude to CrashDump switch assertion 811 - // 812 - // 6. GR12 = Return address to location within SAL_INIT procedure 813 - 814 - 815 - GLOBAL_ENTRY(ia64_monarch_init_handler) 438 + mov r7=r2 439 + 440 + // Enter virtual mode from physical mode 441 + VIRTUAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_begin, r4) 442 + 443 + // This code returns to SAL via SOS r2, in general SAL has no unwind 444 + // data. To get a clean termination when backtracing the C MCA/INIT 445 + // handler, set a dummy return address of 0 in this routine. That 446 + // requires that ia64_os_init_virtual_begin be a global function. 447 + ENTRY(ia64_os_init_virtual_begin) 816 448 .prologue 817 - // stash the information the SAL passed to os 818 - SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2) 819 - ;; 820 - SAVE_MIN_WITH_COVER 821 - ;; 822 - mov r8=cr.ifa 823 - mov r9=cr.isr 824 - adds r3=8,r2 // set up second base pointer 825 - ;; 826 - SAVE_REST 827 - 828 - // ok, enough should be saved at this point to be dangerous, and supply 829 - // information for a dump 830 - // We need to switch to Virtual mode before hitting the C functions. 831 - 832 - movl r2=IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN 833 - mov r3=psr // get the current psr, minimum enabled at this point 834 - ;; 835 - or r2=r2,r3 836 - ;; 837 - movl r3=IVirtual_Switch 838 - ;; 839 - mov cr.iip=r3 // short return to set the appropriate bits 840 - mov cr.ipsr=r2 // need to do an rfi to set appropriate bits 841 - ;; 842 - rfi 843 - ;; 844 - IVirtual_Switch: 845 - // 846 - // We should now be running virtual 847 - // 848 - // Let's call the C handler to get the rest of the state info 849 - // 850 - alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!) 
851 - ;; 
852 - adds out0=16,sp // out0 = pointer to pt_regs 
853 - ;; 
854 - DO_SAVE_SWITCH_STACK 449 + .save rp,r0 
855 450 .body 
856 - adds out1=16,sp // out0 = pointer to switch_stack 
857 451 
858 - br.call.sptk.many rp=ia64_init_handler 
859 - .ret1: 452 + mov ar.rsc=3 // set eager mode for C handler 
453 + mov r2=r7 // see GET_IA64_MCA_DATA above 
454 + ;; 
860 455 
861 - return_from_init: 
862 - br.sptk return_from_init 
863 - END(ia64_monarch_init_handler) 456 + // Call virtual mode handler 
457 + alloc r14=ar.pfs,0,0,3,0 
458 + ;; 
459 + DATA_PA_TO_VA(r2,r7) 
460 + ;; 
461 + add out0=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2 
462 + add out1=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2 
463 + add out2=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SOS_OFFSET, r2 
464 + br.call.sptk.many b0=ia64_init_handler 
864 465 
466 + // Revert back to physical mode before going back to SAL 
467 + PHYSICAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_end, r4) 
468 + ia64_os_init_virtual_end: 
469 + 
470 + END(ia64_os_init_virtual_begin) 
471 + 
472 + mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack 
473 + LOAD_PHYSICAL(p0,r2,1f) // return address 
474 + br.sptk ia64_state_restore // restore the SAL state 
475 + 1: 
476 + 
477 + // switch back to previous stack 
478 + alloc r14=ar.pfs,0,0,0,0 // remove the INIT handler frame 
479 + mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET // use the INIT stack 
480 + LOAD_PHYSICAL(p0,r2,1f) // return address 
481 + br.sptk ia64_old_stack 
482 + 1: 
483 + 
484 + mov b0=r12 // SAL_CHECK return address 
485 + br b0 
486 + 
487 + //EndMain////////////////////////////////////////////////////////////////////// 
488 + 
489 + // common defines for the stubs 
490 + #define ms r4 
491 + #define regs r5 
492 + #define temp1 r2 /* careful, it overlaps with input registers */ 
493 + #define temp2 r3 /* careful, it overlaps with input registers */ 
494 + #define temp3 r7 
495 + #define temp4 r14 
496 + 
497 + 
498 + //++ 
499 + // Name: 
500 + // ia64_state_save() 
865 501 // 
866 - // SAL to OS entry point for INIT on the slave processor 
867 - // This has been defined for registration purposes with SAL 
868 - // as a part of ia64_mca_init. 502 + // Stub Description: 
869 503 // 
504 + // Save the state that is not in minstate. This is sensitive to the layout of 
505 + // struct ia64_sal_os_state in mca.h. 
506 + // 
507 + // r2 contains the return address, r3 contains either 
508 + // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. 
509 + // 
510 + // The OS to SAL section of struct ia64_sal_os_state is set to a default 
511 + // value of cold boot (MCA) or warm boot (INIT) and return to the same 
512 + // context. ia64_sal_os_state is also used to hold some registers that 
513 + // need to be saved and restored across the stack switches. 
514 + // 
515 + // Most input registers to this stub come from PAL/SAL 
516 + // r1 os gp, physical 
517 + // r8 pal_proc entry point 
518 + // r9 sal_proc entry point 
519 + // r10 sal gp 
520 + // r11 MCA - rendezvous state, INIT - reason code 
521 + // r12 sal return address 
522 + // r17 pal min_state 
523 + // r18 processor state parameter 
524 + // r19 monarch flag, set by the caller of this routine 
525 + // 
526 + // In addition to the SAL to OS state, this routine saves all the 
527 + // registers that appear in struct pt_regs and struct switch_stack, 
528 + // excluding those that are already in the PAL minstate area. This 
529 + // results in a partial pt_regs and switch_stack, the C code copies the 
530 + // remaining registers from PAL minstate to pt_regs and switch_stack. 
The 531 + // resulting structures contain all the state of the original process when 532 + // MCA/INIT occurred. 533 + // 534 + //-- 870 535 871 - GLOBAL_ENTRY(ia64_slave_init_handler) 872 - 1: br.sptk 1b 873 - END(ia64_slave_init_handler) 536 + ia64_state_save: 537 + add regs=MCA_SOS_OFFSET, r3 538 + add ms=MCA_SOS_OFFSET+8, r3 539 + mov b0=r2 // save return address 540 + cmp.eq p1,p2=IA64_MCA_CPU_MCA_STACK_OFFSET, r3 541 + ;; 542 + GET_IA64_MCA_DATA(temp2) 543 + ;; 544 + add temp1=temp2, regs // struct ia64_sal_os_state on MCA or INIT stack 545 + add temp2=temp2, ms // struct ia64_sal_os_state+8 on MCA or INIT stack 546 + ;; 547 + mov regs=temp1 // save the start of sos 548 + st8 [temp1]=r1,16 // os_gp 549 + st8 [temp2]=r8,16 // pal_proc 550 + ;; 551 + st8 [temp1]=r9,16 // sal_proc 552 + st8 [temp2]=r11,16 // rv_rc 553 + mov r11=cr.iipa 554 + ;; 555 + st8 [temp1]=r18,16 // proc_state_param 556 + st8 [temp2]=r19,16 // monarch 557 + mov r6=IA64_KR(CURRENT) 558 + ;; 559 + st8 [temp1]=r12,16 // sal_ra 560 + st8 [temp2]=r10,16 // sal_gp 561 + mov r12=cr.isr 562 + ;; 563 + st8 [temp1]=r17,16 // pal_min_state 564 + st8 [temp2]=r6,16 // prev_IA64_KR_CURRENT 565 + mov r6=cr.ifa 566 + ;; 567 + st8 [temp1]=r0,16 // prev_task, starts off as NULL 568 + st8 [temp2]=r12,16 // cr.isr 569 + mov r12=cr.itir 570 + ;; 571 + st8 [temp1]=r6,16 // cr.ifa 572 + st8 [temp2]=r12,16 // cr.itir 573 + mov r12=cr.iim 574 + ;; 575 + st8 [temp1]=r11,16 // cr.iipa 576 + st8 [temp2]=r12,16 // cr.iim 577 + mov r6=cr.iha 578 + (p1) mov r12=IA64_MCA_COLD_BOOT 579 + (p2) mov r12=IA64_INIT_WARM_BOOT 580 + ;; 581 + st8 [temp1]=r6,16 // cr.iha 582 + st8 [temp2]=r12 // os_status, default is cold boot 583 + mov r6=IA64_MCA_SAME_CONTEXT 584 + ;; 585 + st8 [temp1]=r6 // context, default is same context 586 + 587 + // Save the pt_regs data that is not in minstate. The previous code 588 + // left regs at sos. 589 + add regs=MCA_PT_REGS_OFFSET-MCA_SOS_OFFSET, regs 590 + ;; 591 + add temp1=PT(B6), regs 592 + mov temp3=b6 593 + mov temp4=b7 594 + add temp2=PT(B7), regs 595 + ;; 596 + st8 [temp1]=temp3,PT(AR_CSD)-PT(B6) // save b6 597 + st8 [temp2]=temp4,PT(AR_SSD)-PT(B7) // save b7 598 + mov temp3=ar.csd 599 + mov temp4=ar.ssd 600 + cover // must be last in group 601 + ;; 602 + st8 [temp1]=temp3,PT(AR_UNAT)-PT(AR_CSD) // save ar.csd 603 + st8 [temp2]=temp4,PT(AR_PFS)-PT(AR_SSD) // save ar.ssd 604 + mov temp3=ar.unat 605 + mov temp4=ar.pfs 606 + ;; 607 + st8 [temp1]=temp3,PT(AR_RNAT)-PT(AR_UNAT) // save ar.unat 608 + st8 [temp2]=temp4,PT(AR_BSPSTORE)-PT(AR_PFS) // save ar.pfs 609 + mov temp3=ar.rnat 610 + mov temp4=ar.bspstore 611 + ;; 612 + st8 [temp1]=temp3,PT(LOADRS)-PT(AR_RNAT) // save ar.rnat 613 + st8 [temp2]=temp4,PT(AR_FPSR)-PT(AR_BSPSTORE) // save ar.bspstore 614 + mov temp3=ar.bsp 615 + ;; 616 + sub temp3=temp3, temp4 // ar.bsp - ar.bspstore 617 + mov temp4=ar.fpsr 618 + ;; 619 + shl temp3=temp3,16 // compute ar.rsc to be used for "loadrs" 620 + ;; 621 + st8 [temp1]=temp3,PT(AR_CCV)-PT(LOADRS) // save loadrs 622 + st8 [temp2]=temp4,PT(F6)-PT(AR_FPSR) // save ar.fpsr 623 + mov temp3=ar.ccv 624 + ;; 625 + st8 [temp1]=temp3,PT(F7)-PT(AR_CCV) // save ar.ccv 626 + stf.spill [temp2]=f6,PT(F8)-PT(F6) 627 + ;; 628 + stf.spill [temp1]=f7,PT(F9)-PT(F7) 629 + stf.spill [temp2]=f8,PT(F10)-PT(F8) 630 + ;; 631 + stf.spill [temp1]=f9,PT(F11)-PT(F9) 632 + stf.spill [temp2]=f10 633 + ;; 634 + stf.spill [temp1]=f11 635 + 636 + // Save the switch_stack data that is not in minstate nor pt_regs. The 637 + // previous code left regs at pt_regs. 
638 + add regs=MCA_SWITCH_STACK_OFFSET-MCA_PT_REGS_OFFSET, regs 639 + ;; 640 + add temp1=SW(F2), regs 641 + add temp2=SW(F3), regs 642 + ;; 643 + stf.spill [temp1]=f2,32 644 + stf.spill [temp2]=f3,32 645 + ;; 646 + stf.spill [temp1]=f4,32 647 + stf.spill [temp2]=f5,32 648 + ;; 649 + stf.spill [temp1]=f12,32 650 + stf.spill [temp2]=f13,32 651 + ;; 652 + stf.spill [temp1]=f14,32 653 + stf.spill [temp2]=f15,32 654 + ;; 655 + stf.spill [temp1]=f16,32 656 + stf.spill [temp2]=f17,32 657 + ;; 658 + stf.spill [temp1]=f18,32 659 + stf.spill [temp2]=f19,32 660 + ;; 661 + stf.spill [temp1]=f20,32 662 + stf.spill [temp2]=f21,32 663 + ;; 664 + stf.spill [temp1]=f22,32 665 + stf.spill [temp2]=f23,32 666 + ;; 667 + stf.spill [temp1]=f24,32 668 + stf.spill [temp2]=f25,32 669 + ;; 670 + stf.spill [temp1]=f26,32 671 + stf.spill [temp2]=f27,32 672 + ;; 673 + stf.spill [temp1]=f28,32 674 + stf.spill [temp2]=f29,32 675 + ;; 676 + stf.spill [temp1]=f30,SW(B2)-SW(F30) 677 + stf.spill [temp2]=f31,SW(B3)-SW(F31) 678 + mov temp3=b2 679 + mov temp4=b3 680 + ;; 681 + st8 [temp1]=temp3,16 // save b2 682 + st8 [temp2]=temp4,16 // save b3 683 + mov temp3=b4 684 + mov temp4=b5 685 + ;; 686 + st8 [temp1]=temp3,SW(AR_LC)-SW(B4) // save b4 687 + st8 [temp2]=temp4 // save b5 688 + mov temp3=ar.lc 689 + ;; 690 + st8 [temp1]=temp3 // save ar.lc 691 + 692 + // FIXME: Some proms are incorrectly accessing the minstate area as 693 + // cached data. The C code uses region 6, uncached virtual. Ensure 694 + // that there is no cache data lying around for the first 1K of the 695 + // minstate area. 696 + // Remove this code in September 2006, that gives platforms a year to 697 + // fix their proms and get their customers updated. 698 + 699 + add r1=32*1,r17 700 + add r2=32*2,r17 701 + add r3=32*3,r17 702 + add r4=32*4,r17 703 + add r5=32*5,r17 704 + add r6=32*6,r17 705 + add r7=32*7,r17 706 + ;; 707 + fc r17 708 + fc r1 709 + fc r2 710 + fc r3 711 + fc r4 712 + fc r5 713 + fc r6 714 + fc r7 715 + add r17=32*8,r17 716 + add r1=32*8,r1 717 + add r2=32*8,r2 718 + add r3=32*8,r3 719 + add r4=32*8,r4 720 + add r5=32*8,r5 721 + add r6=32*8,r6 722 + add r7=32*8,r7 723 + ;; 724 + fc r17 725 + fc r1 726 + fc r2 727 + fc r3 728 + fc r4 729 + fc r5 730 + fc r6 731 + fc r7 732 + add r17=32*8,r17 733 + add r1=32*8,r1 734 + add r2=32*8,r2 735 + add r3=32*8,r3 736 + add r4=32*8,r4 737 + add r5=32*8,r5 738 + add r6=32*8,r6 739 + add r7=32*8,r7 740 + ;; 741 + fc r17 742 + fc r1 743 + fc r2 744 + fc r3 745 + fc r4 746 + fc r5 747 + fc r6 748 + fc r7 749 + add r17=32*8,r17 750 + add r1=32*8,r1 751 + add r2=32*8,r2 752 + add r3=32*8,r3 753 + add r4=32*8,r4 754 + add r5=32*8,r5 755 + add r6=32*8,r6 756 + add r7=32*8,r7 757 + ;; 758 + fc r17 759 + fc r1 760 + fc r2 761 + fc r3 762 + fc r4 763 + fc r5 764 + fc r6 765 + fc r7 766 + 767 + br.sptk b0 768 + 769 + //EndStub////////////////////////////////////////////////////////////////////// 770 + 771 + 772 + //++ 773 + // Name: 774 + // ia64_state_restore() 775 + // 776 + // Stub Description: 777 + // 778 + // Restore the SAL/OS state. This is sensitive to the layout of struct 779 + // ia64_sal_os_state in mca.h. 780 + // 781 + // r2 contains the return address, r3 contains either 782 + // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. 783 + // 784 + // In addition to the SAL to OS state, this routine restores all the 785 + // registers that appear in struct pt_regs and struct switch_stack, 786 + // excluding those in the PAL minstate area. 
787 + // 
788 + //-- 
789 + 
790 + ia64_state_restore: 
791 + // Restore the switch_stack data that is not in minstate nor pt_regs. 
792 + add regs=MCA_SWITCH_STACK_OFFSET, r3 
793 + mov b0=r2 // save return address 
794 + ;; 
795 + GET_IA64_MCA_DATA(temp2) 
796 + ;; 
797 + add regs=temp2, regs 
798 + ;; 
799 + add temp1=SW(F2), regs 
800 + add temp2=SW(F3), regs 
801 + ;; 
802 + ldf.fill f2=[temp1],32 
803 + ldf.fill f3=[temp2],32 
804 + ;; 
805 + ldf.fill f4=[temp1],32 
806 + ldf.fill f5=[temp2],32 
807 + ;; 
808 + ldf.fill f12=[temp1],32 
809 + ldf.fill f13=[temp2],32 
810 + ;; 
811 + ldf.fill f14=[temp1],32 
812 + ldf.fill f15=[temp2],32 
813 + ;; 
814 + ldf.fill f16=[temp1],32 
815 + ldf.fill f17=[temp2],32 
816 + ;; 
817 + ldf.fill f18=[temp1],32 
818 + ldf.fill f19=[temp2],32 
819 + ;; 
820 + ldf.fill f20=[temp1],32 
821 + ldf.fill f21=[temp2],32 
822 + ;; 
823 + ldf.fill f22=[temp1],32 
824 + ldf.fill f23=[temp2],32 
825 + ;; 
826 + ldf.fill f24=[temp1],32 
827 + ldf.fill f25=[temp2],32 
828 + ;; 
829 + ldf.fill f26=[temp1],32 
830 + ldf.fill f27=[temp2],32 
831 + ;; 
832 + ldf.fill f28=[temp1],32 
833 + ldf.fill f29=[temp2],32 
834 + ;; 
835 + ldf.fill f30=[temp1],SW(B2)-SW(F30) 
836 + ldf.fill f31=[temp2],SW(B3)-SW(F31) 
837 + ;; 
838 + ld8 temp3=[temp1],16 // restore b2 
839 + ld8 temp4=[temp2],16 // restore b3 
840 + ;; 
841 + mov b2=temp3 
842 + mov b3=temp4 
843 + ld8 temp3=[temp1],SW(AR_LC)-SW(B4) // restore b4 
844 + ld8 temp4=[temp2] // restore b5 
845 + ;; 
846 + mov b4=temp3 
847 + mov b5=temp4 
848 + ld8 temp3=[temp1] // restore ar.lc 
849 + ;; 
850 + mov ar.lc=temp3 
851 + 
852 + // Restore the pt_regs data that is not in minstate. The previous code 
853 + // left regs at switch_stack. 
854 + add regs=MCA_PT_REGS_OFFSET-MCA_SWITCH_STACK_OFFSET, regs 
855 + ;; 
856 + add temp1=PT(B6), regs 
857 + add temp2=PT(B7), regs 
858 + ;; 
859 + ld8 temp3=[temp1],PT(AR_CSD)-PT(B6) // restore b6 
860 + ld8 temp4=[temp2],PT(AR_SSD)-PT(B7) // restore b7 
861 + ;; 
862 + mov b6=temp3 
863 + mov b7=temp4 
864 + ld8 temp3=[temp1],PT(AR_UNAT)-PT(AR_CSD) // restore ar.csd 
865 + ld8 temp4=[temp2],PT(AR_PFS)-PT(AR_SSD) // restore ar.ssd 
866 + ;; 
867 + mov ar.csd=temp3 
868 + mov ar.ssd=temp4 
869 + ld8 temp3=[temp1] // restore ar.unat 
870 + add temp1=PT(AR_CCV)-PT(AR_UNAT), temp1 
871 + ld8 temp4=[temp2],PT(AR_FPSR)-PT(AR_PFS) // restore ar.pfs 
872 + ;; 
873 + mov ar.unat=temp3 
874 + mov ar.pfs=temp4 
875 + // ar.rnat, ar.bspstore, loadrs are restored in ia64_old_stack. 
876 + ld8 temp3=[temp1],PT(F6)-PT(AR_CCV) // restore ar.ccv 
877 + ld8 temp4=[temp2],PT(F7)-PT(AR_FPSR) // restore ar.fpsr 
878 + ;; 
879 + mov ar.ccv=temp3 
880 + mov ar.fpsr=temp4 
881 + ldf.fill f6=[temp1],PT(F8)-PT(F6) 
882 + ldf.fill f7=[temp2],PT(F9)-PT(F7) 
883 + ;; 
884 + ldf.fill f8=[temp1],PT(F10)-PT(F8) 
885 + ldf.fill f9=[temp2],PT(F11)-PT(F9) 
886 + ;; 
887 + ldf.fill f10=[temp1] 
888 + ldf.fill f11=[temp2] 
889 + 
890 + // Restore the SAL to OS state. The previous code left regs at pt_regs. 
891 + add regs=MCA_SOS_OFFSET-MCA_PT_REGS_OFFSET, regs 
892 + ;; 
893 + add temp1=IA64_SAL_OS_STATE_COMMON_OFFSET, regs 
894 + add temp2=IA64_SAL_OS_STATE_COMMON_OFFSET+8, regs 
895 + ;; 
896 + ld8 r12=[temp1],16 // sal_ra 
897 + ld8 r9=[temp2],16 // sal_gp 
898 + ;; 
899 + ld8 r22=[temp1],24 // pal_min_state, virtual. 
skip prev_task 900 + ld8 r21=[temp2],16 // prev_IA64_KR_CURRENT 901 + ;; 902 + ld8 temp3=[temp1],16 // cr.isr 903 + ld8 temp4=[temp2],16 // cr.ifa 904 + ;; 905 + mov cr.isr=temp3 906 + mov cr.ifa=temp4 907 + ld8 temp3=[temp1],16 // cr.itir 908 + ld8 temp4=[temp2],16 // cr.iipa 909 + ;; 910 + mov cr.itir=temp3 911 + mov cr.iipa=temp4 912 + ld8 temp3=[temp1],16 // cr.iim 913 + ld8 temp4=[temp2],16 // cr.iha 914 + ;; 915 + mov cr.iim=temp3 916 + mov cr.iha=temp4 917 + dep r22=0,r22,62,2 // pal_min_state, physical, uncached 918 + mov IA64_KR(CURRENT)=r21 919 + ld8 r8=[temp1] // os_status 920 + ld8 r10=[temp2] // context 921 + 922 + br.sptk b0 923 + 924 + //EndStub////////////////////////////////////////////////////////////////////// 925 + 926 + 927 + //++ 928 + // Name: 929 + // ia64_new_stack() 930 + // 931 + // Stub Description: 932 + // 933 + // Switch to the MCA/INIT stack. 934 + // 935 + // r2 contains the return address, r3 contains either 936 + // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. 937 + // 938 + // On entry RBS is still on the original stack, this routine switches RBS 939 + // to use the MCA/INIT stack. 940 + // 941 + // On entry, sos->pal_min_state is physical, on exit it is virtual. 942 + // 943 + //-- 944 + 945 + ia64_new_stack: 946 + add regs=MCA_PT_REGS_OFFSET, r3 947 + add temp2=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET, r3 948 + mov b0=r2 // save return address 949 + GET_IA64_MCA_DATA(temp1) 950 + invala 951 + ;; 952 + add temp2=temp2, temp1 // struct ia64_sal_os_state.pal_min_state on MCA or INIT stack 953 + add regs=regs, temp1 // struct pt_regs on MCA or INIT stack 954 + ;; 955 + // Address of minstate area provided by PAL is physical, uncacheable. 956 + // Convert to Linux virtual address in region 6 for C code. 957 + ld8 ms=[temp2] // pal_min_state, physical 958 + ;; 959 + dep temp1=-1,ms,62,2 // set region 6 960 + mov temp3=IA64_RBS_OFFSET-MCA_PT_REGS_OFFSET 961 + ;; 962 + st8 [temp2]=temp1 // pal_min_state, virtual 963 + 964 + add temp4=temp3, regs // start of bspstore on new stack 965 + ;; 966 + mov ar.bspstore=temp4 // switch RBS to MCA/INIT stack 967 + ;; 968 + flushrs // must be first in group 969 + br.sptk b0 970 + 971 + //EndStub////////////////////////////////////////////////////////////////////// 972 + 973 + 974 + //++ 975 + // Name: 976 + // ia64_old_stack() 977 + // 978 + // Stub Description: 979 + // 980 + // Switch to the old stack. 981 + // 982 + // r2 contains the return address, r3 contains either 983 + // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. 984 + // 985 + // On entry, pal_min_state is virtual, on exit it is physical. 986 + // 987 + // On entry RBS is on the MCA/INIT stack, this routine switches RBS 988 + // back to the previous stack. 989 + // 990 + // The psr is set to all zeroes. SAL return requires either all zeroes or 991 + // just psr.mc set. Leaving psr.mc off allows INIT to be issued if this 992 + // code does not perform correctly. 993 + // 994 + // The dirty registers at the time of the event were flushed to the 995 + // MCA/INIT stack in ia64_pt_regs_save(). Restore the dirty registers 996 + // before reverting to the previous bspstore. 
997 + //-- 998 + 999 + ia64_old_stack: 1000 + add regs=MCA_PT_REGS_OFFSET, r3 1001 + mov b0=r2 // save return address 1002 + GET_IA64_MCA_DATA(temp2) 1003 + LOAD_PHYSICAL(p0,temp1,1f) 1004 + ;; 1005 + mov cr.ipsr=r0 1006 + mov cr.ifs=r0 1007 + mov cr.iip=temp1 1008 + ;; 1009 + invala 1010 + rfi 1011 + 1: 1012 + 1013 + add regs=regs, temp2 // struct pt_regs on MCA or INIT stack 1014 + ;; 1015 + add temp1=PT(LOADRS), regs 1016 + ;; 1017 + ld8 temp2=[temp1],PT(AR_BSPSTORE)-PT(LOADRS) // restore loadrs 1018 + ;; 1019 + ld8 temp3=[temp1],PT(AR_RNAT)-PT(AR_BSPSTORE) // restore ar.bspstore 1020 + mov ar.rsc=temp2 1021 + ;; 1022 + loadrs 1023 + ld8 temp4=[temp1] // restore ar.rnat 1024 + ;; 1025 + mov ar.bspstore=temp3 // back to old stack 1026 + ;; 1027 + mov ar.rnat=temp4 1028 + ;; 1029 + 1030 + br.sptk b0 1031 + 1032 + //EndStub////////////////////////////////////////////////////////////////////// 1033 + 1034 + 1035 + //++ 1036 + // Name: 1037 + // ia64_set_kernel_registers() 1038 + // 1039 + // Stub Description: 1040 + // 1041 + // Set the registers that are required by the C code in order to run on an 1042 + // MCA/INIT stack. 1043 + // 1044 + // r2 contains the return address, r3 contains either 1045 + // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET. 1046 + // 1047 + //-- 1048 + 1049 + ia64_set_kernel_registers: 1050 + add temp3=MCA_SP_OFFSET, r3 1051 + add temp4=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_OS_GP_OFFSET, r3 1052 + mov b0=r2 // save return address 1053 + GET_IA64_MCA_DATA(temp1) 1054 + ;; 1055 + add temp4=temp4, temp1 // &struct ia64_sal_os_state.os_gp 1056 + add r12=temp1, temp3 // kernel stack pointer on MCA/INIT stack 1057 + add r13=temp1, r3 // set current to start of MCA/INIT stack 1058 + ;; 1059 + ld8 r1=[temp4] // OS GP from SAL OS state 1060 + ;; 1061 + DATA_PA_TO_VA(r1,temp1) 1062 + DATA_PA_TO_VA(r12,temp2) 1063 + DATA_PA_TO_VA(r13,temp3) 1064 + ;; 1065 + mov IA64_KR(CURRENT)=r13 1066 + 1067 + // FIXME: do I need to wire IA64_KR_CURRENT_STACK and IA64_TR_CURRENT_STACK? 1068 + 1069 + br.sptk b0 1070 + 1071 + //EndStub////////////////////////////////////////////////////////////////////// 1072 + 1073 + #undef ms 1074 + #undef regs 1075 + #undef temp1 1076 + #undef temp2 1077 + #undef temp3 1078 + #undef temp4 1079 + 1080 + 1081 + // Support function for mca.c, it is here to avoid using inline asm. Given the 1082 + // address of an rnat slot, if that address is below the current ar.bspstore 1083 + // then return the contents of that slot, otherwise return the contents of 1084 + // ar.rnat. 1085 + GLOBAL_ENTRY(ia64_get_rnat) 1086 + alloc r14=ar.pfs,1,0,0,0 1087 + mov ar.rsc=0 1088 + ;; 1089 + mov r14=ar.bspstore 1090 + ;; 1091 + cmp.lt p6,p7=in0,r14 1092 + ;; 1093 + (p6) ld8 r8=[in0] 1094 + (p7) mov r8=ar.rnat 1095 + mov ar.rsc=3 1096 + br.ret.sptk.many rp 1097 + END(ia64_get_rnat)
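
The decision ia64_get_rnat makes is easier to follow in C. A minimal sketch of the same logic, where bspstore and rnat stand in for the values the stub reads from ar.bspstore and ar.rnat under ar.rsc=0; these parameter names are illustrative, not real kernel symbols:

	/* Model of ia64_get_rnat: an rnat slot that lies below ar.bspstore
	 * has already been spilled to memory, so its NaT collection is read
	 * from RAM; otherwise the collection bits are still accumulating in
	 * the ar.rnat register itself. */
	unsigned long get_rnat_model(unsigned long *rnat_slot,
				     unsigned long *bspstore,
				     unsigned long rnat)
	{
		if (rnat_slot < bspstore)
			return *rnat_slot;	/* slot already flushed to RAM */
		return rnat;			/* still held in ar.rnat */
	}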
+17 -20
arch/ia64/kernel/mca_drv.c
··· 4 4 * 5 5 * Copyright (C) 2004 FUJITSU LIMITED 6 6 * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com) 7 + * Copyright (C) 2005 Silicon Graphics, Inc 8 + * Copyright (C) 2005 Keith Owens <kaos@sgi.com> 7 9 */ 8 10 #include <linux/config.h> 9 11 #include <linux/types.h> ··· 39 37 40 38 /* max size of SAL error record (default) */ 41 39 static int sal_rec_max = 10000; 42 - 43 - /* from mca.c */ 44 - static ia64_mca_sal_to_os_state_t *sal_to_os_handoff_state; 45 - static ia64_mca_os_to_sal_state_t *os_to_sal_handoff_state; 46 40 47 41 /* from mca_drv_asm.S */ 48 42 extern void *mca_handler_bhhook(void); ··· 314 316 */ 315 317 316 318 static mca_type_t 317 - is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci) 319 + is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci, 320 + struct ia64_sal_os_state *sos) 318 321 { 319 322 pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx); 320 323 ··· 326 327 * Therefore it is local MCA when rendezvous has not been requested. 327 328 * Failed to rendezvous, the system must be down. 328 329 */ 329 - switch (sal_to_os_handoff_state->imsto_rendez_state) { 330 + switch (sos->rv_rc) { 330 331 case -1: /* SAL rendezvous unsuccessful */ 331 332 return MCA_IS_GLOBAL; 332 333 case 0: /* SAL rendezvous not required */ ··· 387 388 */ 388 389 389 390 static int 390 - recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci) 391 + recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci, 392 + struct ia64_sal_os_state *sos) 391 393 { 392 394 sal_log_mod_error_info_t *smei; 393 395 pal_min_state_area_t *pmsa; ··· 426 426 * setup for resume to bottom half of MCA, 427 427 * "mca_handler_bhhook" 428 428 */ 429 - pmsa = (pal_min_state_area_t *)(sal_to_os_handoff_state->pal_min_state | (6ul<<61)); 429 + pmsa = sos->pal_min_state; 430 430 /* pass to bhhook as 1st argument (gr8) */ 431 431 pmsa->pmsa_gr[8-1] = smei->target_identifier; 432 432 /* set interrupted return address (but no use) */ ··· 459 459 */ 460 460 461 461 static int 462 - recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci) 462 + recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci, 463 + struct ia64_sal_os_state *sos) 463 464 { 464 465 int status = 0; 465 466 pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx); ··· 470 469 case 1: /* partial read */ 471 470 case 3: /* full line(cpu) read */ 472 471 case 9: /* I/O space read */ 473 - status = recover_from_read_error(slidx, peidx, pbci); 472 + status = recover_from_read_error(slidx, peidx, pbci, sos); 474 473 break; 475 474 case 0: /* unknown */ 476 475 case 2: /* partial write */ ··· 509 508 */ 510 509 511 510 static int 512 - recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci) 511 + recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci, 512 + struct ia64_sal_os_state *sos) 513 513 { 514 514 pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx); 515 515 ··· 547 545 * This means "there are some platform errors". 548 546 */ 549 547 if (platform) 550 - return recover_from_platform_error(slidx, peidx, pbci); 548 + return recover_from_platform_error(slidx, peidx, pbci, sos); 551 549 /* 552 550 * On account of strange SAL error record, we cannot recover. 
553 551 */ ··· 564 562 565 563 static int 566 564 mca_try_to_recover(void *rec, 567 - ia64_mca_sal_to_os_state_t *sal_to_os_state, 568 - ia64_mca_os_to_sal_state_t *os_to_sal_state) 565 + struct ia64_sal_os_state *sos) 569 566 { 570 567 int platform_err; 571 568 int n_proc_err; 572 569 slidx_table_t slidx; 573 570 peidx_table_t peidx; 574 571 pal_bus_check_info_t pbci; 575 - 576 - /* handoff state from/to mca.c */ 577 - sal_to_os_handoff_state = sal_to_os_state; 578 - os_to_sal_handoff_state = os_to_sal_state; 579 572 580 573 /* Make index of SAL error record */ 581 574 platform_err = mca_make_slidx(rec, &slidx); ··· 594 597 *((u64*)&pbci) = peidx_check_info(&peidx, bus_check, 0); 595 598 596 599 /* Check whether MCA is global or not */ 597 - if (is_mca_global(&peidx, &pbci)) 600 + if (is_mca_global(&peidx, &pbci, sos)) 598 601 return 0; 599 602 600 603 /* Try to recover a processor error */ 601 - return recover_from_processor_error(platform_err, &slidx, &peidx, &pbci); 604 + return recover_from_processor_error(platform_err, &slidx, &peidx, &pbci, sos); 602 605 } 603 606 604 607 /*
+18 -70
arch/ia64/kernel/minstate.h
··· 5 5 #include "entry.h" 6 6 7 7 /* 8 - * For ivt.s we want to access the stack virtually so we don't have to disable translation 9 - * on interrupts. 10 - * 11 - * On entry: 12 - * r1: pointer to current task (ar.k6) 13 - */ 14 - #define MINSTATE_START_SAVE_MIN_VIRT \ 15 - (pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \ 16 - ;; \ 17 - (pUStk) mov.m r24=ar.rnat; \ 18 - (pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \ 19 - (pKStk) mov r1=sp; /* get sp */ \ 20 - ;; \ 21 - (pUStk) lfetch.fault.excl.nt1 [r22]; \ 22 - (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \ 23 - (pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \ 24 - ;; \ 25 - (pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \ 26 - (pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \ 27 - ;; \ 28 - (pUStk) mov r18=ar.bsp; \ 29 - (pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ 30 - 31 - #define MINSTATE_END_SAVE_MIN_VIRT \ 32 - bsw.1; /* switch back to bank 1 (must be last in insn group) */ \ 33 - ;; 34 - 35 - /* 36 - * For mca_asm.S we want to access the stack physically since the state is saved before we 37 - * go virtual and don't want to destroy the iip or ipsr. 38 - */ 39 - #define MINSTATE_START_SAVE_MIN_PHYS \ 40 - (pKStk) mov r3=IA64_KR(PER_CPU_DATA);; \ 41 - (pKStk) addl r3=THIS_CPU(ia64_mca_data),r3;; \ 42 - (pKStk) ld8 r3 = [r3];; \ 43 - (pKStk) addl r3=IA64_MCA_CPU_INIT_STACK_OFFSET,r3;; \ 44 - (pKStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r3; \ 45 - (pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \ 46 - (pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of register backing store */ \ 47 - ;; \ 48 - (pUStk) mov r24=ar.rnat; \ 49 - (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \ 50 - (pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \ 51 - (pUStk) dep r22=-1,r22,61,3; /* compute kernel virtual addr of RBS */ \ 52 - ;; \ 53 - (pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \ 54 - ;; \ 55 - (pUStk) mov r18=ar.bsp; \ 56 - (pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \ 57 - 58 - #define MINSTATE_END_SAVE_MIN_PHYS \ 59 - dep r12=-1,r12,61,3; /* make sp a kernel virtual address */ \ 60 - ;; 61 - 62 - #ifdef MINSTATE_VIRT 63 - # define MINSTATE_GET_CURRENT(reg) mov reg=IA64_KR(CURRENT) 64 - # define MINSTATE_START_SAVE_MIN MINSTATE_START_SAVE_MIN_VIRT 65 - # define MINSTATE_END_SAVE_MIN MINSTATE_END_SAVE_MIN_VIRT 66 - #endif 67 - 68 - #ifdef MINSTATE_PHYS 69 - # define MINSTATE_GET_CURRENT(reg) mov reg=IA64_KR(CURRENT);; tpa reg=reg 70 - # define MINSTATE_START_SAVE_MIN MINSTATE_START_SAVE_MIN_PHYS 71 - # define MINSTATE_END_SAVE_MIN MINSTATE_END_SAVE_MIN_PHYS 72 - #endif 73 - 74 - /* 75 8 * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves 76 9 * the minimum state necessary that allows us to turn psr.ic back 77 10 * on. ··· 30 97 * we can pass interruption state as arguments to a handler. 31 98 */ 32 99 #define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \ 33 - MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \ 100 + mov r16=IA64_KR(CURRENT); /* M */ \ 34 101 mov r27=ar.rsc; /* M */ \ 35 102 mov r20=r1; /* A */ \ 36 103 mov r25=ar.unat; /* M */ \ ··· 51 118 SAVE_IFS; \ 52 119 cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? 
*/ \ 53 120 ;; \ 54 - MINSTATE_START_SAVE_MIN \ 121 + (pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \ 122 + ;; \ 123 + (pUStk) mov.m r24=ar.rnat; \ 124 + (pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \ 125 + (pKStk) mov r1=sp; /* get sp */ \ 126 + ;; \ 127 + (pUStk) lfetch.fault.excl.nt1 [r22]; \ 128 + (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \ 129 + (pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \ 130 + ;; \ 131 + (pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \ 132 + (pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \ 133 + ;; \ 134 + (pUStk) mov r18=ar.bsp; \ 135 + (pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \ 55 136 adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \ 56 137 adds r16=PT(CR_IPSR),r1; \ 57 138 ;; \ ··· 128 181 EXTRA; \ 129 182 movl r1=__gp; /* establish kernel global pointer */ \ 130 183 ;; \ 131 - MINSTATE_END_SAVE_MIN 184 + bsw.1; /* switch back to bank 1 (must be last in insn group) */ \ 185 + ;; 132 186 133 187 /* 134 188 * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
+66 -61
arch/ia64/kernel/palinfo.c
··· 307 307 
308 308 if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) { 
309 309 printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status); 
310 - return 0; 
311 - } 310 + } else { 
312 311 
313 - 
314 - p += sprintf(p, 312 + p += sprintf(p, 
315 313 "Physical Address Space : %d bits\n" 
316 314 "Virtual Address Space : %d bits\n" 
317 315 "Protection Key Registers(PKR) : %d\n" 
··· 317 319 "Hash Tag ID : 0x%x\n" 
318 320 "Size of RR.rid : %d\n", 
319 321 vm_info_1.pal_vm_info_1_s.phys_add_size, 
320 - vm_info_2.pal_vm_info_2_s.impl_va_msb+1, vm_info_1.pal_vm_info_1_s.max_pkr+1, 
321 - vm_info_1.pal_vm_info_1_s.key_size, vm_info_1.pal_vm_info_1_s.hash_tag_id, 322 + vm_info_2.pal_vm_info_2_s.impl_va_msb+1, 
323 + vm_info_1.pal_vm_info_1_s.max_pkr+1, 
324 + vm_info_1.pal_vm_info_1_s.key_size, 
325 + vm_info_1.pal_vm_info_1_s.hash_tag_id, 
322 326 vm_info_2.pal_vm_info_2_s.rid_size); 
323 - 
324 - if (ia64_pal_mem_attrib(&attrib) != 0) 
325 - return 0; 
326 - 
327 - p += sprintf(p, "Supported memory attributes : "); 
328 - sep = ""; 
329 - for (i = 0; i < 8; i++) { 
330 - if (attrib & (1 << i)) { 
331 - p += sprintf(p, "%s%s", sep, mem_attrib[i]); 
332 - sep = ", "; 
333 - } 
334 327 } 
335 - p += sprintf(p, "\n"); 328 + 
329 + if (ia64_pal_mem_attrib(&attrib) == 0) { 
330 + p += sprintf(p, "Supported memory attributes : "); 
331 + sep = ""; 
332 + for (i = 0; i < 8; i++) { 
333 + if (attrib & (1 << i)) { 
334 + p += sprintf(p, "%s%s", sep, mem_attrib[i]); 
335 + sep = ", "; 
336 + } 
337 + } 
338 + p += sprintf(p, "\n"); 
339 + } 
336 340 
337 341 if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) !=0) { 
338 342 printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status); 
339 - return 0; 343 + } else { 
344 + 
345 + p += sprintf(p, 
346 + "\nTLB walker : %simplemented\n" 
347 + "Number of DTR : %d\n" 
348 + "Number of ITR : %d\n" 
349 + "TLB insertable page sizes : ", 
350 + vm_info_1.pal_vm_info_1_s.vw ? "" : "not ", 
351 + vm_info_1.pal_vm_info_1_s.max_dtr_entry+1, 
352 + vm_info_1.pal_vm_info_1_s.max_itr_entry+1); 
353 + 
354 + 
355 + p = bitvector_process(p, tr_pages); 
356 + 
357 + p += sprintf(p, "\nTLB purgeable page sizes : "); 
358 + 
359 + p = bitvector_process(p, vw_pages); 
340 360 } 
341 - 
342 - p += sprintf(p, 
343 - "\nTLB walker : %simplemented\n" 
344 - "Number of DTR : %d\n" 
345 - "Number of ITR : %d\n" 
346 - "TLB insertable page sizes : ", 
347 - vm_info_1.pal_vm_info_1_s.vw ? "" : "not ", 
348 - vm_info_1.pal_vm_info_1_s.max_dtr_entry+1, 
349 - vm_info_1.pal_vm_info_1_s.max_itr_entry+1); 
350 - 
351 - 
352 - p = bitvector_process(p, tr_pages); 
353 - 
354 - p += sprintf(p, "\nTLB purgeable page sizes : "); 
355 - 
356 - p = bitvector_process(p, vw_pages); 
357 - 
358 361 if ((status=ia64_get_ptce(&ptce)) != 0) { 
359 362 printk(KERN_ERR "ia64_get_ptce=%ld\n", status); 
360 - return 0; 
361 - } 
362 - 
363 - p += sprintf(p, 363 + } else { 
364 + p += sprintf(p, 
364 365 "\nPurge base address : 0x%016lx\n" 
365 366 "Purge outer loop count : %d\n" 
366 367 "Purge inner loop count : %d\n" 
367 368 "Purge outer loop stride : %d\n" 
368 369 "Purge inner loop stride : %d\n", 
369 - ptce.base, ptce.count[0], ptce.count[1], ptce.stride[0], ptce.stride[1]); 370 + ptce.base, ptce.count[0], ptce.count[1], 
371 + ptce.stride[0], ptce.stride[1]); 
370 372 
371 - p += sprintf(p, 373 + p += sprintf(p, 
372 374 "TC Levels : %d\n" 
373 375 "Unique TC(s) : %d\n", 
374 376 vm_info_1.pal_vm_info_1_s.num_tc_levels, 
375 377 vm_info_1.pal_vm_info_1_s.max_unique_tcs); 
376 378 
377 - for(i=0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) { 
378 - for (j=2; j>0 ; j--) { 
379 - tc_pages = 0; /* just in case */ 379 + for(i=0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) { 
380 + for (j=2; j>0 ; j--) { 
381 + tc_pages = 0; /* just in case */ 
380 382 
381 383 
382 - /* even without unification, some levels may not be present */ 
383 - if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0) { 
384 - continue; 
385 - } 384 + /* even without unification, some levels may not be present */ 
385 + if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0) { 
386 + continue; 
387 + } 
386 388 
387 - p += sprintf(p, 389 + p += sprintf(p, 
388 390 "\n%s Translation Cache Level %d:\n" 
389 391 "\tHash sets : %d\n" 
390 392 "\tAssociativity : %d\n" 
391 393 "\tNumber of entries : %d\n" 
392 394 "\tFlags : ", 
393 - cache_types[j+tc_info.tc_unified], i+1, tc_info.tc_num_sets, 
394 - tc_info.tc_associativity, tc_info.tc_num_entries); 395 + cache_types[j+tc_info.tc_unified], i+1, 
396 + tc_info.tc_num_sets, 
397 + tc_info.tc_associativity, 
398 + tc_info.tc_num_entries); 
395 399 
396 - if (tc_info.tc_pf) p += sprintf(p, "PreferredPageSizeOptimized "); 
397 - if (tc_info.tc_unified) p += sprintf(p, "Unified "); 
398 - if (tc_info.tc_reduce_tr) p += sprintf(p, "TCReduction"); 400 + if (tc_info.tc_pf) 
401 + p += sprintf(p, "PreferredPageSizeOptimized "); 
402 + if (tc_info.tc_unified) 
403 + p += sprintf(p, "Unified "); 
404 + if (tc_info.tc_reduce_tr) 
405 + p += sprintf(p, "TCReduction"); 
399 406 
400 - p += sprintf(p, "\n\tSupported page sizes: "); 407 + p += sprintf(p, "\n\tSupported page sizes: "); 
401 408 
402 - p = bitvector_process(p, tc_pages); 409 + p = bitvector_process(p, tc_pages); 
403 410 
404 - /* when unified date (j=2) is enough */ 
405 - if (tc_info.tc_unified) break; 411 + /* when unified data (j=2) is enough */ 
412 + if (tc_info.tc_unified) 
413 + break; 
414 + } 
406 415 } 
407 416 } 
408 417 p += sprintf(p, "\n"); 
··· 445 440 p += sprintf(p, "\n"); 
446 441 } 
447 442 
448 - if (ia64_pal_rse_info(&phys_stacked, &hints) != 0) return 0; 443 + if (ia64_pal_rse_info(&phys_stacked, &hints) == 0) { 
449 444 
450 445 p += sprintf(p, 
451 446 "RSE stacked physical registers : %ld\n" 
452 447 "RSE load/store hints : %ld (%s)\n", 
453 448 phys_stacked, hints.ph_data, 
454 449 hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(??)"); 
455 - 450 + } 
456 451 if (ia64_pal_debug_info(&iregs, &dregs)) 
457 452 return 0; 
458 453 
+34 -28
arch/ia64/kernel/salinfo.c
··· 22 22 * 23 23 * Dec 5 2004 kaos@sgi.com 24 24 * Standardize which records are cleared automatically. 25 + * 26 + * Aug 18 2005 kaos@sgi.com 27 + * mca.c may not pass a buffer, a NULL buffer just indicates that a new 28 + * record is available in SAL. 29 + * Replace some NR_CPUS by cpus_online, for hotplug cpu. 25 30 */ 26 31 27 32 #include <linux/types.h> ··· 198 193 * The buffer passed from mca.c points to the output from ia64_log_get. This is 199 194 * a persistent buffer but its contents can change between the interrupt and 200 195 * when user space processes the record. Save the record id to identify 201 - * changes. 196 + * changes. If the buffer is NULL then just update the bitmap. 202 197 */ 203 198 void 204 199 salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe) ··· 211 206 212 207 BUG_ON(type >= ARRAY_SIZE(salinfo_log_name)); 213 208 214 - if (irqsafe) 215 - spin_lock_irqsave(&data_saved_lock, flags); 216 - for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) { 217 - if (!data_saved->buffer) 218 - break; 209 + if (buffer) { 210 + if (irqsafe) 211 + spin_lock_irqsave(&data_saved_lock, flags); 212 + for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) { 213 + if (!data_saved->buffer) 214 + break; 215 + } 216 + if (i == saved_size) { 217 + if (!data->saved_num) { 218 + shift1_data_saved(data, 0); 219 + data_saved = data->data_saved + saved_size - 1; 220 + } else 221 + data_saved = NULL; 222 + } 223 + if (data_saved) { 224 + data_saved->cpu = smp_processor_id(); 225 + data_saved->id = ((sal_log_record_header_t *)buffer)->id; 226 + data_saved->size = size; 227 + data_saved->buffer = buffer; 228 + } 229 + if (irqsafe) 230 + spin_unlock_irqrestore(&data_saved_lock, flags); 219 231 } 220 - if (i == saved_size) { 221 - if (!data->saved_num) { 222 - shift1_data_saved(data, 0); 223 - data_saved = data->data_saved + saved_size - 1; 224 - } else 225 - data_saved = NULL; 226 - } 227 - if (data_saved) { 228 - data_saved->cpu = smp_processor_id(); 229 - data_saved->id = ((sal_log_record_header_t *)buffer)->id; 230 - data_saved->size = size; 231 - data_saved->buffer = buffer; 232 - } 233 - if (irqsafe) 234 - spin_unlock_irqrestore(&data_saved_lock, flags); 235 232 236 233 if (!test_and_set_bit(smp_processor_id(), &data->cpu_event)) { 237 234 if (irqsafe) ··· 251 244 int i; 252 245 if (!data->open) 253 246 return; 254 - for (i = 0; i < NR_CPUS; ++i) { 247 + for_each_online_cpu(i) { 255 248 if (test_bit(i, &data->cpu_event)) { 256 249 /* double up() is not a problem, user space will see no 257 250 * records for the additional "events". ··· 298 291 299 292 n = data->cpu_check; 300 293 for (i = 0; i < NR_CPUS; i++) { 301 - if (test_bit(n, &data->cpu_event)) { 294 + if (test_bit(n, &data->cpu_event) && cpu_online(n)) { 302 295 cpu = n; 303 296 break; 304 297 } ··· 592 585 593 586 /* we missed any events before now */ 594 587 online = 0; 595 - for (j = 0; j < NR_CPUS; j++) 596 - if (cpu_online(j)) { 597 - set_bit(j, &data->cpu_event); 598 - ++online; 599 - } 588 + for_each_online_cpu(j) { 589 + set_bit(j, &data->cpu_event); 590 + ++online; 591 + } 600 592 sema_init(&data->sem, online); 601 593 602 594 *sdir++ = dir;
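
A sketch of the new calling convention described in the Aug 18 2005 note above; the actual call site lives in mca.c and is not part of this hunk:

	/* Announce that a new record is available in SAL without handing
	 * over a buffer: the data_saved bookkeeping is skipped and only
	 * the per-cpu event bit is set before waking any reader. */
	salinfo_log_wakeup(SAL_INFO_TYPE_MCA, NULL, 0, 0);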
-22
arch/ia64/kernel/unwind.c
··· 2020 2020 } 2021 2021 2022 2022 void 2023 - unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t, 2024 - struct pt_regs *pt, struct switch_stack *sw) 2025 - { 2026 - unsigned long sof; 2027 - 2028 - init_frame_info(info, t, sw, pt->r12); 2029 - info->cfm_loc = &pt->cr_ifs; 2030 - info->unat_loc = &pt->ar_unat; 2031 - info->pfs_loc = &pt->ar_pfs; 2032 - sof = *info->cfm_loc & 0x7f; 2033 - info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sof); 2034 - info->ip = pt->cr_iip + ia64_psr(pt)->ri; 2035 - info->pt = (unsigned long) pt; 2036 - UNW_DPRINT(3, "unwind.%s:\n" 2037 - " bsp 0x%lx\n" 2038 - " sof 0x%lx\n" 2039 - " ip 0x%lx\n", 2040 - __FUNCTION__, info->bsp, sof, info->ip); 2041 - find_save_locs(info); 2042 - } 2043 - 2044 - void 2045 2023 unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw) 2046 2024 { 2047 2025 unsigned long sol;
+12 -3
arch/ia64/mm/init.c
··· 382 382 383 383 if (impl_va_bits < 51 || impl_va_bits > 61) 384 384 panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1); 385 + /* 386 + * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need, 387 + * which must fit into "vmlpt_bits - pte_bits" slots. Second half of 388 + * the test makes sure that our mapped space doesn't overlap the 389 + * unimplemented hole in the middle of the region. 390 + */ 391 + if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) || 392 + (mapped_space_bits > impl_va_bits - 1)) 393 + panic("Cannot build a big enough virtual-linear page table" 394 + " to cover mapped address space.\n" 395 + " Try using a smaller page size.\n"); 396 + 385 397 386 398 /* place the VMLPT at the end of each page-table mapped region: */ 387 399 pta = POW2(61) - POW2(vmlpt_bits); 388 400 389 - if (POW2(mapped_space_bits) >= pta) 390 - panic("mm/init: overlap between virtually mapped linear page table and " 391 - "mapped kernel space!"); 392 401 /* 393 402 * Set the (virtually mapped linear) page table address. Bit 394 403 * 8 selects between the short and long format, bits 2-7 the
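
A worked instance of the new test, assuming the macro definitions earlier in ia64_mmu_init (pte_bits == 3, mapped_space_bits == 3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT, vmlpt_bits == impl_va_bits - PAGE_SHIFT + pte_bits), which are not visible in this hunk; the numbers are illustrative:

	/* 64KB pages (PAGE_SHIFT = 16) on a CPU with impl_va_bits = 51:
	 *   mapped_space_bits = 3*(16-3) + 16 = 55
	 *   vmlpt_bits        = 51 - 16 + 3  = 38
	 * First clause: 55 - 16 = 39 > 38 - 3 = 35, the ptes do not fit in
	 * the VMLPT.  Second clause: 55 > 51 - 1 = 50, the mapped space
	 * would also reach into the unimplemented hole.  Both trip, so the
	 * kernel panics and suggests a smaller page size.
	 * 16KB pages (PAGE_SHIFT = 14) on the same CPU:
	 *   mapped_space_bits = 3*(14-3) + 14 = 47, vmlpt_bits = 40,
	 * 47 - 14 = 33 <= 37 and 47 <= 50, so both tests pass.
	 */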
+19 -11
arch/ia64/sn/kernel/setup.c
··· 49 49 #include <asm/sn/clksupport.h> 50 50 #include <asm/sn/sn_sal.h> 51 51 #include <asm/sn/geo.h> 52 + #include <asm/sn/sn_feature_sets.h> 52 53 #include "xtalk/xwidgetdev.h" 53 54 #include "xtalk/hubdev.h" 54 55 #include <asm/sn/klconfig.h> ··· 98 97 int sn_prom_type; /* 0=hardware, 1=medusa/realprom, 2=medusa/fakeprom */ 99 98 100 99 short physical_node_map[MAX_PHYSNODE_ID]; 100 + static unsigned long sn_prom_features[MAX_PROM_FEATURE_SETS]; 101 101 102 102 EXPORT_SYMBOL(physical_node_map); 103 103 ··· 273 271 u32 version = sn_sal_rev(); 274 272 extern void sn_cpu_init(void); 275 273 276 - ia64_sn_plat_set_error_handling_features(); 274 + ia64_sn_plat_set_error_handling_features(); // obsolete 275 + ia64_sn_set_os_feature(OSF_MCA_SLV_TO_OS_INIT_SLV); 276 + ia64_sn_set_os_feature(OSF_FEAT_LOG_SBES); 277 + 277 278 278 279 #if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE) 279 280 /* ··· 318 313 } 319 314 320 315 printk("SGI SAL version %x.%02x\n", version >> 8, version & 0x00FF); 321 - 322 - /* 323 - * Confirm the SAL we're running on is recent enough... 324 - */ 325 - if (version < SN_SAL_MIN_VERSION) { 326 - printk(KERN_ERR "This kernel needs SGI SAL version >= " 327 - "%x.%02x\n", SN_SAL_MIN_VERSION >> 8, 328 - SN_SAL_MIN_VERSION & 0x00FF); 329 - panic("PROM version too old\n"); 330 - } 331 316 332 317 master_nasid = boot_get_nasid(); 333 318 ··· 474 479 */ 475 480 if (nodepdaindr[0] == NULL) 476 481 return; 482 + 483 + for (i = 0; i < MAX_PROM_FEATURE_SETS; i++) 484 + if (ia64_sn_get_prom_feature_set(i, &sn_prom_features[i]) != 0) 485 + break; 477 486 478 487 cpuid = smp_processor_id(); 479 488 cpuphyid = get_sapicid(); ··· 650 651 651 652 return -1; 652 653 } 654 + 655 + int sn_prom_feature_available(int id) 656 + { 657 + if (id >= BITS_PER_LONG * MAX_PROM_FEATURE_SETS) 658 + return 0; 659 + return test_bit(id, sn_prom_features); 660 + } 661 + EXPORT_SYMBOL(sn_prom_feature_available); 662 +
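
A usage sketch for the new query; PRF_EXAMPLE_FEATURE is a placeholder, the real bit numbers come from <asm/sn/sn_feature_sets.h>:

	/* Guard a PROM-dependent code path on a negotiated feature bit.
	 * PRF_EXAMPLE_FEATURE is hypothetical, not a real feature id. */
	if (sn_prom_feature_available(PRF_EXAMPLE_FEATURE)) {
		/* ... the PROM advertises this feature ... */
	}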
+64 -38
include/asm-ia64/mca.h
··· 11 11 #ifndef _ASM_IA64_MCA_H 12 12 #define _ASM_IA64_MCA_H 13 13 14 - #define IA64_MCA_STACK_SIZE 8192 15 - 16 14 #if !defined(__ASSEMBLY__) 17 15 18 16 #include <linux/interrupt.h> ··· 46 48 47 49 enum { 48 50 IA64_MCA_RENDEZ_CHECKIN_NOTDONE = 0x0, 49 - IA64_MCA_RENDEZ_CHECKIN_DONE = 0x1 51 + IA64_MCA_RENDEZ_CHECKIN_DONE = 0x1, 52 + IA64_MCA_RENDEZ_CHECKIN_INIT = 0x2, 50 53 }; 51 54 52 55 /* Information maintained by the MC infrastructure */ ··· 62 63 63 64 } ia64_mc_info_t; 64 65 65 - typedef struct ia64_mca_sal_to_os_state_s { 66 - u64 imsto_os_gp; /* GP of the os registered with the SAL */ 67 - u64 imsto_pal_proc; /* PAL_PROC entry point - physical addr */ 68 - u64 imsto_sal_proc; /* SAL_PROC entry point - physical addr */ 69 - u64 imsto_sal_gp; /* GP of the SAL - physical */ 70 - u64 imsto_rendez_state; /* Rendez state information */ 71 - u64 imsto_sal_check_ra; /* Return address in SAL_CHECK while going 72 - * back to SAL from OS after MCA handling. 73 - */ 74 - u64 pal_min_state; /* from PAL in r17 */ 75 - u64 proc_state_param; /* from PAL in r18. See SDV 2:268 11.3.2.1 */ 76 - } ia64_mca_sal_to_os_state_t; 66 + /* Handover state from SAL to OS and vice versa, for both MCA and INIT events. 67 + * Besides the handover state, it also contains some saved registers from the 68 + * time of the event. 69 + * Note: mca_asm.S depends on the precise layout of this structure. 70 + */ 71 + 72 + struct ia64_sal_os_state { 73 + /* SAL to OS, must be at offset 0 */ 74 + u64 os_gp; /* GP of the os registered with the SAL, physical */ 75 + u64 pal_proc; /* PAL_PROC entry point, physical */ 76 + u64 sal_proc; /* SAL_PROC entry point, physical */ 77 + u64 rv_rc; /* MCA - Rendezvous state, INIT - reason code */ 78 + u64 proc_state_param; /* from R18 */ 79 + u64 monarch; /* 1 for a monarch event, 0 for a slave */ 80 + /* common, must follow SAL to OS */ 81 + u64 sal_ra; /* Return address in SAL, physical */ 82 + u64 sal_gp; /* GP of the SAL - physical */ 83 + pal_min_state_area_t *pal_min_state; /* from R17. physical in asm, virtual in C */ 84 + u64 prev_IA64_KR_CURRENT; /* previous value of IA64_KR(CURRENT) */ 85 + struct task_struct *prev_task; /* previous task, NULL if it is not useful */ 86 + /* Some interrupt registers are not saved in minstate, pt_regs or 87 + * switch_stack. Because MCA/INIT can occur when interrupts are 88 + * disabled, we need to save the additional interrupt registers over 89 + * MCA/INIT and resume. 90 + */ 91 + u64 isr; 92 + u64 ifa; 93 + u64 itir; 94 + u64 iipa; 95 + u64 iim; 96 + u64 iha; 97 + /* OS to SAL, must follow common */ 98 + u64 os_status; /* OS status to SAL, enum below */ 99 + u64 context; /* 0 if return to same context 100 + 1 if return to new context */ 101 + }; 77 102 78 103 enum { 79 104 IA64_MCA_CORRECTED = 0x0, /* Error has been corrected by OS_MCA */ ··· 107 84 }; 108 85 109 86 enum { 87 + IA64_INIT_RESUME = 0x0, /* Resume after return from INIT */ 88 + IA64_INIT_WARM_BOOT = -1, /* Warm boot of the system need from SAL */ 89 + }; 90 + 91 + enum { 110 92 IA64_MCA_SAME_CONTEXT = 0x0, /* SAL to return to same context */ 111 93 IA64_MCA_NEW_CONTEXT = -1 /* SAL to return to new context */ 112 94 }; 113 95 114 - typedef struct ia64_mca_os_to_sal_state_s { 115 - u64 imots_os_status; /* OS status to SAL as to what happened 116 - * with the MCA handling. 
117 - */ 
118 - u64 imots_sal_gp; /* GP of the SAL - physical */ 
119 - u64 imots_context; /* 0 if return to same context 
120 - 1 if return to new context */ 
121 - u64 *imots_new_min_state; /* Pointer to structure containing 
122 - * new values of registers in the min state 
123 - * save area. 
124 - */ 
125 - u64 imots_sal_check_ra; /* Return address in SAL_CHECK while going 
126 - * back to SAL from OS after MCA handling. 
127 - */ 
128 - } ia64_mca_os_to_sal_state_t; 
129 - 
130 96 /* Per-CPU MCA state that is too big for normal per-CPU variables. */ 
131 97 
132 98 struct ia64_mca_cpu { 
133 - u64 stack[IA64_MCA_STACK_SIZE/8]; /* MCA memory-stack */ 
134 - u64 proc_state_dump[512]; 
135 - u64 stackframe[32]; 
136 - u64 rbstore[IA64_MCA_STACK_SIZE/8]; /* MCA reg.-backing store */ 99 + u64 mca_stack[KERNEL_STACK_SIZE/8]; 
137 100 u64 init_stack[KERNEL_STACK_SIZE/8]; 
138 - } __attribute__ ((aligned(16))); 101 + }; 
139 102 
140 103 /* Array of physical addresses of each CPU's MCA area. */ 
141 104 extern unsigned long __per_cpu_mca[NR_CPUS]; 
··· 130 121 extern void ia64_mca_cpu_init(void *); 
131 122 extern void ia64_os_mca_dispatch(void); 
132 123 extern void ia64_os_mca_dispatch_end(void); 
133 - extern void ia64_mca_ucmc_handler(void); 124 + extern void ia64_mca_ucmc_handler(struct pt_regs *, struct ia64_sal_os_state *); 
125 + extern void ia64_init_handler(struct pt_regs *, 
126 + struct switch_stack *, 
127 + struct ia64_sal_os_state *); 
134 128 extern void ia64_monarch_init_handler(void); 
135 129 extern void ia64_slave_init_handler(void); 
136 130 extern void ia64_mca_cmc_vector_setup(void); 
137 - extern int ia64_reg_MCA_extension(void*); 131 + extern int ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *)); 
138 132 extern void ia64_unreg_MCA_extension(void); 
139 133 extern u64 ia64_get_rnat(u64 *); 
134 + 
135 + #else /* __ASSEMBLY__ */ 
136 + 
137 + #define IA64_MCA_CORRECTED 0x0 /* Error has been corrected by OS_MCA */ 
138 + #define IA64_MCA_WARM_BOOT -1 /* Warm boot of the system needed from SAL */ 
139 + #define IA64_MCA_COLD_BOOT -2 /* Cold boot of the system needed from SAL */ 
140 + #define IA64_MCA_HALT -3 /* System to be halted by SAL */ 
141 + 
142 + #define IA64_INIT_RESUME 0x0 /* Resume after return from INIT */ 
143 + #define IA64_INIT_WARM_BOOT -1 /* Warm boot of the system needed from SAL */ 
144 + 
145 + #define IA64_MCA_SAME_CONTEXT 0x0 /* SAL to return to same context */ 
146 + #define IA64_MCA_NEW_CONTEXT -1 /* SAL to return to new context */ 
139 147 
140 148 #endif /* !__ASSEMBLY__ */ 
141 149 #endif /* _ASM_IA64_MCA_H */
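
Under the reworked prototype a recovery extension is handed the SAL/OS state directly instead of reading file-static copies. A minimal registration sketch; my_recover and its policy are hypothetical, only the function-pointer signature and ia64_reg_MCA_extension come from this header, and the nonzero-means-recovered convention follows mca_try_to_recover in mca_drv.c:

	/* Hypothetical MCA recovery extension. */
	static int my_recover(void *record, struct ia64_sal_os_state *sos)
	{
		if (sos->rv_rc != 0)	/* e.g. treat any rendezvous activity as global */
			return 0;	/* do not claim a global event */
		/* ... inspect the SAL record, patch sos->pal_min_state ... */
		return 1;		/* claim the event as recovered */
	}

	static int __init my_recover_init(void)
	{
		return ia64_reg_MCA_extension(my_recover);
	}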
+27 -98
include/asm-ia64/mca_asm.h
··· 8 8 * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com> 9 9 * Copyright (C) 2002 Intel Corp. 10 10 * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com> 11 + * Copyright (C) 2005 Silicon Graphics, Inc 12 + * Copyright (C) 2005 Keith Owens <kaos@sgi.com> 11 13 */ 12 14 #ifndef _ASM_IA64_MCA_ASM_H 13 15 #define _ASM_IA64_MCA_ASM_H ··· 209 207 ;; 210 208 211 209 /* 212 - * The following offsets capture the order in which the 213 - * RSE related registers from the old context are 214 - * saved onto the new stack frame. 210 + * The MCA and INIT stacks in struct ia64_mca_cpu look like normal kernel 211 + * stacks, except that the SAL/OS state and a switch_stack are stored near the 212 + * top of the MCA/INIT stack. To support concurrent entry to MCA or INIT, as 213 + * well as MCA over INIT, each event needs its own SAL/OS state. All entries 214 + * are 16 byte aligned. 215 215 * 216 - * +-----------------------+ 217 - * |NDIRTY [BSP - BSPSTORE]| 218 - * +-----------------------+ 219 - * | RNAT | 220 - * +-----------------------+ 221 - * | BSPSTORE | 222 - * +-----------------------+ 223 - * | IFS | 224 - * +-----------------------+ 225 - * | PFS | 226 - * +-----------------------+ 227 - * | RSC | 228 - * +-----------------------+ <-------- Bottom of new stack frame 216 + * +---------------------------+ 217 + * | pt_regs | 218 + * +---------------------------+ 219 + * | switch_stack | 220 + * +---------------------------+ 221 + * | SAL/OS state | 222 + * +---------------------------+ 223 + * | 16 byte scratch area | 224 + * +---------------------------+ <-------- SP at start of C MCA handler 225 + * | ..... | 226 + * +---------------------------+ 227 + * | RBS for MCA/INIT handler | 228 + * +---------------------------+ 229 + * | struct task for MCA/INIT | 230 + * +---------------------------+ <-------- Bottom of MCA/INIT stack 229 231 */ 230 - #define rse_rsc_offset 0 231 - #define rse_pfs_offset (rse_rsc_offset+0x08) 232 - #define rse_ifs_offset (rse_pfs_offset+0x08) 233 - #define rse_bspstore_offset (rse_ifs_offset+0x08) 234 - #define rse_rnat_offset (rse_bspstore_offset+0x08) 235 - #define rse_ndirty_offset (rse_rnat_offset+0x08) 236 232 237 - /* 238 - * rse_switch_context 239 - * 240 - * 1. Save old RSC onto the new stack frame 241 - * 2. Save PFS onto new stack frame 242 - * 3. Cover the old frame and start a new frame. 243 - * 4. Save IFS onto new stack frame 244 - * 5. Save the old BSPSTORE on the new stack frame 245 - * 6. Save the old RNAT on the new stack frame 246 - * 7. Write BSPSTORE with the new backing store pointer 247 - * 8. Read and save the new BSP to calculate the #dirty registers 248 - * NOTE: Look at pages 11-10, 11-11 in PRM Vol 2 249 - */ 250 - #define rse_switch_context(temp,p_stackframe,p_bspstore) \ 251 - ;; \ 252 - mov temp=ar.rsc;; \ 253 - st8 [p_stackframe]=temp,8;; \ 254 - mov temp=ar.pfs;; \ 255 - st8 [p_stackframe]=temp,8; \ 256 - cover ;; \ 257 - mov temp=cr.ifs;; \ 258 - st8 [p_stackframe]=temp,8;; \ 259 - mov temp=ar.bspstore;; \ 260 - st8 [p_stackframe]=temp,8;; \ 261 - mov temp=ar.rnat;; \ 262 - st8 [p_stackframe]=temp,8; \ 263 - mov ar.bspstore=p_bspstore;; \ 264 - mov temp=ar.bsp;; \ 265 - sub temp=temp,p_bspstore;; \ 266 - st8 [p_stackframe]=temp,8;; 267 - 268 - /* 269 - * rse_return_context 270 - * 1. Allocate a zero-sized frame 271 - * 2. Store the number of dirty registers RSC.loadrs field 272 - * 3. 
Issue a loadrs to insure that any registers from the interrupted 273 - * context which were saved on the new stack frame have been loaded 274 - * back into the stacked registers 275 - * 4. Restore BSPSTORE 276 - * 5. Restore RNAT 277 - * 6. Restore PFS 278 - * 7. Restore IFS 279 - * 8. Restore RSC 280 - * 9. Issue an RFI 281 - */ 282 - #define rse_return_context(psr_mask_reg,temp,p_stackframe) \ 283 - ;; \ 284 - alloc temp=ar.pfs,0,0,0,0; \ 285 - add p_stackframe=rse_ndirty_offset,p_stackframe;; \ 286 - ld8 temp=[p_stackframe];; \ 287 - shl temp=temp,16;; \ 288 - mov ar.rsc=temp;; \ 289 - loadrs;; \ 290 - add p_stackframe=-rse_ndirty_offset+rse_bspstore_offset,p_stackframe;;\ 291 - ld8 temp=[p_stackframe];; \ 292 - mov ar.bspstore=temp;; \ 293 - add p_stackframe=-rse_bspstore_offset+rse_rnat_offset,p_stackframe;;\ 294 - ld8 temp=[p_stackframe];; \ 295 - mov ar.rnat=temp;; \ 296 - add p_stackframe=-rse_rnat_offset+rse_pfs_offset,p_stackframe;; \ 297 - ld8 temp=[p_stackframe];; \ 298 - mov ar.pfs=temp;; \ 299 - add p_stackframe=-rse_pfs_offset+rse_ifs_offset,p_stackframe;; \ 300 - ld8 temp=[p_stackframe];; \ 301 - mov cr.ifs=temp;; \ 302 - add p_stackframe=-rse_ifs_offset+rse_rsc_offset,p_stackframe;; \ 303 - ld8 temp=[p_stackframe];; \ 304 - mov ar.rsc=temp ; \ 305 - mov temp=psr;; \ 306 - or temp=temp,psr_mask_reg;; \ 307 - mov cr.ipsr=temp;; \ 308 - mov temp=ip;; \ 309 - add temp=0x30,temp;; \ 310 - mov cr.iip=temp;; \ 311 - srlz.i;; \ 312 - rfi;; 233 + #define ALIGN16(x) ((x)&~15) 234 + #define MCA_PT_REGS_OFFSET ALIGN16(KERNEL_STACK_SIZE-IA64_PT_REGS_SIZE) 235 + #define MCA_SWITCH_STACK_OFFSET ALIGN16(MCA_PT_REGS_OFFSET-IA64_SWITCH_STACK_SIZE) 236 + #define MCA_SOS_OFFSET ALIGN16(MCA_SWITCH_STACK_OFFSET-IA64_SAL_OS_STATE_SIZE) 237 + #define MCA_SP_OFFSET ALIGN16(MCA_SOS_OFFSET-16) 313 238 314 239 #endif /* _ASM_IA64_MCA_ASM_H */
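Because each area is placed by subtracting its size from the area above it and rounding down to a 16-byte boundary, the layout is plain arithmetic. A standalone sketch that prints one possible layout; the four structure sizes are assumed round numbers for illustration, not the values generated by asm-offsets.c:

        #include <stdio.h>

        #define KERNEL_STACK_SIZE       32768   /* assumed */
        #define IA64_PT_REGS_SIZE       1024    /* assumed */
        #define IA64_SWITCH_STACK_SIZE  560     /* assumed */
        #define IA64_SAL_OS_STATE_SIZE  144     /* assumed */

        #define ALIGN16(x)              ((x) & ~15)
        #define MCA_PT_REGS_OFFSET      ALIGN16(KERNEL_STACK_SIZE-IA64_PT_REGS_SIZE)
        #define MCA_SWITCH_STACK_OFFSET ALIGN16(MCA_PT_REGS_OFFSET-IA64_SWITCH_STACK_SIZE)
        #define MCA_SOS_OFFSET          ALIGN16(MCA_SWITCH_STACK_OFFSET-IA64_SAL_OS_STATE_SIZE)
        #define MCA_SP_OFFSET           ALIGN16(MCA_SOS_OFFSET-16)

        int main(void)
        {
                /* each area sits 16-byte aligned below the previous one */
                printf("pt_regs      at 0x%x\n", MCA_PT_REGS_OFFSET);
                printf("switch_stack at 0x%x\n", MCA_SWITCH_STACK_OFFSET);
                printf("SAL/OS state at 0x%x\n", MCA_SOS_OFFSET);
                printf("handler sp   at 0x%x\n", MCA_SP_OFFSET);
                return 0;
        }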
+1 -1
include/asm-ia64/ptrace.h
··· 119 119 unsigned long ar_unat; /* interrupted task's NaT register (preserved) */ 120 120 unsigned long ar_pfs; /* prev function state */ 121 121 unsigned long ar_rsc; /* RSE configuration */ 122 - /* The following two are valid only if cr_ipsr.cpl > 0: */ 122 + /* The following two are valid only if cr_ipsr.cpl > 0 || ti->flags & _TIF_MCA_INIT */ 123 123 unsigned long ar_rnat; /* RSE NaT */ 124 124 unsigned long ar_bspstore; /* RSE bspstore */ 125 125
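Spelled out as code, the widened validity condition might read as below; ar_rnat_bspstore_valid() is a made-up helper, and the sketch assumes the stock ia64 user_mode() and current_thread_info() accessors:

        /* Per the comment above: ar_rnat/ar_bspstore are meaningful only for
         * an interruption taken in user mode, or while this task is running
         * an MCA/INIT handler. */
        static inline int
        ar_rnat_bspstore_valid(struct pt_regs *regs)
        {
                return user_mode(regs) ||
                       (current_thread_info()->flags & _TIF_MCA_INIT);
        }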
+57
include/asm-ia64/sn/sn_feature_sets.h
··· 1 + #ifndef _ASM_IA64_SN_FEATURE_SETS_H 2 + #define _ASM_IA64_SN_FEATURE_SETS_H 3 + 4 + /* 5 + * SN PROM Features 6 + * 7 + * This file is subject to the terms and conditions of the GNU General Public 8 + * License. See the file "COPYING" in the main directory of this archive 9 + * for more details. 10 + * 11 + * Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved. 12 + */ 13 + 14 + 15 + #include <asm/types.h> 16 + #include <asm/bitops.h> 17 + 18 + /* --------------------- PROM Features -----------------------------*/ 19 + extern int sn_prom_feature_available(int id); 20 + 21 + #define MAX_PROM_FEATURE_SETS 2 22 + 23 + /* 24 + * The following defines features that may or may not be supported by the 25 + * current PROM. The OS uses sn_prom_feature_available(feature) to test for 26 + * the presence of a PROM feature. Down rev (old) PROMs will always test 27 + * "false" for new features. 28 + * 29 + * Use: 30 + * if (sn_prom_feature_available(PRF_FEATURE_XXX)) 31 + * ... 32 + */ 33 + 34 + /* 35 + * Example: feature XXX 36 + */ 37 + #define PRF_FEATURE_XXX 0 38 + 39 + 40 + 41 + /* --------------------- OS Features -------------------------------*/ 42 + 43 + /* 44 + * The following defines OS features that are optionally present in 45 + * the operating system. 46 + * During boot, PROM is notified of these features via a series of calls: 47 + * 48 + * ia64_sn_set_os_feature(feature1); 49 + * 50 + * Once enabled, a feature cannot be disabled. 51 + * 52 + * By default, features are disabled unless explicitly enabled. 53 + */ 54 + #define OSF_MCA_SLV_TO_OS_INIT_SLV 0 55 + #define OSF_FEAT_LOG_SBES 1 56 + 57 + #endif /* _ASM_IA64_SN_FEATURE_SETS_H */
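Both directions reduce to simple queries and notifications at boot time. A hedged usage sketch; the two branch functions are hypothetical, while the feature names and calls are the ones defined in this header and in sn_sal.h:

        #include <linux/init.h>
        #include <asm/sn/sn_feature_sets.h>
        #include <asm/sn/sn_sal.h>

        void __init sn_feature_setup(void)
        {
                /* query the PROM for an optional capability */
                if (sn_prom_feature_available(PRF_FEATURE_XXX))
                        use_new_prom_interface();       /* hypothetical */
                else
                        use_legacy_interface();         /* hypothetical */

                /* advertise optional OS features to the PROM; once set,
                 * a feature cannot be cleared */
                ia64_sn_set_os_feature(OSF_MCA_SLV_TO_OS_INIT_SLV);
                ia64_sn_set_os_feature(OSF_FEAT_LOG_SBES);
        }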
+27 -9
include/asm-ia64/sn/sn_sal.h
··· 80 80 #define SN_SAL_RESERVED_DO_NOT_USE 0x02000062 81 81 #define SN_SAL_IOIF_GET_PCI_TOPOLOGY 0x02000064 82 82 83 + #define SN_SAL_GET_PROM_FEATURE_SET 0x02000065 84 + #define SN_SAL_SET_OS_FEATURE_SET 0x02000066 85 + 83 86 /* 84 87 * Service-specific constants 85 88 */ ··· 121 118 /* 122 119 * Error Handling Features 123 120 */ 124 - #define SAL_ERR_FEAT_MCA_SLV_TO_OS_INIT_SLV 0x1 125 - #define SAL_ERR_FEAT_LOG_SBES 0x2 121 + #define SAL_ERR_FEAT_MCA_SLV_TO_OS_INIT_SLV 0x1 // obsolete 122 + #define SAL_ERR_FEAT_LOG_SBES 0x2 // obsolete 126 123 #define SAL_ERR_FEAT_MFR_OVERRIDE 0x4 127 124 #define SAL_ERR_FEAT_SBE_THRESHOLD 0xffff0000 128 125 ··· 153 150 154 151 return (u32)(systab->sal_b_rev_major << 8 | systab->sal_b_rev_minor); 155 152 } 156 - 157 - /* 158 - * Specify the minimum PROM revsion required for this kernel. 159 - * Note that they're stored in hex format... 160 - */ 161 - #define SN_SAL_MIN_VERSION 0x0404 162 153 163 154 /* 164 155 * Returns the master console nasid, if the call fails, return an illegal ··· 333 336 } 334 337 335 338 /* 336 - * Set Error Handling Features 339 + * Set Error Handling Features (Obsolete) 337 340 */ 338 341 static inline u64 339 342 ia64_sn_plat_set_error_handling_features(void) ··· 1047 1050 struct ia64_sal_retval rv; 1048 1051 SAL_CALL_NOLOCK(rv, SN_SAL_FAKE_PROM, 0, 0, 0, 0, 0, 0, 0); 1049 1052 return (rv.status == 0); 1053 + } 1054 + 1055 + static inline int 1056 + ia64_sn_get_prom_feature_set(int set, unsigned long *feature_set) 1057 + { 1058 + struct ia64_sal_retval rv; 1059 + 1060 + SAL_CALL_NOLOCK(rv, SN_SAL_GET_PROM_FEATURE_SET, set, 0, 0, 0, 0, 0, 0); 1061 + if (rv.status != 0) 1062 + return rv.status; 1063 + *feature_set = rv.v0; 1064 + return 0; 1065 + } 1066 + 1067 + static inline int 1068 + ia64_sn_set_os_feature(int feature) 1069 + { 1070 + struct ia64_sal_retval rv; 1071 + 1072 + SAL_CALL_NOLOCK(rv, SN_SAL_SET_OS_FEATURE_SET, feature, 0, 0, 0, 0, 0, 0); 1073 + return rv.status; 1050 1074 } 1051 1075 1052 1076 #endif /* _ASM_IA64_SN_SN_SAL_H */
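One plausible way to back sn_prom_feature_available() with the new SN_SAL_GET_PROM_FEATURE_SET call is to fetch each 64-bit feature word once at boot and test bits from the cache afterwards; the caching scheme below is a sketch, not code from this commit:

        static unsigned long sn_prom_features[MAX_PROM_FEATURE_SETS];

        static void __init sn_init_prom_features(void)
        {
                int set;

                /* nonzero status means a down-rev PROM: report no features */
                for (set = 0; set < MAX_PROM_FEATURE_SETS; set++)
                        if (ia64_sn_get_prom_feature_set(set,
                                                &sn_prom_features[set]) != 0)
                                sn_prom_features[set] = 0;
        }

        int
        sn_prom_feature_available(int id)
        {
                if (id >= MAX_PROM_FEATURE_SETS * 64)
                        return 0;
                return test_bit(id, sn_prom_features);
        }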
+2
include/asm-ia64/thread_info.h
··· 76 76 #define TIF_SIGDELAYED 5 /* signal delayed from MCA/INIT/NMI/PMI context */ 77 77 #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 78 78 #define TIF_MEMDIE 17 79 + #define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */ 79 80 80 81 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 81 82 #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) ··· 86 85 #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 87 86 #define _TIF_SIGDELAYED (1 << TIF_SIGDELAYED) 88 87 #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) 88 + #define _TIF_MCA_INIT (1 << TIF_MCA_INIT) 89 89 90 90 /* "work to do on user-return" bits */ 91 91 #define TIF_ALLWORK_MASK (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SIGDELAYED)
-7
include/asm-ia64/unwind.h
··· 114 114 */ 115 115 extern void unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t); 116 116 117 - /* 118 - * Prepare to unwind from interruption. The pt-regs and switch-stack structures must have 119 - * be "adjacent" (no state modifications between pt-regs and switch-stack). 120 - */ 121 - extern void unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t, 122 - struct pt_regs *pt, struct switch_stack *sw); 123 - 124 117 extern void unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, 125 118 struct switch_stack *sw); 126 119
+2
include/linux/sched.h
··· 904 904 extern int idle_cpu(int cpu); 905 905 extern int sched_setscheduler(struct task_struct *, int, struct sched_param *); 906 906 extern task_t *idle_task(int cpu); 907 + extern task_t *curr_task(int cpu); 908 + extern void set_curr_task(int cpu, task_t *p); 907 909 908 910 void yield(void); 909 911
+26
kernel/sched.c
··· 3577 3577 } 3578 3578 3579 3579 /** 3580 + * curr_task - return the current task for a given cpu. 3581 + * @cpu: the processor in question. 3582 + */ 3583 + task_t *curr_task(int cpu) 3584 + { 3585 + return cpu_curr(cpu); 3586 + } 3587 + 3588 + /** 3589 + * set_curr_task - set the current task for a given cpu. 3590 + * @cpu: the processor in question. 3591 + * @p: the task pointer to set. 3592 + * 3593 + * Description: This function must only be used when non-maskable interrupts 3594 + * are serviced on a separate stack. It allows the architecture to switch the 3595 + * notion of the current task on a cpu in a non-blocking manner. This function 3596 + * must be called with interrupts disabled, the caller must save the original 3597 + * value of the current task (see curr_task() above) and restore that value 3598 + * before reenabling interrupts. 3599 + */ 3600 + void set_curr_task(int cpu, task_t *p) 3601 + { 3602 + cpu_curr(cpu) = p; 3603 + } 3604 + 3605 + /** 3580 3606 * find_process_by_pid - find a process with a matching PID value. 3581 3607 * @pid: the pid in question. 3582 3608 */
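Tied back to the ia64 MCA/INIT work earlier in this merge, the save/switch/restore discipline that the comment prescribes would look roughly like this; the handler and the work function are illustrative names, not code from this commit:

        /* entered with interrupts disabled, on a separate (MCA/INIT) stack */
        void nmi_level_event(int cpu, task_t *event_task)
        {
                task_t *previous = curr_task(cpu);      /* save */

                set_curr_task(cpu, event_task);         /* switch "current" */
                handle_nmi_level_event();               /* hypothetical */
                set_curr_task(cpu, previous);           /* restore before
                                                           irqs are re-enabled */
        }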