Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

+565 -256
+2 -8
arch/ia64/hp/sim/simserial.c
···
 			}
 		}
 		seen_esc = 0;
-		if (tty->flip.count >= TTY_FLIPBUF_SIZE) break;
 
-		*tty->flip.char_buf_ptr = ch;
-
-		*tty->flip.flag_buf_ptr = 0;
-
-		tty->flip.flag_buf_ptr++;
-		tty->flip.char_buf_ptr++;
-		tty->flip.count++;
+		if (tty_insert_flip_char(tty, ch, TTY_NORMAL) == 0)
+			break;
 	}
 	tty_flip_buffer_push(tty);
 }
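The simserial change above is a straight conversion to the generic tty flip-buffer helpers. A minimal sketch of the resulting receive-loop pattern, assuming the 2.6-era API in which tty_insert_flip_char() takes the tty_struct directly and returns the number of characters queued (0 when the flip buffer is full); the rx_chars() wrapper and its arguments are illustrative, not part of the patch:

#include <linux/tty.h>
#include <linux/tty_flip.h>

/* Illustrative receive loop, not taken from the driver above. */
static void rx_chars(struct tty_struct *tty, const unsigned char *buf, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		/* TTY_NORMAL flags the character as error-free; a return of
		 * 0 means the flip buffer is full, so stop inserting and
		 * push what has been queued so far. */
		if (tty_insert_flip_char(tty, buf[i], TTY_NORMAL) == 0)
			break;
	}
	tty_flip_buffer_push(tty);	/* hand the data to the line discipline */
}

This removes the driver's direct manipulation of tty->flip.count and the char/flag buffer pointers, which is exactly what the hunk above deletes.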
+1
arch/ia64/kernel/fsys.S
···
 	data8 0
 	data8 0
 	data8 0
+	data8 0			// 1280
 
 	.org fsyscall_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
+27
arch/ia64/kernel/jprobes.S
···
 GLOBAL_ENTRY(jprobe_inst_return)
 	br.call.sptk.many b0=jprobe_break
 END(jprobe_inst_return)
+
+GLOBAL_ENTRY(invalidate_stacked_regs)
+	movl r16=invalidate_restore_cfm
+	;;
+	mov b6=r16
+	;;
+	br.ret.sptk.many b6
+	;;
+invalidate_restore_cfm:
+	mov r16=ar.rsc
+	;;
+	mov ar.rsc=r0
+	;;
+	loadrs
+	;;
+	mov ar.rsc=r16
+	;;
+	br.cond.sptk.many rp
+END(invalidate_stacked_regs)
+
+GLOBAL_ENTRY(flush_register_stack)
+	// flush dirty regs to backing store (must be first in insn group)
+	flushrs
+	;;
+	br.ret.sptk.many rp
+END(flush_register_stack)
+
+57
arch/ia64/kernel/kprobes.c
···
 	return ret;
 }
 
+struct param_bsp_cfm {
+	unsigned long ip;
+	unsigned long *bsp;
+	unsigned long cfm;
+};
+
+static void ia64_get_bsp_cfm(struct unw_frame_info *info, void *arg)
+{
+	unsigned long ip;
+	struct param_bsp_cfm *lp = arg;
+
+	do {
+		unw_get_ip(info, &ip);
+		if (ip == 0)
+			break;
+		if (ip == lp->ip) {
+			unw_get_bsp(info, (unsigned long*)&lp->bsp);
+			unw_get_cfm(info, (unsigned long*)&lp->cfm);
+			return;
+		}
+	} while (unw_unwind(info) >= 0);
+	lp->bsp = 0;
+	lp->cfm = 0;
+	return;
+}
+
 int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct jprobe *jp = container_of(p, struct jprobe, kp);
 	unsigned long addr = ((struct fnptr *)(jp->entry))->ip;
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	struct param_bsp_cfm pa;
+	int bytes;
+
+	/*
+	 * Callee owns the argument space and could overwrite it, eg
+	 * tail call optimization. So to be absolutely safe
+	 * we save the argument space before transfering the control
+	 * to instrumented jprobe function which runs in
+	 * the process context
+	 */
+	pa.ip = regs->cr_iip;
+	unw_init_running(ia64_get_bsp_cfm, &pa);
+	bytes = (char *)ia64_rse_skip_regs(pa.bsp, pa.cfm & 0x3f)
+				- (char *)pa.bsp;
+	memcpy(kcb->jprobes_saved_stacked_regs,
+	       pa.bsp,
+	       bytes);
+	kcb->bsp = pa.bsp;
+	kcb->cfm = pa.cfm;
 
 	/* save architectural state */
 	kcb->jprobe_saved_regs = *regs;
···
 int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	int bytes;
 
+	/* restoring architectural state */
 	*regs = kcb->jprobe_saved_regs;
+
+	/* restoring the original argument space */
+	flush_register_stack();
+	bytes = (char *)ia64_rse_skip_regs(kcb->bsp, kcb->cfm & 0x3f)
+				- (char *)kcb->bsp;
+	memcpy(kcb->bsp,
+	       kcb->jprobes_saved_stacked_regs,
+	       bytes);
+	invalidate_stacked_regs();
+
 	preempt_enable_no_resched();
 	return 1;
 }
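The byte count saved and restored above comes from ia64_rse_skip_regs(), which accounts for the RSE's NaT-collection words: the register backing store holds one extra word per 63 stacked registers. For context, a sketch of that helper as it appears, to the best of my reading, in the era's include/asm-ia64/rse.h; reproduced for reference only, not part of this patch:

/* Slot index (0..63) of a backing-store address within its 64-word group. */
static __inline__ unsigned long
ia64_rse_slot_num (unsigned long *addr)
{
	return (((unsigned long) addr) >> 3) & 0x3f;
}

/*
 * Skip num_regs stacked registers from addr, stepping over the
 * NaT-collection word that occupies slot 63 of each group.
 */
static __inline__ unsigned long *
ia64_rse_skip_regs (unsigned long *addr, long num_regs)
{
	long delta = ia64_rse_slot_num(addr) + num_regs;

	if (num_regs < 0)
		delta -= 0x3e;
	return addr + num_regs + delta/0x3f;
}

This is also why MAX_PARAM_RSE_SIZE in include/asm-ia64/kprobes.h (added later in this merge) is (0x60+0x60/0x3f): 0x60 register slots plus the NaT-collection word they can span.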
+1 -1
arch/ia64/kernel/mca_asm.S
···
 	;;
 	mov cr.iim=temp3
 	mov cr.iha=temp4
-	dep r22=0,r22,62,2	// pal_min_state, physical, uncached
+	dep r22=0,r22,62,1	// pal_min_state, physical, uncached
 	mov IA64_KR(CURRENT)=r21
 	ld8 r8=[temp1]		// os_status
 	ld8 r10=[temp2]		// context
+121 -53
arch/ia64/kernel/salinfo.c
···
  *
  * Creates entries in /proc/sal for various system features.
  *
- * Copyright (c) 2003 Silicon Graphics, Inc.  All rights reserved.
+ * Copyright (c) 2003, 2006 Silicon Graphics, Inc.  All rights reserved.
  * Copyright (c) 2003 Hewlett-Packard Co
  *	Bjorn Helgaas <bjorn.helgaas@hp.com>
  *
···
  *   mca.c may not pass a buffer, a NULL buffer just indicates that a new
  *   record is available in SAL.
  *   Replace some NR_CPUS by cpus_online, for hotplug cpu.
+ *
+ * Jan  5 2006	kaos@sgi.com
+ *   Handle hotplug cpus coming online.
+ *   Handle hotplug cpus going offline while they still have outstanding records.
+ *   Use the cpu_* macros consistently.
+ *   Replace the counting semaphore with a mutex and a test if the cpumask is non-empty.
+ *   Modify the locking to make the test for "work to do" an atomic operation.
  */
 
 #include <linux/capability.h>
+#include <linux/cpu.h>
 #include <linux/types.h>
 #include <linux/proc_fs.h>
 #include <linux/module.h>
···
 };
 
 struct salinfo_data {
-	volatile cpumask_t	cpu_event;	/* which cpus have outstanding events */
-	struct semaphore	sem;		/* count of cpus with outstanding events (bits set in cpu_event) */
+	cpumask_t		cpu_event;	/* which cpus have outstanding events */
+	struct semaphore	mutex;
 	u8			*log_buffer;
 	u64			log_size;
 	u8			*oemdata;	/* decoded oem data */
···
 	int ret;
 };
 
+/* Kick the mutex that tells user space that there is work to do.  Instead of
+ * trying to track the state of the mutex across multiple cpus, in user
+ * context, interrupt context, non-maskable interrupt context and hotplug cpu,
+ * it is far easier just to grab the mutex if it is free then release it.
+ *
+ * This routine must be called with data_saved_lock held, to make the down/up
+ * operation atomic.
+ */
+static void
+salinfo_work_to_do(struct salinfo_data *data)
+{
+	down_trylock(&data->mutex);
+	up(&data->mutex);
+}
+
 static void
 salinfo_platform_oemdata_cpu(void *context)
 {
···
 	BUG_ON(type >= ARRAY_SIZE(salinfo_log_name));
 
+	if (irqsafe)
+		spin_lock_irqsave(&data_saved_lock, flags);
 	if (buffer) {
-		if (irqsafe)
-			spin_lock_irqsave(&data_saved_lock, flags);
 		for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) {
 			if (!data_saved->buffer)
 				break;
···
 			data_saved->size = size;
 			data_saved->buffer = buffer;
 		}
-		if (irqsafe)
-			spin_unlock_irqrestore(&data_saved_lock, flags);
 	}
-
-	if (!test_and_set_bit(smp_processor_id(), &data->cpu_event)) {
-		if (irqsafe)
-			up(&data->sem);
+	cpu_set(smp_processor_id(), data->cpu_event);
+	if (irqsafe) {
+		salinfo_work_to_do(data);
+		spin_unlock_irqrestore(&data_saved_lock, flags);
 	}
 }
···
 static void
 salinfo_timeout_check(struct salinfo_data *data)
 {
-	int i;
+	unsigned long flags;
 	if (!data->open)
 		return;
-	for_each_online_cpu(i) {
-		if (test_bit(i, &data->cpu_event)) {
-			/* double up() is not a problem, user space will see no
-			 * records for the additional "events".
-			 */
-			up(&data->sem);
-		}
+	if (!cpus_empty(data->cpu_event)) {
+		spin_lock_irqsave(&data_saved_lock, flags);
+		salinfo_work_to_do(data);
+		spin_unlock_irqrestore(&data_saved_lock, flags);
 	}
 }
 
 static void
 salinfo_timeout (unsigned long arg)
 {
 	salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_MCA);
···
 	int i, n, cpu = -1;
 
 retry:
-	if (down_trylock(&data->sem)) {
+	if (cpus_empty(data->cpu_event) && down_trylock(&data->mutex)) {
 		if (file->f_flags & O_NONBLOCK)
 			return -EAGAIN;
-		if (down_interruptible(&data->sem))
+		if (down_interruptible(&data->mutex))
 			return -EINTR;
 	}
 
 	n = data->cpu_check;
 	for (i = 0; i < NR_CPUS; i++) {
-		if (test_bit(n, &data->cpu_event) && cpu_online(n)) {
+		if (cpu_isset(n, data->cpu_event)) {
+			if (!cpu_online(n)) {
+				cpu_clear(n, data->cpu_event);
+				continue;
+			}
 			cpu = n;
 			break;
 		}
···
 	if (cpu == -1)
 		goto retry;
-
-	/* events are sticky until the user says "clear" */
-	up(&data->sem);
 
 	/* for next read, start checking at next CPU */
 	data->cpu_check = cpu;
···
 static void
 call_on_cpu(int cpu, void (*fn)(void *), void *arg)
 {
-	cpumask_t save_cpus_allowed, new_cpus_allowed;
-	memcpy(&save_cpus_allowed, &current->cpus_allowed, sizeof(save_cpus_allowed));
-	memset(&new_cpus_allowed, 0, sizeof(new_cpus_allowed));
-	set_bit(cpu, &new_cpus_allowed);
+	cpumask_t save_cpus_allowed = current->cpus_allowed;
+	cpumask_t new_cpus_allowed = cpumask_of_cpu(cpu);
 	set_cpus_allowed(current, new_cpus_allowed);
 	(*fn)(arg);
 	set_cpus_allowed(current, save_cpus_allowed);
···
 	if (!data->saved_num)
 		call_on_cpu(cpu, salinfo_log_read_cpu, data);
 	if (!data->log_size) {
 		data->state = STATE_NO_DATA;
-		clear_bit(cpu, &data->cpu_event);
+		cpu_clear(cpu, data->cpu_event);
 	} else {
 		data->state = STATE_LOG_RECORD;
 	}
···
 salinfo_log_clear(struct salinfo_data *data, int cpu)
 {
 	sal_log_record_header_t *rh;
+	unsigned long flags;
+	spin_lock_irqsave(&data_saved_lock, flags);
 	data->state = STATE_NO_DATA;
-	if (!test_bit(cpu, &data->cpu_event))
+	if (!cpu_isset(cpu, data->cpu_event)) {
+		spin_unlock_irqrestore(&data_saved_lock, flags);
 		return 0;
-	down(&data->sem);
-	clear_bit(cpu, &data->cpu_event);
+	}
+	cpu_clear(cpu, data->cpu_event);
 	if (data->saved_num) {
-		unsigned long flags;
-		spin_lock_irqsave(&data_saved_lock, flags);
-		shift1_data_saved(data, data->saved_num - 1 );
+		shift1_data_saved(data, data->saved_num - 1);
 		data->saved_num = 0;
-		spin_unlock_irqrestore(&data_saved_lock, flags);
 	}
+	spin_unlock_irqrestore(&data_saved_lock, flags);
 	rh = (sal_log_record_header_t *)(data->log_buffer);
 	/* Corrected errors have already been cleared from SAL */
 	if (rh->severity != sal_log_severity_corrected)
 		call_on_cpu(cpu, salinfo_log_clear_cpu, data);
 	/* clearing a record may make a new record visible */
 	salinfo_log_new_read(cpu, data);
-	if (data->state == STATE_LOG_RECORD &&
-	    !test_and_set_bit(cpu, &data->cpu_event))
-		up(&data->sem);
+	if (data->state == STATE_LOG_RECORD) {
+		spin_lock_irqsave(&data_saved_lock, flags);
+		cpu_set(cpu, data->cpu_event);
+		salinfo_work_to_do(data);
+		spin_unlock_irqrestore(&data_saved_lock, flags);
+	}
 	return 0;
 }
···
 	.write	= salinfo_log_write,
 };
 
+#ifdef CONFIG_HOTPLUG_CPU
+static int __devinit
+salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
+{
+	unsigned int i, cpu = (unsigned long)hcpu;
+	unsigned long flags;
+	struct salinfo_data *data;
+	switch (action) {
+	case CPU_ONLINE:
+		spin_lock_irqsave(&data_saved_lock, flags);
+		for (i = 0, data = salinfo_data;
+		     i < ARRAY_SIZE(salinfo_data);
+		     ++i, ++data) {
+			cpu_set(cpu, data->cpu_event);
+			salinfo_work_to_do(data);
+		}
+		spin_unlock_irqrestore(&data_saved_lock, flags);
+		break;
+	case CPU_DEAD:
+		spin_lock_irqsave(&data_saved_lock, flags);
+		for (i = 0, data = salinfo_data;
+		     i < ARRAY_SIZE(salinfo_data);
+		     ++i, ++data) {
+			struct salinfo_data_saved *data_saved;
+			int j;
+			for (j = ARRAY_SIZE(data->data_saved) - 1, data_saved = data->data_saved + j;
+			     j >= 0;
+			     --j, --data_saved) {
+				if (data_saved->buffer && data_saved->cpu == cpu) {
+					shift1_data_saved(data, j);
+				}
+			}
+			cpu_clear(cpu, data->cpu_event);
+		}
+		spin_unlock_irqrestore(&data_saved_lock, flags);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block salinfo_cpu_notifier =
+{
+	.notifier_call = salinfo_cpu_callback,
+	.priority = 0,
+};
+#endif /* CONFIG_HOTPLUG_CPU */
+
 static int __init
 salinfo_init(void)
 {
···
 	struct proc_dir_entry **sdir = salinfo_proc_entries; /* keeps track of every entry */
 	struct proc_dir_entry *dir, *entry;
 	struct salinfo_data *data;
-	int i, j, online;
+	int i, j;
 
 	salinfo_dir = proc_mkdir("sal", NULL);
 	if (!salinfo_dir)
···
 	for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) {
 		data = salinfo_data + i;
 		data->type = i;
-		sema_init(&data->sem, 0);
+		init_MUTEX(&data->mutex);
 		dir = proc_mkdir(salinfo_log_name[i], salinfo_dir);
 		if (!dir)
 			continue;
···
 		*sdir++ = entry;
 
 		/* we missed any events before now */
-		online = 0;
-		for_each_online_cpu(j) {
-			set_bit(j, &data->cpu_event);
-			++online;
-		}
-		sema_init(&data->sem, online);
+		for_each_online_cpu(j)
+			cpu_set(j, data->cpu_event);
 
 		*sdir++ = dir;
 	}
···
 	salinfo_timer.expires = jiffies + SALINFO_TIMER_DELAY;
 	salinfo_timer.function = &salinfo_timeout;
 	add_timer(&salinfo_timer);
+
+#ifdef CONFIG_HOTPLUG_CPU
+	register_cpu_notifier(&salinfo_cpu_notifier);
+#endif
 
 	return 0;
 }
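The salinfo_work_to_do() idiom above is worth calling out: a semaphore is used as a binary wakeup flag rather than a counter. Grabbing it with down_trylock() (ignoring the result) and immediately releasing it with up() leaves it in the "up" state regardless of what it was before, so a sleeping reader wakes exactly once and wakeups never accumulate. A stripped-down, self-contained sketch of the pattern, assuming only the 2.6-era semaphore and spinlock primitives; the producer/consumer names and the work_pending flag are illustrative:

#include <linux/spinlock.h>
#include <asm/semaphore.h>	/* 2.6-era home of struct semaphore */

static DEFINE_SPINLOCK(work_lock);
static struct semaphore work_sem;	/* init_MUTEX(&work_sem) at startup */
static int work_pending;

/* Leave work_sem "up" no matter its prior state: if it was free,
 * down_trylock() takes it and up() releases it; if it was held,
 * down_trylock() fails and up() releases it anyway.  Called under
 * work_lock so the flag update and the wakeup are one atomic step. */
static void kick(void)
{
	down_trylock(&work_sem);
	up(&work_sem);
}

static void producer(void)	/* any context that may post work */
{
	unsigned long flags;

	spin_lock_irqsave(&work_lock, flags);
	work_pending = 1;
	kick();
	spin_unlock_irqrestore(&work_lock, flags);
}

static int consumer(void)	/* user context, may sleep */
{
	if (!work_pending && down_interruptible(&work_sem))
		return -EINTR;
	/* ... consume work, clearing work_pending under work_lock ... */
	return 0;
}

This is why the patch can drop the old counting-semaphore bookkeeping, along with its "double up() is not a problem" caveat, in favor of a cpumask test plus one kick.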
+19 -7
arch/ia64/kernel/traps.c
···
 	if (fsys_mode(current, &regs)) {
 		extern char __kernel_syscall_via_break[];
 		/*
-		 * Got a trap in fsys-mode: Taken Branch Trap and Single Step trap
-		 * need special handling; Debug trap is not supposed to happen.
+		 * Got a trap in fsys-mode: Taken Branch Trap
+		 * and Single Step trap need special handling;
+		 * Debug trap is ignored (we disable it here
+		 * and re-enable it in the lower-privilege trap).
 		 */
 		if (unlikely(vector == 29)) {
-			die("Got debug trap in fsys-mode---not supposed to happen!",
-			    &regs, 0);
+			set_thread_flag(TIF_DB_DISABLED);
+			ia64_psr(&regs)->db = 0;
+			ia64_psr(&regs)->lp = 1;
 			return;
 		}
 		/* re-do the system call via break 0x100000: */
···
 	case 34:
 		if (isr & 0x2) {
 			/* Lower-Privilege Transfer Trap */
+
+			/* If we disabled debug traps during an fsyscall,
+			 * re-enable them here.
+			 */
+			if (test_thread_flag(TIF_DB_DISABLED)) {
+				clear_thread_flag(TIF_DB_DISABLED);
+				ia64_psr(&regs)->db = 1;
+			}
+
 			/*
-			 * Just clear PSR.lp and then return immediately: all the
-			 * interesting work (e.g., signal delivery is done in the kernel
-			 * exit path).
+			 * Just clear PSR.lp and then return immediately:
+			 * all the interesting work (e.g., signal delivery)
+			 * is done in the kernel exit path.
 			 */
 			ia64_psr(&regs)->lp = 0;
 			return;
+1 -1
arch/ia64/mm/tlb.c
···
 {
 	static DEFINE_SPINLOCK(ptcg_lock);
 
-	if (mm != current->active_mm) {
+	if (mm != current->active_mm || !current->mm) {
 		flush_tlb_all();
 		return;
 	}
+12 -4
arch/ia64/sn/include/xtalk/hubdev.h
···
 #define IIO_NUM_ITTES	7
 #define HUB_NUM_BIG_WINDOW	(IIO_NUM_ITTES - 1)
 
-struct sn_flush_device_list {
+/* This struct is shared between the PROM and the kernel.
+ * Changes to this struct will require corresponding changes to the kernel.
+ */
+struct sn_flush_device_common {
 	int sfdl_bus;
 	int sfdl_slot;
 	int sfdl_pin;
-	struct bar_list {
+	struct common_bar_list {
 		unsigned long start;
 		unsigned long end;
 	} sfdl_bar_list[6];
···
 	uint32_t sfdl_persistent_busnum;
 	uint32_t sfdl_persistent_segment;
 	struct pcibus_info *sfdl_pcibus_info;
+};
+
+/* This struct is kernel only and is not used by the PROM */
+struct sn_flush_device_kernel {
 	spinlock_t sfdl_flush_lock;
+	struct sn_flush_device_common *common;
 };
 
 /*
- * **widget_p - Used as an array[wid_num][device] of sn_flush_device_list.
+ * **widget_p - Used as an array[wid_num][device] of sn_flush_device_kernel.
  */
 struct sn_flush_nasid_entry {
-	struct sn_flush_device_list **widget_p; /* Used as a array of wid_num */
+	struct sn_flush_device_kernel **widget_p; // Used as an array of wid_num
 	uint64_t iio_itte[8];
 };
+48 -10
arch/ia64/sn/kernel/bte_error.c
···
  * Wait until all BTE related CRBs are completed
  * and then reset the interfaces.
  */
-void shub1_bte_error_handler(unsigned long _nodepda)
+int shub1_bte_error_handler(unsigned long _nodepda)
 {
 	struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
 	struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
···
 	    (err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) {
 		BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda,
 			    smp_processor_id()));
-		return;
+		return 1;
 	}
 
 	/* Determine information about our hub */
···
 		mod_timer(recovery_timer, HZ * 5);
 		BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
 			    smp_processor_id()));
-		return;
+		return 1;
 	}
 	if (icmr.ii_icmr_fld_s.i_crb_vld != 0) {
···
 				BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n",
 					    err_nodepda, smp_processor_id(),
 					    i));
-				return;
+				return 1;
 			}
 		}
 	}
···
 	REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval);
 
 	del_timer(recovery_timer);
+	return 0;
+}
+
+/*
+ * Wait until all BTE related CRBs are completed
+ * and then reset the interfaces.
+ */
+int shub2_bte_error_handler(unsigned long _nodepda)
+{
+	struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
+	struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
+	struct bteinfo_s *bte;
+	nasid_t nasid;
+	u64 status;
+	int i;
+
+	nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
+
+	/*
+	 * Verify that all the BTEs are complete
+	 */
+	for (i = 0; i < BTES_PER_NODE; i++) {
+		bte = &err_nodepda->bte_if[i];
+		status = BTE_LNSTAT_LOAD(bte);
+		if ((status & IBLS_ERROR) || !(status & IBLS_BUSY))
+			continue;
+		mod_timer(recovery_timer, HZ * 5);
+		BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
+			    smp_processor_id()));
+		return 1;
+	}
+	if (ia64_sn_bte_recovery(nasid))
+		panic("bte_error_handler(): Fatal BTE Error");
+
+	del_timer(recovery_timer);
+	return 0;
 }
 
 /*
···
 	struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
 	spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
 	int i;
-	nasid_t nasid;
 	unsigned long irq_flags;
 	volatile u64 *notify;
 	bte_result_t bh_error;
···
 	}
 
 	if (is_shub1()) {
-		shub1_bte_error_handler(_nodepda);
+		if (shub1_bte_error_handler(_nodepda)) {
+			spin_unlock_irqrestore(recovery_lock, irq_flags);
+			return;
+		}
 	} else {
-		nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
-
-		if (ia64_sn_bte_recovery(nasid))
-			panic("bte_error_handler(): Fatal BTE Error");
+		if (shub2_bte_error_handler(_nodepda)) {
+			spin_unlock_irqrestore(recovery_lock, irq_flags);
+			return;
+		}
 	}
 
 	for (i = 0; i < BTES_PER_NODE; i++) {
+6 -5
arch/ia64/sn/kernel/huberror.c
···
 	ret_stuff.v0 = 0;
 	hubdev_info = (struct hubdev_info *)arg;
 	nasid = hubdev_info->hdi_nasid;
-	SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
-			(u64) nasid, 0, 0, 0, 0, 0, 0);
-
-	if ((int)ret_stuff.v0)
-		panic("hubii_eint_handler(): Fatal TIO Error");
 
 	if (is_shub1()) {
+		SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
+				(u64) nasid, 0, 0, 0, 0, 0, 0);
+
+		if ((int)ret_stuff.v0)
+			panic("hubii_eint_handler(): Fatal TIO Error");
+
 		if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
 			(void)hubiio_crb_error_handler(hubdev_info);
 	} else
+52 -40
arch/ia64/sn/kernel/io_init.c
···
 };
 
 /*
- * Retrieve the DMA Flush List given nasid.  This list is needed
- * to implement the WAR - Flush DMA data on PIO Reads.
+ * Retrieve the DMA Flush List given nasid, widget, and device.
+ * This list is needed to implement the WAR - Flush DMA data on PIO Reads.
  */
-static inline uint64_t
-sal_get_widget_dmaflush_list(u64 nasid, u64 widget_num, u64 address)
+static inline u64
+sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
+			     u64 address)
 {
 
 	struct ia64_sal_retval ret_stuff;
···
 	ret_stuff.v0 = 0;
 
 	SAL_CALL_NOLOCK(ret_stuff,
-			(u64) SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
-			(u64) nasid, (u64) widget_num, (u64) address, 0, 0, 0,
-			0);
-	return ret_stuff.v0;
+			(u64) SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST,
+			(u64) nasid, (u64) widget_num,
+			(u64) device_num, (u64) address, 0, 0, 0);
+	return ret_stuff.status;
 
 }
 
 /*
  * Retrieve the hub device info structure for the given nasid.
  */
-static inline uint64_t sal_get_hubdev_info(u64 handle, u64 address)
+static inline u64 sal_get_hubdev_info(u64 handle, u64 address)
 {
 
 	struct ia64_sal_retval ret_stuff;
···
 /*
  * Retrieve the pci bus information given the bus number.
  */
-static inline uint64_t sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
+static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
 {
 
 	struct ia64_sal_retval ret_stuff;
···
 /*
  * Retrieve the pci device information given the bus and device|function number.
  */
-static inline uint64_t
+static inline u64
 sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
 		    u64 sn_irq_info)
 {
···
  */
 static void sn_fixup_ionodes(void)
 {
-
-	struct sn_flush_device_list *sn_flush_device_list;
+	struct sn_flush_device_kernel *sn_flush_device_kernel;
+	struct sn_flush_device_kernel *dev_entry;
 	struct hubdev_info *hubdev;
-	uint64_t status;
-	uint64_t nasid;
-	int i, widget;
+	u64 status;
+	u64 nasid;
+	int i, widget, device;
 
 	/*
 	 * Get SGI Specific HUB chipset information.
···
 		nasid = cnodeid_to_nasid(i);
 		hubdev->max_segment_number = 0xffffffff;
 		hubdev->max_pcibus_number = 0xff;
-		status = sal_get_hubdev_info(nasid, (uint64_t) __pa(hubdev));
+		status = sal_get_hubdev_info(nasid, (u64) __pa(hubdev));
 		if (status)
 			continue;
···
 		hubdev->hdi_flush_nasid_list.widget_p =
 		    kmalloc((HUB_WIDGET_ID_MAX + 1) *
-			    sizeof(struct sn_flush_device_list *), GFP_KERNEL);
-
+			    sizeof(struct sn_flush_device_kernel *),
+			    GFP_KERNEL);
 		memset(hubdev->hdi_flush_nasid_list.widget_p, 0x0,
 		       (HUB_WIDGET_ID_MAX + 1) *
-		       sizeof(struct sn_flush_device_list *));
+		       sizeof(struct sn_flush_device_kernel *));
 
 		for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
-			sn_flush_device_list = kmalloc(DEV_PER_WIDGET *
-						       sizeof(struct
-							      sn_flush_device_list),
-						       GFP_KERNEL);
-			memset(sn_flush_device_list, 0x0,
+			sn_flush_device_kernel = kmalloc(DEV_PER_WIDGET *
+							 sizeof(struct
+						        sn_flush_device_kernel),
+							 GFP_KERNEL);
+			if (!sn_flush_device_kernel)
+				BUG();
+			memset(sn_flush_device_kernel, 0x0,
 			       DEV_PER_WIDGET *
-			       sizeof(struct sn_flush_device_list));
+			       sizeof(struct sn_flush_device_kernel));
 
-			status =
-			    sal_get_widget_dmaflush_list(nasid, widget,
-							 (uint64_t)
-							 __pa
-							 (sn_flush_device_list));
-			if (status) {
-				kfree(sn_flush_device_list);
-				continue;
+			dev_entry = sn_flush_device_kernel;
+			for (device = 0; device < DEV_PER_WIDGET;
+			     device++, dev_entry++) {
+				dev_entry->common = kmalloc(sizeof(struct
+						sn_flush_device_common),
+						GFP_KERNEL);
+				if (!dev_entry->common)
+					BUG();
+				memset(dev_entry->common, 0x0, sizeof(struct
+						sn_flush_device_common));
+
+				status = sal_get_device_dmaflush_list(nasid,
+						widget, device,
+						(u64)(dev_entry->common));
+				if (status)
+					BUG();
+
+				spin_lock_init(&dev_entry->sfdl_flush_lock);
 			}
 
-			spin_lock_init(&sn_flush_device_list->sfdl_flush_lock);
-			hubdev->hdi_flush_nasid_list.widget_p[widget] =
-			    sn_flush_device_list;
+			if (sn_flush_device_kernel)
+				hubdev->hdi_flush_nasid_list.widget_p[widget] =
+				    sn_flush_device_kernel;
 		}
-
 	}
-
 }
+6 -5
arch/ia64/sn/kernel/xpc.h → include/asm-ia64/sn/xpc.h (renamed)
···
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2006 Silicon Graphics, Inc.  All Rights Reserved.
  */
···
  * Cross Partition Communication (XPC) structures and macros.
  */
 
-#ifndef _IA64_SN_KERNEL_XPC_H
-#define _IA64_SN_KERNEL_XPC_H
+#ifndef _ASM_IA64_SN_XPC_H
+#define _ASM_IA64_SN_XPC_H
 
 
 #include <linux/config.h>
···
 extern struct device *xpc_part;
 extern struct device *xpc_chan;
 extern int xpc_disengage_request_timelimit;
+extern int xpc_disengage_request_timedout;
 extern irqreturn_t xpc_notify_IRQ_handler(int, void *, struct pt_regs *);
 extern void xpc_dropped_IPI_check(struct xpc_partition *);
 extern void xpc_activate_partition(struct xpc_partition *);
···
 extern void xpc_deliver_msg(struct xpc_channel *);
 extern void xpc_disconnect_channel(const int, struct xpc_channel *,
 					enum xpc_retval, unsigned long *);
-extern void xpc_disconnecting_callout(struct xpc_channel *);
+extern void xpc_disconnect_callout(struct xpc_channel *, enum xpc_retval);
 extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval);
 extern void xpc_teardown_infrastructure(struct xpc_partition *);
···
 }
 
 
-#endif /* _IA64_SN_KERNEL_XPC_H */
+#endif /* _ASM_IA64_SN_XPC_H */
+14 -10
arch/ia64/sn/kernel/xpc_channel.c
···
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2006 Silicon Graphics, Inc.  All Rights Reserved.
  */
···
 #include <linux/slab.h>
 #include <asm/sn/bte.h>
 #include <asm/sn/sn_sal.h>
-#include "xpc.h"
+#include <asm/sn/xpc.h>
 
 
 /*
···
 	}
 
 	/* both sides are disconnected now */
+
+	if (ch->flags & XPC_C_CONNECTCALLOUT) {
+		spin_unlock_irqrestore(&ch->lock, *irq_flags);
+		xpc_disconnect_callout(ch, xpcDisconnected);
+		spin_lock_irqsave(&ch->lock, *irq_flags);
+	}
 
 	/* it's now safe to free the channel's message queues */
 	xpc_free_msgqueues(ch);
···
 void
-xpc_disconnecting_callout(struct xpc_channel *ch)
+xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason)
 {
 	/*
 	 * Let the channel's registerer know that the channel is being
···
 	 */
 
 	if (ch->func != NULL) {
-		dev_dbg(xpc_chan, "ch->func() called, reason=xpcDisconnecting,"
-			" partid=%d, channel=%d\n", ch->partid, ch->number);
+		dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
+			"channel=%d\n", reason, ch->partid, ch->number);
 
-		ch->func(xpcDisconnecting, ch->partid, ch->number, NULL,
-								ch->key);
+		ch->func(reason, ch->partid, ch->number, NULL, ch->key);
 
-		dev_dbg(xpc_chan, "ch->func() returned, reason="
-			"xpcDisconnecting, partid=%d, channel=%d\n",
-			ch->partid, ch->number);
+		dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
+			"channel=%d\n", reason, ch->partid, ch->number);
 	}
 }
+148 -79
arch/ia64/sn/kernel/xpc_main.c
···
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2006 Silicon Graphics, Inc.  All Rights Reserved.
  */
···
 #include <asm/sn/sn_sal.h>
 #include <asm/kdebug.h>
 #include <asm/uaccess.h>
-#include "xpc.h"
+#include <asm/sn/xpc.h>
 
 
 /* define two XPC debug device structures to be used with dev_dbg() et al */
···
 struct device *xpc_part = &xpc_part_dbg_subname;
 struct device *xpc_chan = &xpc_chan_dbg_subname;
+
+
+static int xpc_kdebug_ignore;
 
 
 /* systune related variables for /proc/sys directories */
···
 };
 static struct ctl_table_header *xpc_sysctl;
 
+/* non-zero if any remote partition disengage request was timed out */
+int xpc_disengage_request_timedout;
 
 /* #of IRQs received */
 static atomic_t xpc_act_IRQ_rcvd;
···
 			ch->flags |= XPC_C_DISCONNECTCALLOUT;
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
 
-			xpc_disconnecting_callout(ch);
+			xpc_disconnect_callout(ch, xpcDisconnecting);
 		} else {
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
 		}
···
 xpc_do_exit(enum xpc_retval reason)
 {
 	partid_t partid;
-	int active_part_count;
+	int active_part_count, printed_waiting_msg = 0;
 	struct xpc_partition *part;
-	unsigned long printmsg_time;
+	unsigned long printmsg_time, disengage_request_timeout = 0;
 
 
 	/* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
···
 	/* wait for all partitions to become inactive */
 
-	printmsg_time = jiffies;
+	printmsg_time = jiffies + (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
+	xpc_disengage_request_timedout = 0;
 
 	do {
 		active_part_count = 0;
···
 				active_part_count++;
 
 			XPC_DEACTIVATE_PARTITION(part, reason);
-		}
 
-		if (active_part_count == 0) {
-			break;
-		}
+			if (part->disengage_request_timeout >
+						disengage_request_timeout) {
+				disengage_request_timeout =
+					part->disengage_request_timeout;
+			}
+		}
 
-		if (jiffies >= printmsg_time) {
-			dev_info(xpc_part, "waiting for partitions to "
-				"deactivate/disengage, active count=%d, remote "
-				"engaged=0x%lx\n", active_part_count,
-				xpc_partition_engaged(1UL << partid));
-
-			printmsg_time = jiffies +
+		if (xpc_partition_engaged(-1UL)) {
+			if (time_after(jiffies, printmsg_time)) {
+				dev_info(xpc_part, "waiting for remote "
+					"partitions to disengage, timeout in "
+					"%ld seconds\n",
+					(disengage_request_timeout - jiffies)
+									/ HZ);
+				printmsg_time = jiffies +
 					(XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
+				printed_waiting_msg = 1;
+			}
+
+		} else if (active_part_count > 0) {
+			if (printed_waiting_msg) {
+				dev_info(xpc_part, "waiting for local partition"
+					" to disengage\n");
+				printed_waiting_msg = 0;
+			}
+
+		} else {
+			if (!xpc_disengage_request_timedout) {
+				dev_info(xpc_part, "all partitions have "
+					"disengaged\n");
+			}
+			break;
 		}
 
 		/* sleep for a 1/3 of a second or so */
···
 	del_timer_sync(&xpc_hb_timer);
 	DBUG_ON(xpc_vars->heartbeating_to_mask != 0);
 
-	/* take ourselves off of the reboot_notifier_list */
-	(void) unregister_reboot_notifier(&xpc_reboot_notifier);
+	if (reason == xpcUnloading) {
+		/* take ourselves off of the reboot_notifier_list */
+		(void) unregister_reboot_notifier(&xpc_reboot_notifier);
 
-	/* take ourselves off of the die_notifier list */
-	(void) unregister_die_notifier(&xpc_die_notifier);
+		/* take ourselves off of the die_notifier list */
+		(void) unregister_die_notifier(&xpc_die_notifier);
+	}
 
 	/* close down protections for IPI operations */
 	xpc_restrict_IPI_ops();
···
 	if (xpc_sysctl) {
 		unregister_sysctl_table(xpc_sysctl);
 	}
 }
-
-
-/*
- * Called when the system is about to be either restarted or halted.
- */
-static void
-xpc_die_disengage(void)
-{
-	struct xpc_partition *part;
-	partid_t partid;
-	unsigned long engaged;
-	long time, print_time, disengage_request_timeout;
-
-
-	/* keep xpc_hb_checker thread from doing anything (just in case) */
-	xpc_exiting = 1;
-
-	xpc_vars->heartbeating_to_mask = 0;  /* indicate we're deactivated */
-
-	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
-		part = &xpc_partitions[partid];
-
-		if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->
-							remote_vars_version)) {
-
-			/* just in case it was left set by an earlier XPC */
-			xpc_clear_partition_engaged(1UL << partid);
-			continue;
-		}
-
-		if (xpc_partition_engaged(1UL << partid) ||
-					part->act_state != XPC_P_INACTIVE) {
-			xpc_request_partition_disengage(part);
-			xpc_mark_partition_disengaged(part);
-			xpc_IPI_send_disengage(part);
-		}
-	}
-
-	print_time = rtc_time();
-	disengage_request_timeout = print_time +
-		(xpc_disengage_request_timelimit * sn_rtc_cycles_per_second);
-
-	/* wait for all other partitions to disengage from us */
-
-	while ((engaged = xpc_partition_engaged(-1UL)) &&
-			(time = rtc_time()) < disengage_request_timeout) {
-
-		if (time >= print_time) {
-			dev_info(xpc_part, "waiting for remote partitions to "
-				"disengage, engaged=0x%lx\n", engaged);
-			print_time = time + (XPC_DISENGAGE_PRINTMSG_INTERVAL *
-						sn_rtc_cycles_per_second);
-		}
-	}
-	dev_info(xpc_part, "finished waiting for remote partitions to "
-		"disengage, engaged=0x%lx\n", engaged);
-}
···
 /*
- * This function is called when the system is being rebooted.
+ * Notify other partitions to disengage from all references to our memory.
+ */
+static void
+xpc_die_disengage(void)
+{
+	struct xpc_partition *part;
+	partid_t partid;
+	unsigned long engaged;
+	long time, printmsg_time, disengage_request_timeout;
+
+
+	/* keep xpc_hb_checker thread from doing anything (just in case) */
+	xpc_exiting = 1;
+
+	xpc_vars->heartbeating_to_mask = 0;  /* indicate we're deactivated */
+
+	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+		part = &xpc_partitions[partid];
+
+		if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->
+							remote_vars_version)) {
+
+			/* just in case it was left set by an earlier XPC */
+			xpc_clear_partition_engaged(1UL << partid);
+			continue;
+		}
+
+		if (xpc_partition_engaged(1UL << partid) ||
+					part->act_state != XPC_P_INACTIVE) {
+			xpc_request_partition_disengage(part);
+			xpc_mark_partition_disengaged(part);
+			xpc_IPI_send_disengage(part);
+		}
+	}
+
+	time = rtc_time();
+	printmsg_time = time +
+		(XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second);
+	disengage_request_timeout = time +
+		(xpc_disengage_request_timelimit * sn_rtc_cycles_per_second);
+
+	/* wait for all other partitions to disengage from us */
+
+	while (1) {
+		engaged = xpc_partition_engaged(-1UL);
+		if (!engaged) {
+			dev_info(xpc_part, "all partitions have disengaged\n");
+			break;
+		}
+
+		time = rtc_time();
+		if (time >= disengage_request_timeout) {
+			for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+				if (engaged & (1UL << partid)) {
+					dev_info(xpc_part, "disengage from "
+						"remote partition %d timed "
+						"out\n", partid);
+				}
+			}
+			break;
+		}
+
+		if (time >= printmsg_time) {
+			dev_info(xpc_part, "waiting for remote partitions to "
+				"disengage, timeout in %ld seconds\n",
+				(disengage_request_timeout - time) /
+						sn_rtc_cycles_per_second);
+			printmsg_time = time +
+					(XPC_DISENGAGE_PRINTMSG_INTERVAL *
+						sn_rtc_cycles_per_second);
+		}
+	}
+}
+
+
+/*
+ * This function is called when the system is being restarted or halted due
+ * to some sort of system failure.  If this is the case we need to notify the
+ * other partitions to disengage from all references to our memory.
+ * This function can also be called when our heartbeater could be offlined
+ * for a time.  In this case we need to notify other partitions to not worry
+ * about the lack of a heartbeat.
  */
 static int
 xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
···
 	case DIE_MACHINE_HALT:
 		xpc_die_disengage();
 		break;
+
+	case DIE_KDEBUG_ENTER:
+		/* Should lack of heartbeat be ignored by other partitions? */
+		if (!xpc_kdebug_ignore) {
+			break;
+		}
+		/* fall through */
 	case DIE_MCA_MONARCH_ENTER:
 	case DIE_INIT_MONARCH_ENTER:
 		xpc_vars->heartbeat++;
 		xpc_vars->heartbeat_offline = 1;
 		break;
+
+	case DIE_KDEBUG_LEAVE:
+		/* Is lack of heartbeat being ignored by other partitions? */
+		if (!xpc_kdebug_ignore) {
+			break;
+		}
+		/* fall through */
 	case DIE_MCA_MONARCH_LEAVE:
 	case DIE_INIT_MONARCH_LEAVE:
 		xpc_vars->heartbeat++;
···
 module_param(xpc_disengage_request_timelimit, int, 0);
 MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait "
 		"for disengage request to complete.");
+
+module_param(xpc_kdebug_ignore, int, 0);
+MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
+		"other partitions when dropping into kdebug.");
+7 -3
arch/ia64/sn/kernel/xpc_partition.c
···
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2006 Silicon Graphics, Inc.  All Rights Reserved.
  */
···
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/nodepda.h>
 #include <asm/sn/addrs.h>
-#include "xpc.h"
+#include <asm/sn/xpc.h>
 
 
 /* XPC is exiting flag */
···
 		}
 	}
 
-	if (!xpc_partition_disengaged(part)) {
+	if (part->disengage_request_timeout > 0 &&
+					!xpc_partition_disengaged(part)) {
 		/* still waiting on other side to disengage from us */
 		return;
 	}
···
 			 * request in a timely fashion, so assume it's dead.
 			 */
 
+			dev_info(xpc_part, "disengage from remote partition %d "
+				"timed out\n", partid);
+			xpc_disengage_request_timedout = 1;
 			xpc_clear_partition_engaged(1UL << partid);
 			disengaged = 1;
 		}
+18 -16
arch/ia64/sn/pci/pcibr/pcibr_dma.c
···
 	uint64_t flags;
 	uint64_t itte;
 	struct hubdev_info *hubinfo;
-	volatile struct sn_flush_device_list *p;
+	volatile struct sn_flush_device_kernel *p;
+	volatile struct sn_flush_device_common *common;
+
 	struct sn_flush_nasid_entry *flush_nasid_list;
 
 	if (!sn_ioif_inited)
···
 	p = &flush_nasid_list->widget_p[wid_num][0];
 
 	/* find a matching BAR */
-	for (i = 0; i < DEV_PER_WIDGET; i++) {
+	for (i = 0; i < DEV_PER_WIDGET; i++, p++) {
+		common = p->common;
 		for (j = 0; j < PCI_ROM_RESOURCE; j++) {
-			if (p->sfdl_bar_list[j].start == 0)
+			if (common->sfdl_bar_list[j].start == 0)
 				break;
-			if (addr >= p->sfdl_bar_list[j].start
-			    && addr <= p->sfdl_bar_list[j].end)
+			if (addr >= common->sfdl_bar_list[j].start
+			    && addr <= common->sfdl_bar_list[j].end)
 				break;
 		}
-		if (j < PCI_ROM_RESOURCE && p->sfdl_bar_list[j].start != 0)
+		if (j < PCI_ROM_RESOURCE && common->sfdl_bar_list[j].start != 0)
 			break;
-		p++;
 	}
 
 	/* if no matching BAR, return without doing anything. */
···
 		if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) {
 			return;
 		} else {
-			pcireg_wrb_flush_get(p->sfdl_pcibus_info,
-					     (p->sfdl_slot - 1));
+			pcireg_wrb_flush_get(common->sfdl_pcibus_info,
+					     (common->sfdl_slot - 1));
 		}
 	} else {
-		spin_lock_irqsave(&((struct sn_flush_device_list *)p)->
-				  sfdl_flush_lock, flags);
-
-		*p->sfdl_flush_addr = 0;
+		spin_lock_irqsave((spinlock_t *)&p->sfdl_flush_lock,
+				  flags);
+		*common->sfdl_flush_addr = 0;
 
 		/* force an interrupt. */
-		*(volatile uint32_t *)(p->sfdl_force_int_addr) = 1;
+		*(volatile uint32_t *)(common->sfdl_force_int_addr) = 1;
 
 		/* wait for the interrupt to come back. */
-		while (*(p->sfdl_flush_addr) != 0x10f)
+		while (*(common->sfdl_flush_addr) != 0x10f)
 			cpu_relax();
 
 		/* okay, everything is synched up. */
-		spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock, flags);
+		spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock,
+				       flags);
 	}
 	return;
 }
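The else branch above is the heart of the PIO-read DMA-flush workaround: clear a completion word, force an interrupt, and spin until the interrupt handler writes a magic value. A device-neutral sketch of that handshake, modeled on the code above; the function name and parameters are illustrative, and 0x10f is the completion value the SN2 interrupt handler writes:

#include <linux/spinlock.h>
#include <asm/processor.h>	/* cpu_relax() */

/* Illustrative doorbell-plus-completion-word handshake. */
static void dma_flush_war(spinlock_t *lock,
			  volatile unsigned long *flush_addr,
			  volatile unsigned int *force_int_addr)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);

	*flush_addr = 0;	/* arm the completion word */
	*force_int_addr = 1;	/* ring the doorbell: force an interrupt */

	/* The interrupt handler writes 0x10f once all DMA posted ahead
	 * of the PIO read has been flushed to memory. */
	while (*flush_addr != 0x10f)
		cpu_relax();

	spin_unlock_irqrestore(lock, flags);
}

The restructuring in this hunk does not change that protocol; it only moves the per-device state behind the new sn_flush_device_common/sn_flush_device_kernel split.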
+10 -10
arch/ia64/sn/pci/pcibr/pcibr_provider.c
···
 	cnodeid_t near_cnode;
 	struct hubdev_info *hubdev_info;
 	struct pcibus_info *soft;
-	struct sn_flush_device_list *sn_flush_device_list;
+	struct sn_flush_device_kernel *sn_flush_device_kernel;
+	struct sn_flush_device_common *common;
 
 	if (! IS_PCI_BRIDGE_ASIC(prom_bussoft->bs_asic_type)) {
 		return NULL;
···
 	hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
 
 	if (hubdev_info->hdi_flush_nasid_list.widget_p) {
-		sn_flush_device_list = hubdev_info->hdi_flush_nasid_list.
+		sn_flush_device_kernel = hubdev_info->hdi_flush_nasid_list.
 		    widget_p[(int)soft->pbi_buscommon.bs_xid];
-		if (sn_flush_device_list) {
+		if (sn_flush_device_kernel) {
 			for (j = 0; j < DEV_PER_WIDGET;
-			     j++, sn_flush_device_list++) {
-				if (sn_flush_device_list->sfdl_slot == -1)
+			     j++, sn_flush_device_kernel++) {
+				common = sn_flush_device_kernel->common;
+				if (common->sfdl_slot == -1)
 					continue;
-				if ((sn_flush_device_list->
-				     sfdl_persistent_segment ==
+				if ((common->sfdl_persistent_segment ==
 				     soft->pbi_buscommon.bs_persist_segment) &&
-				    (sn_flush_device_list->
-				     sfdl_persistent_busnum ==
+				    (common->sfdl_persistent_busnum ==
 				     soft->pbi_buscommon.bs_persist_busnum))
-					sn_flush_device_list->sfdl_pcibus_info =
+					common->sfdl_pcibus_info =
 					    soft;
 			}
 		}
+6
include/asm-ia64/kprobes.h
···
 	unsigned long status;
 };
 
+#define	MAX_PARAM_RSE_SIZE	(0x60+0x60/0x3f)
 /* per-cpu kprobe control block */
 struct kprobe_ctlblk {
 	unsigned long kprobe_status;
 	struct pt_regs jprobe_saved_regs;
+	unsigned long jprobes_saved_stacked_regs[MAX_PARAM_RSE_SIZE];
+	unsigned long *bsp;
+	unsigned long cfm;
 	struct prev_kprobe prev_kprobe;
 };
···
 static inline void jprobe_return(void)
 {
 }
+extern void invalidate_stacked_regs(void);
+extern void flush_register_stack(void);
 
 #endif /* _ASM_KPROBES_H */
+3 -2
include/asm-ia64/sn/sn_sal.h
···
 #define  SN_SAL_IOIF_GET_HUBDEV_INFO		   0x02000055
 #define  SN_SAL_IOIF_GET_PCIBUS_INFO		   0x02000056
 #define  SN_SAL_IOIF_GET_PCIDEV_INFO		   0x02000057
-#define  SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST	   0x02000058
+#define  SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST	   0x02000058	// deprecated
+#define  SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST	   0x0200005a
 
 #define SN_SAL_HUB_ERROR_INTERRUPT		   0x02000060
 #define SN_SAL_BTE_RECOVER			   0x02000061
···
 	struct ia64_sal_retval rv;
 
 	rv.status = 0;
-	SAL_CALL_NOLOCK(rv, SN_SAL_BTE_RECOVER, 0, 0, 0, 0, 0, 0, 0);
+	SAL_CALL_NOLOCK(rv, SN_SAL_BTE_RECOVER, (u64)nasid, 0, 0, 0, 0, 0, 0);
 	if (rv.status == SALRET_NOT_IMPLEMENTED)
 		return 0;
 	return (int) rv.status;
+3 -1
include/asm-ia64/sn/xp.h
···
 
 	xpcOpenCloseError,	/* 50: channel open/close protocol error */
 
-	xpcUnknownReason	/* 51: unknown reason -- must be last in list */
+	xpcDisconnected,	/* 51: channel disconnected (closed) */
+
+	xpcUnknownReason	/* 52: unknown reason -- must be last in list */
 };
+3 -1
include/asm-ia64/thread_info.h
···
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		17
 #define TIF_MCA_INIT		18	/* this task is processing MCA or INIT */
+#define TIF_DB_DISABLED		19	/* debug trap disabled for fsyscall */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
···
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_SIGDELAYED		(1 << TIF_SIGDELAYED)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_MCA_INIT		(1 << TIF_MCA_INIT)
+#define _TIF_DB_DISABLED	(1 << TIF_DB_DISABLED)
 
 /* "work to do on user-return" bits */
 #define TIF_ALLWORK_MASK	(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SIGDELAYED)