Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
[IA64] kprobe clears qp bits for special instructions
[IA64] enable trap code on slot 1
[IA64] Take defensive stance on ia64_pal_get_brand_info()
[IA64] fix possible XPC deadlock when disconnecting
[IA64] - Reduce overhead of FP exception logging messages
[IA64] fix arch/ia64/mm/contig.c:235: warning: unused variable `nid'
[IA64] s/termios/ktermios/ in simserial.c
[IA64] kexec/kdump: tidy up declaration of relocate_new_kernel_t
[IA64] Kexec/Kdump: honour non-zero crashkernel offset.
[IA64] CONFIG_KEXEC/CONFIG_CRASH_DUMP permutations
[IA64] Do not call SN_SAL_SET_CPU_NUMBER twice on cpu 0

+362 -152
+1 -1
arch/ia64/hp/sim/simserial.c
···
 
 #define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
 
-static void rs_set_termios(struct tty_struct *tty, struct termios *old_termios)
+static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
 {
 	unsigned int cflag = tty->termios->c_cflag;
 
+1
arch/ia64/kernel/Makefile
···
 obj-$(CONFIG_IA64_MCA_RECOVERY)	+= mca_recovery.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o jprobes.o
 obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o crash.o
+obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
 obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR)	+= uncached.o
 obj-$(CONFIG_AUDIT)		+= audit.o
 obj-$(CONFIG_PCI_MSI)		+= msi_ia64.o
-22
arch/ia64/kernel/crash.c
···
 
 #include <asm/kdebug.h>
 #include <asm/mca.h>
-#include <asm/uaccess.h>
 
 int kdump_status[NR_CPUS];
 atomic_t kdump_cpu_freezed;
 atomic_t kdump_in_progress;
 int kdump_on_init = 1;
-ssize_t
-copy_oldmem_page(unsigned long pfn, char *buf,
-		size_t csize, unsigned long offset, int userbuf)
-{
-	void *vaddr;
-
-	if (!csize)
-		return 0;
-	vaddr = __va(pfn<<PAGE_SHIFT);
-	if (userbuf) {
-		if (copy_to_user(buf, (vaddr + offset), csize)) {
-			return -EFAULT;
-		}
-	} else
-		memcpy(buf, (vaddr + offset), csize);
-	return csize;
-}
 
 static inline Elf64_Word
 *append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data,
···
 static int
 machine_crash_setup(void)
 {
-	char *from = strstr(saved_command_line, "elfcorehdr=");
 	static struct notifier_block kdump_init_notifier_nb = {
 		.notifier_call = kdump_init_notifier,
 	};
 	int ret;
-	if (from)
-		elfcorehdr_addr = memparse(from+11, &from);
-	saved_max_pfn = (unsigned long)-1;
 	if((ret = register_die_notifier(&kdump_init_notifier_nb)) != 0)
 		return ret;
 #ifdef CONFIG_SYSCTL
+48
arch/ia64/kernel/crash_dump.c
···
+/*
+ * kernel/crash_dump.c - Memory preserving reboot related code.
+ *
+ * Created by: Simon Horman <horms@verge.net.au>
+ * Original code moved from kernel/crash.c
+ * Original code comment copied from the i386 version of this file
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+#include <linux/uaccess.h>
+
+/**
+ * copy_oldmem_page - copy one page from "oldmem"
+ * @pfn: page frame number to be copied
+ * @buf: target memory address for the copy; this can be in kernel address
+ *	space or user address space (see @userbuf)
+ * @csize: number of bytes to copy
+ * @offset: offset in bytes into the page (based on pfn) to begin the copy
+ * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+ *	otherwise @buf is in kernel address space, use memcpy().
+ *
+ * Copy a page from "oldmem". For this page, there is no pte mapped
+ * in the current kernel. We stitch up a pte, similar to kmap_atomic.
+ *
+ * Calling copy_to_user() in atomic context is not desirable. Hence first
+ * copying the data to a pre-allocated kernel page and then copying to user
+ * space in non-atomic context.
+ */
+ssize_t
+copy_oldmem_page(unsigned long pfn, char *buf,
+	size_t csize, unsigned long offset, int userbuf)
+{
+	void *vaddr;
+
+	if (!csize)
+		return 0;
+	vaddr = __va(pfn<<PAGE_SHIFT);
+	if (userbuf) {
+		if (copy_to_user(buf, (vaddr + offset), csize)) {
+			return -EFAULT;
+		}
+	} else
+		memcpy(buf, (vaddr + offset), csize);
+	return csize;
+}
+
+2 -1
arch/ia64/kernel/jprobes.S
···
  * to the correct location.
  */
 #include <asm/asmmacro.h>
+#include <asm-ia64/break.h>
 
 /*
  * void jprobe_break(void)
  */
 	.section .kprobes.text, "ax"
 ENTRY(jprobe_break)
-	break.m 0x80300
+	break.m __IA64_BREAK_JPROBE
 END(jprobe_break)
 
 /*
+159 -67
arch/ia64/kernel/kprobes.c
···
 {
 	p->ainsn.inst_flag = 0;
 	p->ainsn.target_br_reg = 0;
+	p->ainsn.slot = slot;
 
 	/* Check for Break instruction
 	 * Bits 37:40 Major opcode to be zero
···
 
 /*
  * In this function we check to see if the instruction
- * on which we are inserting kprobe is supported.
- * Returns 0 if supported
- * Returns -EINVAL if unsupported
- */
-static int __kprobes unsupported_inst(uint template, uint slot,
-				      uint major_opcode,
-				      unsigned long kprobe_inst,
-				      unsigned long addr)
-{
-	if (bundle_encoding[template][slot] == I) {
-		switch (major_opcode) {
-			case 0x0: //I_UNIT_MISC_OPCODE:
-			/*
-			 * Check for Integer speculation instruction
-			 * - Bit 33-35 to be equal to 0x1
-			 */
-			if (((kprobe_inst >> 33) & 0x7) == 1) {
-				printk(KERN_WARNING
-					"Kprobes on speculation inst at <0x%lx> not supported\n",
-					addr);
-				return -EINVAL;
-			}
-
-			/*
-			 * IP relative mov instruction
-			 * - Bit 27-35 to be equal to 0x30
-			 */
-			if (((kprobe_inst >> 27) & 0x1FF) == 0x30) {
-				printk(KERN_WARNING
-					"Kprobes on \"mov r1=ip\" at <0x%lx> not supported\n",
-					addr);
-				return -EINVAL;
-
-			}
-		}
-	}
-	return 0;
-}
-
-
-/*
- * In this function we check to see if the instruction
  * (qp) cmpx.crel.ctype p1,p2=r2,r3
  * on which we are inserting kprobe is cmp instruction
  * with ctype as unc.
···
 }
 
 /*
+ * In this function we check to see if the instruction
+ * on which we are inserting kprobe is supported.
+ * Returns qp value if supported
+ * Returns -EINVAL if unsupported
+ */
+static int __kprobes unsupported_inst(uint template, uint slot,
+				      uint major_opcode,
+				      unsigned long kprobe_inst,
+				      unsigned long addr)
+{
+	int qp;
+
+	qp = kprobe_inst & 0x3f;
+	if (is_cmp_ctype_unc_inst(template, slot, major_opcode, kprobe_inst)) {
+		if (slot == 1 && qp) {
+			printk(KERN_WARNING "Kprobes on cmp unc"
+					"instruction on slot 1 at <0x%lx>"
+					"is not supported\n", addr);
+			return -EINVAL;
+
+		}
+		qp = 0;
+	}
+	else if (bundle_encoding[template][slot] == I) {
+		if (major_opcode == 0) {
+			/*
+			 * Check for Integer speculation instruction
+			 * - Bit 33-35 to be equal to 0x1
+			 */
+			if (((kprobe_inst >> 33) & 0x7) == 1) {
+				printk(KERN_WARNING
+					"Kprobes on speculation inst at <0x%lx> not supported\n",
+					addr);
+				return -EINVAL;
+			}
+			/*
+			 * IP relative mov instruction
+			 * - Bit 27-35 to be equal to 0x30
+			 */
+			if (((kprobe_inst >> 27) & 0x1FF) == 0x30) {
+				printk(KERN_WARNING
+					"Kprobes on \"mov r1=ip\" at <0x%lx> not supported\n",
+					addr);
+				return -EINVAL;
+
+			}
+		}
+		else if ((major_opcode == 5) && !(kprobe_inst & (0xFUl << 33)) &&
+				(kprobe_inst & (0x1UL << 12))) {
+			/* test bit instructions, tbit,tnat,tf
+			 * bit 33-36 to be equal to 0
+			 * bit 12 to be equal to 1
+			 */
+			if (slot == 1 && qp) {
+				printk(KERN_WARNING "Kprobes on test bit"
+						"instruction on slot at <0x%lx>"
+						"is not supported\n", addr);
+				return -EINVAL;
+			}
+			qp = 0;
+		}
+	}
+	else if (bundle_encoding[template][slot] == B) {
+		if (major_opcode == 7) {
+			/* IP-Relative Predict major code is 7 */
+			printk(KERN_WARNING "Kprobes on IP-Relative"
+					"Predict is not supported\n");
+			return -EINVAL;
+		}
+		else if (major_opcode == 2) {
+			/* Indirect Predict, major code is 2
+			 * bit 27-32 to be equal to 10 or 11
+			 */
+			int x6=(kprobe_inst >> 27) & 0x3F;
+			if ((x6 == 0x10) || (x6 == 0x11)) {
+				printk(KERN_WARNING "Kprobes on"
+						"Indirect Predict is not supported\n");
+				return -EINVAL;
+			}
+		}
+	}
+	/* kernel does not use float instruction, here for safety kprobe
+	 * will judge whether it is fcmp/flass/float approximation instruction
+	 */
+	else if (unlikely(bundle_encoding[template][slot] == F)) {
+		if ((major_opcode == 4 || major_opcode == 5) &&
+				(kprobe_inst & (0x1 << 12))) {
+			/* fcmp/fclass unc instruction */
+			if (slot == 1 && qp) {
+				printk(KERN_WARNING "Kprobes on fcmp/fclass "
+					"instruction on slot at <0x%lx> "
+					"is not supported\n", addr);
+				return -EINVAL;
+
+			}
+			qp = 0;
+		}
+		if ((major_opcode == 0 || major_opcode == 1) &&
+				(kprobe_inst & (0x1UL << 33))) {
+			/* float Approximation instruction */
+			if (slot == 1 && qp) {
+				printk(KERN_WARNING "Kprobes on float Approx "
+					"instr at <0x%lx> is not supported\n",
+					addr);
+				return -EINVAL;
+			}
+			qp = 0;
+		}
+	}
+	return qp;
+}
+
+/*
  * In this function we override the bundle with
  * the break instruction at the given slot.
  */
 static void __kprobes prepare_break_inst(uint template, uint slot,
 					 uint major_opcode,
 					 unsigned long kprobe_inst,
-					 struct kprobe *p)
+					 struct kprobe *p,
+					 int qp)
 {
 	unsigned long break_inst = BREAK_INST;
 	bundle_t *bundle = &p->opcode.bundle;
 
 	/*
 	 * Copy the original kprobe_inst qualifying predicate(qp)
-	 * to the break instruction iff !is_cmp_ctype_unc_inst
-	 * because for cmp instruction with ctype equal to unc,
-	 * which is a special instruction always needs to be
-	 * executed regradless of qp
+	 * to the break instruction
 	 */
-	if (!is_cmp_ctype_unc_inst(template, slot, major_opcode, kprobe_inst))
-		break_inst |= (0x3f & kprobe_inst);
+	break_inst |= qp;
 
 	switch (slot) {
 	case 0:
···
 	if (in_ivt_functions(addr)) {
 		printk(KERN_WARNING "Kprobes can't be inserted inside "
				"IVT functions at 0x%lx\n", addr);
-		return -EINVAL;
-	}
-
-	if (slot == 1 && bundle_encoding[template][1] != L) {
-		printk(KERN_WARNING "Inserting kprobes on slot #1 "
-		       "is not supported\n");
 		return -EINVAL;
 	}
 
···
 	unsigned long kprobe_inst=0;
 	unsigned int slot = addr & 0xf, template, major_opcode = 0;
 	bundle_t *bundle;
+	int qp;
 
 	bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle;
 	template = bundle->quad0.template;
···
 	/* Get kprobe_inst and major_opcode from the bundle */
 	get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);
 
-	if (unsupported_inst(template, slot, major_opcode, kprobe_inst, addr))
-		return -EINVAL;
-
+	qp = unsupported_inst(template, slot, major_opcode, kprobe_inst, addr);
+	if (qp < 0)
+		return -EINVAL;
 
 	p->ainsn.insn = get_insn_slot();
 	if (!p->ainsn.insn)
···
 	memcpy(&p->opcode, kprobe_addr, sizeof(kprobe_opcode_t));
 	memcpy(p->ainsn.insn, kprobe_addr, sizeof(kprobe_opcode_t));
 
-	prepare_break_inst(template, slot, major_opcode, kprobe_inst, p);
+	prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp);
 
 	return 0;
 }
 
 void __kprobes arch_arm_kprobe(struct kprobe *p)
 {
-	unsigned long addr = (unsigned long)p->addr;
-	unsigned long arm_addr = addr & ~0xFULL;
+	unsigned long arm_addr;
+	bundle_t *src, *dest;
+
+	arm_addr = ((unsigned long)p->addr) & ~0xFUL;
+	dest = &((kprobe_opcode_t *)arm_addr)->bundle;
+	src = &p->opcode.bundle;
 
 	flush_icache_range((unsigned long)p->ainsn.insn,
 			(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
-	memcpy((char *)arm_addr, &p->opcode, sizeof(kprobe_opcode_t));
+	switch (p->ainsn.slot) {
+	case 0:
+		dest->quad0.slot0 = src->quad0.slot0;
+		break;
+	case 1:
+		dest->quad1.slot1_p1 = src->quad1.slot1_p1;
+		break;
+	case 2:
+		dest->quad1.slot2 = src->quad1.slot2;
+		break;
+	}
 	flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
 }
 
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
-	unsigned long addr = (unsigned long)p->addr;
-	unsigned long arm_addr = addr & ~0xFULL;
+	unsigned long arm_addr;
+	bundle_t *src, *dest;
 
+	arm_addr = ((unsigned long)p->addr) & ~0xFUL;
+	dest = &((kprobe_opcode_t *)arm_addr)->bundle;
 	/* p->ainsn.insn contains the original unaltered kprobe_opcode_t */
-	memcpy((char *) arm_addr, (char *) p->ainsn.insn,
-			sizeof(kprobe_opcode_t));
+	src = &p->ainsn.insn->bundle;
+	switch (p->ainsn.slot) {
+	case 0:
+		dest->quad0.slot0 = src->quad0.slot0;
+		break;
+	case 1:
+		dest->quad1.slot1_p1 = src->quad1.slot1_p1;
+		break;
+	case 2:
+		dest->quad1.slot2 = src->quad1.slot2;
+		break;
+	}
 	flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
 }
 
···
 	switch(val) {
 	case DIE_BREAK:
 		/* err is break number from ia64_bad_break() */
-		if (args->err == 0x80200 || args->err == 0x80300 || args->err == 0)
+		if ((args->err >> 12) == (__IA64_BREAK_KPROBE >> 12)
+			|| args->err == __IA64_BREAK_JPROBE
+			|| args->err == 0)
 			if (pre_kprobes_handler(args))
 				ret = NOTIFY_STOP;
 		break;
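For reference, a standalone user-space sketch of the qp handling the kprobes.c change above introduces: the qualifying predicate sits in bits 0..5 of the instruction, unsupported_inst() now hands it back (or 0 for the special instructions that must always execute), and prepare_break_inst() ORs it into the break opcode. BREAK_INST_SKETCH and the helper names below are illustrative, not the kernel's.

#include <stdint.h>
#include <stdio.h>

#define BREAK_INST_SKETCH	0x0000002000ULL	/* placeholder break opcode bits */

static uint64_t qp_of(uint64_t inst)
{
	return inst & 0x3f;			/* qualifying predicate: bits 0..5 */
}

static uint64_t make_break_inst(uint64_t inst, int is_special)
{
	/* special instructions (cmp.unc, tbit/tnat, fcmp/fclass, ...) must
	 * fire unconditionally, so their qp is cleared */
	uint64_t qp = is_special ? 0 : qp_of(inst);

	return BREAK_INST_SKETCH | qp;
}

int main(void)
{
	uint64_t inst = 0x123456789ab07ULL;	/* fake instruction with qp = p7 */

	printf("qp kept:    %#llx\n", (unsigned long long)make_break_inst(inst, 0));
	printf("qp cleared: %#llx\n", (unsigned long long)make_break_inst(inst, 1));
	return 0;
}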
+5 -2
arch/ia64/kernel/machine_kexec.c
···
 #include <asm/delay.h>
 #include <asm/meminit.h>
 
-typedef void (*relocate_new_kernel_t)(unsigned long, unsigned long,
-		struct ia64_boot_param *, unsigned long);
+typedef NORET_TYPE void (*relocate_new_kernel_t)(
+					unsigned long indirection_page,
+					unsigned long start_address,
+					struct ia64_boot_param *boot_param,
+					unsigned long pal_addr) ATTRIB_NORET;
 
 struct kimage *ia64_kimage;
 
+1 -1
arch/ia64/kernel/mca.c
···
 	} else {
 		/* Dump buffered message to console */
 		ia64_mlogbuf_finish(1);
-#ifdef CONFIG_CRASH_DUMP
+#ifdef CONFIG_KEXEC
 		atomic_set(&kdump_in_progress, 1);
 		monarch_cpu = -1;
 #endif
+27 -6
arch/ia64/kernel/setup.c
···
 
 #ifdef CONFIG_KEXEC
 	/* crashkernel=size@offset specifies the size to reserve for a crash
-	 * kernel.(offset is ingored for keep compatibility with other archs)
+	 * kernel. If offset is 0, then it is determined automatically.
 	 * By reserving this memory we guarantee that linux never set's it
 	 * up as a DMA target.Useful for holding code to do something
 	 * appropriate after a kernel panic.
···
 		unsigned long base, size;
 		if (from) {
 			size = memparse(from + 12, &from);
+			if (*from == '@')
+				base = memparse(from+1, &from);
+			else
+				base = 0;
 			if (size) {
-				sort_regions(rsvd_region, n);
-				base = kdump_find_rsvd_region(size,
-						rsvd_region, n);
+				if (!base) {
+					sort_regions(rsvd_region, n);
+					base = kdump_find_rsvd_region(size,
+							rsvd_region, n);
+				}
 				if (base != ~0UL) {
 					rsvd_region[n].start =
 						(unsigned long)__va(base);
···
 	return 0;
 }
 early_param("nomca", setup_nomca);
+
+#ifdef CONFIG_PROC_VMCORE
+/* elfcorehdr= specifies the location of elf core header
+ * stored by the crashed kernel.
+ */
+static int __init parse_elfcorehdr(char *arg)
+{
+	if (!arg)
+		return -EINVAL;
+
+	elfcorehdr_addr = memparse(arg, &arg);
+	return 0;
+}
+early_param("elfcorehdr", parse_elfcorehdr);
+#endif /* CONFIG_PROC_VMCORE */
 
 void __init
 setup_arch (char **cmdline_p)
···
 {
 	char brand[128];
 
+	memcpy(brand, "Unknown", 8);
 	if (ia64_pal_get_brand_info(brand)) {
 		if (family == 0x7)
 			memcpy(brand, "Merced", 7);
···
 			case 0: memcpy(brand, "McKinley", 9); break;
 			case 1: memcpy(brand, "Madison", 8); break;
 			case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
-		} else
-			memcpy(brand, "Unknown", 8);
+		}
 	}
 	if (brandname[0] == '\0')
 		return strcpy(brandname, brand);
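For illustration, a small user-space sketch of how the crashkernel=<size>@<offset> option handled above gets split; parse_size() imitates the K/M/G suffix handling of the kernel's memparse(), and the command-line value is a made-up example.

#include <stdio.h>
#include <stdlib.h>

static unsigned long long parse_size(const char *s, char **end)
{
	unsigned long long v = strtoull(s, end, 0);

	switch (**end) {
	case 'G': case 'g':
		v <<= 10;	/* fall through */
	case 'M': case 'm':
		v <<= 10;	/* fall through */
	case 'K': case 'k':
		v <<= 10;
		(*end)++;
	}
	return v;
}

int main(void)
{
	const char *from = "crashkernel=128M@256M";	/* made-up command line */
	char *end;
	unsigned long long size, base = 0;

	size = parse_size(from + 12, &end);		/* skip "crashkernel=" */
	if (*end == '@')
		base = parse_size(end + 1, &end);	/* base 0 => picked automatically */

	printf("size=%lluM base=%lluM\n", size >> 20, base >> 20);
	return 0;
}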
+2 -2
arch/ia64/kernel/smp.c
···
 	      case IPI_CPU_STOP:
 		stop_this_cpu();
 		break;
-#ifdef CONFIG_CRASH_DUMP
+#ifdef CONFIG_KEXEC
 	      case IPI_KDUMP_CPU_STOP:
 		unw_init_running(kdump_cpu_freeze, NULL);
 		break;
···
 	send_IPI_single(smp_processor_id(), op);
 }
 
-#ifdef CONFIG_CRASH_DUMP
+#ifdef CONFIG_KEXEC
 void
 kdump_smp_send_stop()
 {
+40 -10
arch/ia64/kernel/traps.c
···
 	return ret.status;
 }
 
+struct fpu_swa_msg {
+	unsigned long count;
+	unsigned long time;
+};
+static DEFINE_PER_CPU(struct fpu_swa_msg, cpulast);
+DECLARE_PER_CPU(struct fpu_swa_msg, cpulast);
+static struct fpu_swa_msg last __cacheline_aligned;
+
+
 /*
  * Handle floating-point assist faults and traps.
  */
···
 	long exception, bundle[2];
 	unsigned long fault_ip;
 	struct siginfo siginfo;
-	static int fpu_swa_count = 0;
-	static unsigned long last_time;
 
 	fault_ip = regs->cr_iip;
 	if (!fp_fault && (ia64_psr(regs)->ri == 0))
···
 	if (copy_from_user(bundle, (void __user *) fault_ip, sizeof(bundle)))
 		return -1;
 
-	if (jiffies - last_time > 5*HZ)
-		fpu_swa_count = 0;
-	if ((fpu_swa_count < 4) && !(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT)) {
-		last_time = jiffies;
-		++fpu_swa_count;
-		printk(KERN_WARNING
-			"%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
-			current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri, isr);
+	if (!(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT)) {
+		unsigned long count, current_jiffies = jiffies;
+		struct fpu_swa_msg *cp = &__get_cpu_var(cpulast);
+
+		if (unlikely(current_jiffies > cp->time))
+			cp->count = 0;
+		if (unlikely(cp->count < 5)) {
+			cp->count++;
+			cp->time = current_jiffies + 5 * HZ;
+
+			/* minimize races by grabbing a copy of count BEFORE checking last.time. */
+			count = last.count;
+			barrier();
+
+			/*
+			 * Lower 4 bits are used as a count. Upper bits are a sequence
+			 * number that is updated when count is reset. The cmpxchg will
+			 * fail is seqno has changed. This minimizes mutiple cpus
+			 * reseting the count.
+			 */
+			if (current_jiffies > last.time)
+				(void) cmpxchg_acq(&last.count, count, 16 + (count & ~15));
+
+			/* used fetchadd to atomically update the count */
+			if ((last.count & 15) < 5 && (ia64_fetchadd(1, &last.count, acq) & 15) < 5) {
+				last.time = current_jiffies + 5 * HZ;
+				printk(KERN_WARNING
+					"%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
+					current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri, isr);
+			}
+		}
 	}
 
 	exception = fp_emulate(fp_fault, bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr,
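For illustration, a user-space sketch of the global throttle above: a shared word whose low 4 bits count messages and whose upper bits act as a sequence number bumped when the count is reset. C11 atomics stand in for the kernel's cmpxchg_acq()/ia64_fetchadd(), the per-CPU fast path is omitted, and the 5-messages-per-5-ticks policy is taken from the diff; everything else is illustrative.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned long last_count;	/* low 4 bits: count, upper bits: seqno */
static unsigned long last_time;			/* end of the current window */

static bool fpu_swa_should_print(unsigned long now)
{
	unsigned long count = atomic_load(&last_count);

	/* window expired: bump the seqno and clear the count; a failed
	 * compare-exchange just means another CPU reset it first */
	if (now > last_time)
		atomic_compare_exchange_strong(&last_count, &count,
					       16 + (count & ~15UL));

	/* fetch-and-add the low-bit counter: only the first five callers
	 * in each window get to print */
	if ((atomic_load(&last_count) & 15) < 5 &&
	    (atomic_fetch_add(&last_count, 1) & 15) < 5) {
		last_time = now + 5;		/* "+ 5 * HZ" in the kernel */
		return true;
	}
	return false;
}

int main(void)
{
	for (unsigned long t = 0; t < 20; t++)
		if (fpu_swa_should_print(t))
			printf("t=%lu: would log fp-assist warning\n", t);
	return 0;
}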
+7 -2
arch/ia64/mm/contig.c
···
 	reserve_bootmem(bootmap_start, bootmap_size);
 
 	find_initrd();
+
+#ifdef CONFIG_CRASH_DUMP
+	/* If we are doing a crash dump, we still need to know the real mem
+	 * size before original memory map is * reset. */
+	saved_max_pfn = max_pfn;
+#endif
 }
 
 #ifdef CONFIG_SMP
···
 paging_init (void)
 {
 	unsigned long max_dma;
-	unsigned long nid = 0;
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
 	num_physpages = 0;
···
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-	efi_memmap_walk(register_active_ranges, &nid);
+	efi_memmap_walk(register_active_ranges, NULL);
 	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
 	if (max_gap < LARGE_GAP) {
 		vmem_map = (struct page *) 0;
+2 -7
arch/ia64/mm/init.c
···
 }
 
 int __init
-register_active_ranges(u64 start, u64 end, void *nid)
+register_active_ranges(u64 start, u64 end, void *arg)
 {
-	BUG_ON(nid == NULL);
-	BUG_ON(*(unsigned long *)nid >= MAX_NUMNODES);
-
-	add_active_range(*(unsigned long *)nid,
-		__pa(start) >> PAGE_SHIFT,
-		__pa(end) >> PAGE_SHIFT);
+	add_active_range(0, __pa(start) >> PAGE_SHIFT, __pa(end) >> PAGE_SHIFT);
 	return 0;
 }
 #endif /* CONFIG_VIRTUAL_MEM_MAP */
+10 -2
arch/ia64/sn/kernel/setup.c
···
 	int slice;
 	int cnode;
 	int i;
-	static int wars_have_been_checked;
+	static int wars_have_been_checked, set_cpu0_number;
 
 	cpuid = smp_processor_id();
 	if (cpuid == 0 && IS_MEDUSA()) {
···
 	/*
 	 * Don't check status. The SAL call is not supported on all PROMs
 	 * but a failure is harmless.
+	 * Architechtuallly, cpu_init is always called twice on cpu 0. We
+	 * should set cpu_number on cpu 0 once.
 	 */
-	(void) ia64_sn_set_cpu_number(cpuid);
+	if (cpuid == 0) {
+		if (!set_cpu0_number) {
+			(void) ia64_sn_set_cpu_number(cpuid);
+			set_cpu0_number = 1;
+		}
+	} else
+		(void) ia64_sn_set_cpu_number(cpuid);
 
 	/*
 	 * The boot cpu makes this call again after platform initialization is
+10 -5
arch/ia64/sn/kernel/xpc_channel.c
···
 			ch->number, ch->partid);
 
 		spin_unlock_irqrestore(&ch->lock, *irq_flags);
-		xpc_create_kthreads(ch, 1);
+		xpc_create_kthreads(ch, 1, 0);
 		spin_lock_irqsave(&ch->lock, *irq_flags);
 	}
 
···
 
 	/* make sure all activity has settled down first */
 
-	if (atomic_read(&ch->references) > 0 ||
-			((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
-			!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE))) {
+	if (atomic_read(&ch->kthreads_assigned) > 0 ||
+			atomic_read(&ch->references) > 0) {
 		return;
 	}
-	DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
+	DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
+			!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));
 
 	if (part->act_state == XPC_P_DEACTIVATING) {
 		/* can't proceed until the other side disengages from us */
···
 	/* wake all idle kthreads so they can exit */
 	if (atomic_read(&ch->kthreads_idle) > 0) {
 		wake_up_all(&ch->idle_wq);
+
+	} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
+			!(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
+		/* start a kthread that will do the xpcDisconnecting callout */
+		xpc_create_kthreads(ch, 1, 1);
 	}
 
 	/* wake those waiting to allocate an entry from the local msg queue */
+43 -21
arch/ia64/sn/kernel/xpc_main.c
···
 	dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
 		needed, ch->partid, ch->number);
 
-	xpc_create_kthreads(ch, needed);
+	xpc_create_kthreads(ch, needed, 0);
 }
 
 
···
 		xpc_kthread_waitmsgs(part, ch);
 	}
 
-	if (atomic_dec_return(&ch->kthreads_assigned) == 0) {
-		spin_lock_irqsave(&ch->lock, irq_flags);
-		if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
-				!(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
-			ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
-			spin_unlock_irqrestore(&ch->lock, irq_flags);
+	/* let registerer know that connection is disconnecting */
 
-			xpc_disconnect_callout(ch, xpcDisconnecting);
-
-			spin_lock_irqsave(&ch->lock, irq_flags);
-			ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
-		}
+	spin_lock_irqsave(&ch->lock, irq_flags);
+	if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
+			!(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
+		ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
 		spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+		xpc_disconnect_callout(ch, xpcDisconnecting);
+
+		spin_lock_irqsave(&ch->lock, irq_flags);
+		ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
+	}
+	spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+	if (atomic_dec_return(&ch->kthreads_assigned) == 0) {
 		if (atomic_dec_return(&part->nchannels_engaged) == 0) {
 			xpc_mark_partition_disengaged(part);
 			xpc_IPI_send_disengage(part);
 		}
 	}
-
 
 	xpc_msgqueue_deref(ch);
 
···
  * partition.
  */
 void
-xpc_create_kthreads(struct xpc_channel *ch, int needed)
+xpc_create_kthreads(struct xpc_channel *ch, int needed,
+			int ignore_disconnecting)
 {
 	unsigned long irq_flags;
 	pid_t pid;
···
 		 * kthread. That kthread is responsible for doing the
 		 * counterpart to the following before it exits.
 		 */
+		if (ignore_disconnecting) {
+			if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
+				/* kthreads assigned had gone to zero */
+				BUG_ON(!(ch->flags &
+					XPC_C_DISCONNECTINGCALLOUT_MADE));
+				break;
+			}
+
+		} else if (ch->flags & XPC_C_DISCONNECTING) {
+			break;
+
+		} else if (atomic_inc_return(&ch->kthreads_assigned) == 1) {
+			if (atomic_inc_return(&part->nchannels_engaged) == 1)
+				xpc_mark_partition_engaged(part);
+		}
 		(void) xpc_part_ref(part);
 		xpc_msgqueue_ref(ch);
-		if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
-		    atomic_inc_return(&part->nchannels_engaged) == 1) {
-			xpc_mark_partition_engaged(part);
-		}
 
 		pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0);
 		if (pid < 0) {
 			/* the fork failed */
+
+			/*
+			 * NOTE: if (ignore_disconnecting &&
+			 * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
+			 * then we'll deadlock if all other kthreads assigned
+			 * to this channel are blocked in the channel's
+			 * registerer, because the only thing that will unblock
+			 * them is the xpcDisconnecting callout that this
+			 * failed kernel_thread would have made.
+			 */
+
 			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
 			    atomic_dec_return(&part->nchannels_engaged) == 0) {
 				xpc_mark_partition_disengaged(part);
···
 			 * Flag this as an error only if we have an
 			 * insufficient #of kthreads for the channel
 			 * to function.
-			 *
-			 * No xpc_msgqueue_ref() is needed here since
-			 * the channel mgr is doing this.
 			 */
 			spin_lock_irqsave(&ch->lock, irq_flags);
 			XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources,
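The deadlock fix above hinges on atomic_inc_not_zero(): take another kthread reference only if the count has not already dropped to zero. A user-space sketch of that semantics with a C11 compare-exchange loop (the kernel primitive itself is not reimplemented here, this only illustrates the behaviour):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool inc_not_zero(_Atomic int *v)
{
	int old = atomic_load(v);

	while (old != 0) {
		/* try to move old -> old + 1; on failure 'old' is reloaded */
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return true;
	}
	return false;		/* count had already dropped to zero */
}

int main(void)
{
	_Atomic int kthreads_assigned = 1;

	printf("%d\n", inc_not_zero(&kthreads_assigned));	/* 1: took a reference */
	atomic_store(&kthreads_assigned, 0);
	printf("%d\n", inc_not_zero(&kthreads_assigned));	/* 0: channel already drained */
	return 0;
}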
+2 -2
include/asm-ia64/break.h
···
  * OS-specific debug break numbers:
  */
 #define __IA64_BREAK_KDB		0x80100
-#define __IA64_BREAK_KPROBE		0x80200
-#define __IA64_BREAK_JPROBE		0x80300
+#define __IA64_BREAK_KPROBE		0x81000 /* .. 0x81fff */
+#define __IA64_BREAK_JPROBE		0x82000
 
 /*
  * OS-specific break numbers:
+1
include/asm-ia64/kprobes.h
···
 #define INST_FLAG_BREAK_INST	4
 	unsigned long inst_flag;
 	unsigned short target_br_reg;
+	unsigned short slot;
 };
 
 extern int kprobe_exceptions_notify(struct notifier_block *self,
+1 -1
include/asm-ia64/sn/xpc.h
···
 extern void xpc_dropped_IPI_check(struct xpc_partition *);
 extern void xpc_activate_partition(struct xpc_partition *);
 extern void xpc_activate_kthreads(struct xpc_channel *, int);
-extern void xpc_create_kthreads(struct xpc_channel *, int);
+extern void xpc_create_kthreads(struct xpc_channel *, int, int);
 extern void xpc_disconnect_wait(int);
 
 