Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
[IA64] make pfm_get_task work with virtual pids
[IA64] honor notify_die() returning NOTIFY_STOP
[IA64] remove dead code: __cpu_{down,die} from !HOTPLUG_CPU
[IA64] Appoint kvm/ia64 Maintainers
[IA64] ia64_set_psr should use srlz.i
[IA64] Export three symbols for module use
[IA64] mca style cleanup
[IA64] sn_hwperf semaphore to mutex
[IA64] generalize attribute of fsyscall_gtod_data
[IA64] efi.c Add /* never reached */ annotation
[IA64] efi.c Spelling/punctuation fixes
[IA64] Make efi.c mostly fit in 80 columns
[IA64] aliasing-test: fix gcc warnings on non-ia64
[IA64] Slim-down __clear_bit_unlock
[IA64] Fix the order of atomic operations in restore_previous_kprobes on ia64
[IA64] constify function pointer tables
[IA64] fix userspace compile error in gcc_intrin.h

+465 -373
+9 -6
Documentation/ia64/aliasing-test.c
··· 16 #include <fcntl.h> 17 #include <fnmatch.h> 18 #include <string.h> 19 #include <sys/mman.h> 20 #include <sys/stat.h> 21 #include <unistd.h> ··· 66 { 67 struct dirent **namelist; 68 char *name, *path2; 69 - int i, n, r, rc, result = 0; 70 struct stat buf; 71 72 n = scandir(path, &namelist, 0, alphasort); ··· 114 free(namelist[i]); 115 } 116 free(namelist); 117 - return rc; 118 } 119 120 char buf[1024]; ··· 150 { 151 struct dirent **namelist; 152 char *name, *path2; 153 - int i, n, r, rc, result = 0; 154 struct stat buf; 155 156 n = scandir(path, &namelist, 0, alphasort); ··· 181 * important thing is that no MCA happened. 182 */ 183 if (rc > 0) 184 - fprintf(stderr, "PASS: %s read %ld bytes\n", path2, rc); 185 else { 186 fprintf(stderr, "PASS: %s not readable\n", path2); 187 return rc; ··· 202 free(namelist[i]); 203 } 204 free(namelist); 205 - return rc; 206 } 207 208 - int main() 209 { 210 int rc; 211 ··· 257 scan_tree("/proc/bus/pci", "??.?", 0xA0000, 0x20000, 0); 258 scan_tree("/proc/bus/pci", "??.?", 0xC0000, 0x40000, 1); 259 scan_tree("/proc/bus/pci", "??.?", 0, 1024*1024, 0); 260 }
··· 16 #include <fcntl.h> 17 #include <fnmatch.h> 18 #include <string.h> 19 + #include <sys/ioctl.h> 20 #include <sys/mman.h> 21 #include <sys/stat.h> 22 #include <unistd.h> ··· 65 { 66 struct dirent **namelist; 67 char *name, *path2; 68 + int i, n, r, rc = 0, result = 0; 69 struct stat buf; 70 71 n = scandir(path, &namelist, 0, alphasort); ··· 113 free(namelist[i]); 114 } 115 free(namelist); 116 + return result; 117 } 118 119 char buf[1024]; ··· 149 { 150 struct dirent **namelist; 151 char *name, *path2; 152 + int i, n, r, rc = 0, result = 0; 153 struct stat buf; 154 155 n = scandir(path, &namelist, 0, alphasort); ··· 180 * important thing is that no MCA happened. 181 */ 182 if (rc > 0) 183 + fprintf(stderr, "PASS: %s read %d bytes\n", path2, rc); 184 else { 185 fprintf(stderr, "PASS: %s not readable\n", path2); 186 return rc; ··· 201 free(namelist[i]); 202 } 203 free(namelist); 204 + return result; 205 } 206 207 + int main(void) 208 { 209 int rc; 210 ··· 256 scan_tree("/proc/bus/pci", "??.?", 0xA0000, 0x20000, 0); 257 scan_tree("/proc/bus/pci", "??.?", 0xC0000, 0x40000, 1); 258 scan_tree("/proc/bus/pci", "??.?", 0, 1024*1024, 0); 259 + 260 + return rc; 261 }
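The warnings fixed here are routine: rc could be returned without ever being assigned (the scan functions meant to return the accumulated result anyway), read()'s return value is stored in an int so the %ld conversion mismatched, main() lacked a prototype and a return value, and <sys/ioctl.h> supplies a prototype that was previously missing. A minimal userspace sketch of the format-string half (hypothetical code, not from the patch):

#include <stdio.h>
#include <unistd.h>

int main(void)				/* (void) gives main a real prototype */
{
	char buf[1024];
	int rc = read(0, buf, sizeof(buf));	/* result narrowed to int */

	if (rc > 0)
		fprintf(stderr, "read %d bytes\n", rc);	/* %d matches int */
	return rc < 0;
}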
+9
MAINTAINERS
··· 2249 W: kvm.sourceforge.net 2250 S: Supported 2251 2252 KEXEC 2253 P: Eric Biederman 2254 M: ebiederm@xmission.com
··· 2249 W: kvm.sourceforge.net 2250 S: Supported 2251 2252 + KERNEL VIRTUAL MACHINE For Itanium(KVM/IA64) 2253 + P: Anthony Xu 2254 + M: anthony.xu@intel.com 2255 + P: Xiantao Zhang 2256 + M: xiantao.zhang@intel.com 2257 + L: kvm-ia64-devel@lists.sourceforge.net 2258 + W: kvm.sourceforge.net 2259 + S: Supported 2260 + 2261 KEXEC 2262 P: Eric Biederman 2263 M: ebiederm@xmission.com
+1 -1
arch/ia64/hp/common/sba_iommu.c
··· 1875 return 0; 1876 } 1877 1878 - static struct seq_operations ioc_seq_ops = { 1879 .start = ioc_start, 1880 .next = ioc_next, 1881 .stop = ioc_stop,
··· 1875 return 0; 1876 } 1877 1878 + static const struct seq_operations ioc_seq_ops = { 1879 .start = ioc_start, 1880 .next = ioc_next, 1881 .stop = ioc_stop,
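Marking a seq_operations table const lets it live in read-only data; seq_open() already takes a const pointer, so no caller changes are needed. The same one-word fix recurs in setup.c, perfmon.c, sn2_smp.c and sn_hwperf.c below. Generic seq_file boilerplate for context (a sketch, names are placeholders):

static const struct seq_operations foo_seq_ops = {
	.start = foo_start,
	.next  = foo_next,
	.stop  = foo_stop,
	.show  = foo_show,
};

static int foo_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &foo_seq_ops);	/* accepts a const table */
}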
+3 -2
arch/ia64/ia32/ia32_support.c
··· 27 28 #include "ia32priv.h" 29 30 - extern void die_if_kernel (char *str, struct pt_regs *regs, long err); 31 32 struct exec_domain ia32_exec_domain; 33 struct page *ia32_shared_page[NR_CPUS]; ··· 217 { 218 siginfo_t siginfo; 219 220 - die_if_kernel("Bad IA-32 interrupt", regs, int_num); 221 222 siginfo.si_signo = SIGTRAP; 223 siginfo.si_errno = int_num; /* XXX is it OK to abuse si_errno like this? */
··· 27 28 #include "ia32priv.h" 29 30 + extern int die_if_kernel (char *str, struct pt_regs *regs, long err); 31 32 struct exec_domain ia32_exec_domain; 33 struct page *ia32_shared_page[NR_CPUS]; ··· 217 { 218 siginfo_t siginfo; 219 220 + if (die_if_kernel("Bad IA-32 interrupt", regs, int_num)) 221 + return; 222 223 siginfo.si_signo = SIGTRAP; 224 siginfo.si_errno = int_num; /* XXX is it OK to abuse si_errno like this? */
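This is the first call site converted for the notify_die() change: die() and die_if_kernel() now return non-zero when a registered die notifier (a kernel debugger, say) returned NOTIFY_STOP and claimed the fault, in which case the caller must return without delivering a signal. The recurring pattern, as used throughout the files below:

	if (die_if_kernel("Bad IA-32 interrupt", regs, int_num))
		return;		/* a die notifier returned NOTIFY_STOP */
	/* otherwise fall through and queue the signal for user space */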
+269 -227
arch/ia64/kernel/efi.c
··· 1 /* 2 * Extensible Firmware Interface 3 * 4 - * Based on Extensible Firmware Interface Specification version 0.9 April 30, 1999 5 * 6 * Copyright (C) 1999 VA Linux Systems 7 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> ··· 49 50 #define efi_call_virt(f, args...) (*(f))(args) 51 52 - #define STUB_GET_TIME(prefix, adjust_arg) \ 53 - static efi_status_t \ 54 - prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \ 55 - { \ 56 - struct ia64_fpreg fr[6]; \ 57 - efi_time_cap_t *atc = NULL; \ 58 - efi_status_t ret; \ 59 - \ 60 - if (tc) \ 61 - atc = adjust_arg(tc); \ 62 - ia64_save_scratch_fpregs(fr); \ 63 - ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), adjust_arg(tm), atc); \ 64 - ia64_load_scratch_fpregs(fr); \ 65 - return ret; \ 66 } 67 68 - #define STUB_SET_TIME(prefix, adjust_arg) \ 69 - static efi_status_t \ 70 - prefix##_set_time (efi_time_t *tm) \ 71 - { \ 72 - struct ia64_fpreg fr[6]; \ 73 - efi_status_t ret; \ 74 - \ 75 - ia64_save_scratch_fpregs(fr); \ 76 - ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), adjust_arg(tm)); \ 77 - ia64_load_scratch_fpregs(fr); \ 78 - return ret; \ 79 } 80 81 - #define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \ 82 - static efi_status_t \ 83 - prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm) \ 84 - { \ 85 - struct ia64_fpreg fr[6]; \ 86 - efi_status_t ret; \ 87 - \ 88 - ia64_save_scratch_fpregs(fr); \ 89 - ret = efi_call_##prefix((efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \ 90 - adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \ 91 - ia64_load_scratch_fpregs(fr); \ 92 - return ret; \ 93 } 94 95 - #define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \ 96 - static efi_status_t \ 97 - prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \ 98 - { \ 99 - struct ia64_fpreg fr[6]; \ 100 - efi_time_t *atm = NULL; \ 101 - efi_status_t ret; \ 102 - \ 103 - if (tm) \ 104 - atm = adjust_arg(tm); \ 105 - ia64_save_scratch_fpregs(fr); \ 106 - ret = efi_call_##prefix((efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \ 107 - enabled, atm); \ 108 - ia64_load_scratch_fpregs(fr); \ 109 - return ret; \ 110 } 111 112 - #define STUB_GET_VARIABLE(prefix, adjust_arg) \ 113 - static efi_status_t \ 114 - prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \ 115 - unsigned long *data_size, void *data) \ 116 - { \ 117 - struct ia64_fpreg fr[6]; \ 118 - u32 *aattr = NULL; \ 119 - efi_status_t ret; \ 120 - \ 121 - if (attr) \ 122 - aattr = adjust_arg(attr); \ 123 - ia64_save_scratch_fpregs(fr); \ 124 - ret = efi_call_##prefix((efi_get_variable_t *) __va(runtime->get_variable), \ 125 - adjust_arg(name), adjust_arg(vendor), aattr, \ 126 - adjust_arg(data_size), adjust_arg(data)); \ 127 - ia64_load_scratch_fpregs(fr); \ 128 - return ret; \ 129 } 130 131 - #define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \ 132 - static efi_status_t \ 133 - prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor) \ 134 - { \ 135 - struct ia64_fpreg fr[6]; \ 136 - efi_status_t ret; \ 137 - \ 138 - ia64_save_scratch_fpregs(fr); \ 139 - ret = efi_call_##prefix((efi_get_next_variable_t *) __va(runtime->get_next_variable), \ 140 - adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \ 141 - ia64_load_scratch_fpregs(fr); \ 142 - return ret; \ 143 } 144 145 - #define STUB_SET_VARIABLE(prefix, adjust_arg) \ 146 - static efi_status_t \ 147 - prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, unsigned 
long attr, \ 148 - unsigned long data_size, void *data) \ 149 - { \ 150 - struct ia64_fpreg fr[6]; \ 151 - efi_status_t ret; \ 152 - \ 153 - ia64_save_scratch_fpregs(fr); \ 154 - ret = efi_call_##prefix((efi_set_variable_t *) __va(runtime->set_variable), \ 155 - adjust_arg(name), adjust_arg(vendor), attr, data_size, \ 156 - adjust_arg(data)); \ 157 - ia64_load_scratch_fpregs(fr); \ 158 - return ret; \ 159 } 160 161 - #define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \ 162 - static efi_status_t \ 163 - prefix##_get_next_high_mono_count (u32 *count) \ 164 - { \ 165 - struct ia64_fpreg fr[6]; \ 166 - efi_status_t ret; \ 167 - \ 168 - ia64_save_scratch_fpregs(fr); \ 169 - ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \ 170 - __va(runtime->get_next_high_mono_count), adjust_arg(count)); \ 171 - ia64_load_scratch_fpregs(fr); \ 172 - return ret; \ 173 } 174 175 - #define STUB_RESET_SYSTEM(prefix, adjust_arg) \ 176 - static void \ 177 - prefix##_reset_system (int reset_type, efi_status_t status, \ 178 - unsigned long data_size, efi_char16_t *data) \ 179 - { \ 180 - struct ia64_fpreg fr[6]; \ 181 - efi_char16_t *adata = NULL; \ 182 - \ 183 - if (data) \ 184 - adata = adjust_arg(data); \ 185 - \ 186 - ia64_save_scratch_fpregs(fr); \ 187 - efi_call_##prefix((efi_reset_system_t *) __va(runtime->reset_system), \ 188 - reset_type, status, data_size, adata); \ 189 - /* should not return, but just in case... */ \ 190 - ia64_load_scratch_fpregs(fr); \ 191 } 192 193 #define phys_ptr(arg) ((__typeof__(arg)) ia64_tpa(arg)) ··· 236 return; 237 } 238 239 - ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second); 240 ts->tv_nsec = tm.nanosecond; 241 } 242 ··· 311 } 312 313 /* 314 - * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that 315 - * has memory that is available for OS use. 316 */ 317 void 318 efi_memmap_walk (efi_freemem_callback_t callback, void *arg) ··· 321 } 322 323 /* 324 - * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that 325 - * has memory that is available for uncached allocator. 326 */ 327 void 328 efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg) ··· 331 } 332 333 /* 334 - * Look for the PAL_CODE region reported by EFI and maps it using an 335 * ITR to enable safe PAL calls in virtual mode. See IA-64 Processor 336 * Abstraction Layer chapter 11 in ADAG 337 */ 338 - 339 void * 340 efi_get_pal_addr (void) 341 { ··· 354 continue; 355 356 if (++pal_code_count > 1) { 357 - printk(KERN_ERR "Too many EFI Pal Code memory ranges, dropped @ %lx\n", 358 - md->phys_addr); 359 continue; 360 } 361 /* 362 - * The only ITLB entry in region 7 that is used is the one installed by 363 - * __start(). That entry covers a 64MB range. 364 */ 365 mask = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1); 366 vaddr = PAGE_OFFSET + md->phys_addr; 367 368 /* 369 - * We must check that the PAL mapping won't overlap with the kernel 370 - * mapping. 371 * 372 - * PAL code is guaranteed to be aligned on a power of 2 between 4k and 373 - * 256KB and that only one ITR is needed to map it. This implies that the 374 - * PAL code is always aligned on its size, i.e., the closest matching page 375 - * size supported by the TLB. Therefore PAL code is guaranteed never to 376 - * cross a 64MB unless it is bigger than 64MB (very unlikely!). So for 377 - * now the following test is enough to determine whether or not we need a 378 - * dedicated ITR for the PAL code. 
379 */ 380 if ((vaddr & mask) == (KERNEL_START & mask)) { 381 - printk(KERN_INFO "%s: no need to install ITR for PAL code\n", 382 - __FUNCTION__); 383 continue; 384 } 385 386 if (efi_md_size(md) > IA64_GRANULE_SIZE) 387 - panic("Woah! PAL code size bigger than a granule!"); 388 389 #if EFI_DEBUG 390 mask = ~((1 << IA64_GRANULE_SHIFT) - 1); 391 392 - printk(KERN_INFO "CPU %d: mapping PAL code [0x%lx-0x%lx) into [0x%lx-0x%lx)\n", 393 - smp_processor_id(), md->phys_addr, 394 - md->phys_addr + efi_md_size(md), 395 - vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE); 396 #endif 397 return __va(md->phys_addr); 398 } ··· 416 * Cannot write to CRx with PSR.ic=1 417 */ 418 psr = ia64_clear_ic(); 419 - ia64_itr(0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr), 420 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)), 421 IA64_GRANULE_SHIFT); 422 ia64_set_psr(psr); /* restore psr */ 423 - ia64_srlz_i(); 424 } 425 426 void __init ··· 433 char *cp, vendor[100] = "unknown"; 434 int i; 435 436 - /* it's too early to be able to use the standard kernel command line support... */ 437 for (cp = boot_command_line; *cp; ) { 438 if (memcmp(cp, "mem=", 4) == 0) { 439 mem_limit = memparse(cp + 4, &cp); ··· 452 } 453 } 454 if (min_addr != 0UL) 455 - printk(KERN_INFO "Ignoring memory below %luMB\n", min_addr >> 20); 456 if (max_addr != ~0UL) 457 - printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20); 458 459 efi.systab = __va(ia64_boot_param->efi_systab); 460 ··· 464 * Verify the EFI Table 465 */ 466 if (efi.systab == NULL) 467 - panic("Woah! Can't find EFI system table.\n"); 468 if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) 469 - panic("Woah! EFI system table signature incorrect\n"); 470 if ((efi.systab->hdr.revision >> 16) == 0) 471 printk(KERN_WARNING "Warning: EFI system table version " 472 "%d.%02d, expected 1.00 or greater\n", ··· 484 } 485 486 printk(KERN_INFO "EFI v%u.%.02u by %s:", 487 - efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor); 488 489 efi.mps = EFI_INVALID_TABLE_ADDR; 490 efi.acpi = EFI_INVALID_TABLE_ADDR; ··· 540 efi_memory_desc_t *md; 541 void *p; 542 543 - for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) { 544 md = p; 545 - printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n", 546 i, md->type, md->attribute, md->phys_addr, 547 md->phys_addr + efi_md_size(md), 548 md->num_pages >> (20 - EFI_PAGE_SHIFT)); ··· 573 md = p; 574 if (md->attribute & EFI_MEMORY_RUNTIME) { 575 /* 576 - * Some descriptors have multiple bits set, so the order of 577 - * the tests is relevant. 
578 */ 579 if (md->attribute & EFI_MEMORY_WB) { 580 md->virt_addr = (u64) __va(md->phys_addr); ··· 582 md->virt_addr = (u64) ioremap(md->phys_addr, 0); 583 } else if (md->attribute & EFI_MEMORY_WC) { 584 #if 0 585 - md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P 586 - | _PAGE_D 587 - | _PAGE_MA_WC 588 - | _PAGE_PL_0 589 - | _PAGE_AR_RW)); 590 #else 591 printk(KERN_INFO "EFI_MEMORY_WC mapping\n"); 592 md->virt_addr = (u64) ioremap(md->phys_addr, 0); 593 #endif 594 } else if (md->attribute & EFI_MEMORY_WT) { 595 #if 0 596 - md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P 597 - | _PAGE_D | _PAGE_MA_WT 598 - | _PAGE_PL_0 599 - | _PAGE_AR_RW)); 600 #else 601 printk(KERN_INFO "EFI_MEMORY_WT mapping\n"); 602 md->virt_addr = (u64) ioremap(md->phys_addr, 0); ··· 612 613 status = efi_call_phys(__va(runtime->set_virtual_address_map), 614 ia64_boot_param->efi_memmap_size, 615 - efi_desc_size, ia64_boot_param->efi_memdesc_version, 616 ia64_boot_param->efi_memmap); 617 if (status != EFI_SUCCESS) { 618 - printk(KERN_WARNING "warning: unable to switch EFI into virtual mode " 619 - "(status=%lu)\n", status); 620 return; 621 } 622 623 /* 624 - * Now that EFI is in virtual mode, we call the EFI functions more efficiently: 625 */ 626 efi.get_time = virt_get_time; 627 efi.set_time = virt_set_time; ··· 637 } 638 639 /* 640 - * Walk the EFI memory map looking for the I/O port range. There can only be one entry of 641 - * this type, other I/O port ranges should be described via ACPI. 642 */ 643 u64 644 efi_get_iobase (void) ··· 709 710 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { 711 md = p; 712 - 713 if (md->phys_addr < end && efi_md_end(md) > phys_addr) 714 return 1; 715 } ··· 761 if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr) 762 return 0; 763 } while (md); 764 - return 0; 765 } 766 767 u64 ··· 797 if (!md || md->attribute != attr) 798 return 0; 799 } while (md); 800 - return 0; 801 } 802 EXPORT_SYMBOL(kern_mem_attribute); 803 ··· 913 return 1; 914 uart = 0; 915 } 916 - hdr = (struct efi_generic_dev_path *) ((u8 *) hdr + hdr->length); 917 } 918 printk(KERN_ERR "Malformed %s value\n", name); 919 return 0; ··· 951 if (!efi_wb(md)) { 952 continue; 953 } 954 - if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) { 955 contig_low = GRANULEROUNDUP(md->phys_addr); 956 contig_high = efi_md_end(md); 957 - for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) { 958 check_md = q; 959 if (!efi_wb(check_md)) 960 break; ··· 1020 for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) { 1021 md = p; 1022 if (!efi_wb(md)) { 1023 - if (efi_uc(md) && (md->type == EFI_CONVENTIONAL_MEMORY || 1024 - md->type == EFI_BOOT_SERVICES_DATA)) { 1025 k->attribute = EFI_MEMORY_UC; 1026 k->start = md->phys_addr; 1027 k->num_pages = md->num_pages; ··· 1030 } 1031 continue; 1032 } 1033 - if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) { 1034 contig_low = GRANULEROUNDUP(md->phys_addr); 1035 contig_high = efi_md_end(md); 1036 - for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) { 1037 check_md = q; 1038 if (!efi_wb(check_md)) 1039 break; ··· 1060 if (md->phys_addr < contig_low) { 1061 lim = min(efi_md_end(md), contig_low); 1062 if (efi_uc(md)) { 1063 - if (k > kern_memmap && (k-1)->attribute == EFI_MEMORY_UC && 1064 kmd_end(k-1) == md->phys_addr) { 1065 - (k-1)->num_pages += (lim - md->phys_addr) >> EFI_PAGE_SHIFT; 1066 } else { 1067 k->attribute = EFI_MEMORY_UC; 1068 k->start = md->phys_addr; 1069 - k->num_pages 
= (lim - md->phys_addr) >> EFI_PAGE_SHIFT; 1070 k++; 1071 } 1072 } ··· 1088 } else { 1089 k->attribute = EFI_MEMORY_UC; 1090 k->start = lim; 1091 - k->num_pages = (efi_md_end(md) - lim) >> EFI_PAGE_SHIFT; 1092 k++; 1093 } 1094 } ··· 1191 break; 1192 } 1193 1194 - if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) { 1195 - printk(KERN_ERR "failed to alocate resource for iomem\n"); 1196 return; 1197 } 1198 ··· 1229 rsvd_regions are sorted 1230 */ 1231 unsigned long __init 1232 - kdump_find_rsvd_region (unsigned long size, 1233 - struct rsvd_region *r, int n) 1234 { 1235 - int i; 1236 - u64 start, end; 1237 - u64 alignment = 1UL << _PAGE_SIZE_64M; 1238 - void *efi_map_start, *efi_map_end, *p; 1239 - efi_memory_desc_t *md; 1240 - u64 efi_desc_size; 1241 1242 - efi_map_start = __va(ia64_boot_param->efi_memmap); 1243 - efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; 1244 - efi_desc_size = ia64_boot_param->efi_memdesc_size; 1245 1246 - for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { 1247 - md = p; 1248 - if (!efi_wb(md)) 1249 - continue; 1250 - start = ALIGN(md->phys_addr, alignment); 1251 - end = efi_md_end(md); 1252 - for (i = 0; i < n; i++) { 1253 - if (__pa(r[i].start) >= start && __pa(r[i].end) < end) { 1254 - if (__pa(r[i].start) > start + size) 1255 - return start; 1256 - start = ALIGN(__pa(r[i].end), alignment); 1257 - if (i < n-1 && __pa(r[i+1].start) < start + size) 1258 - continue; 1259 - else 1260 - break; 1261 } 1262 - } 1263 - if (end > start + size) 1264 - return start; 1265 - } 1266 1267 - printk(KERN_WARNING "Cannot reserve 0x%lx byte of memory for crashdump\n", 1268 - size); 1269 - return ~0UL; 1270 } 1271 #endif 1272
··· 1 /* 2 * Extensible Firmware Interface 3 * 4 + * Based on Extensible Firmware Interface Specification version 0.9 5 + * April 30, 1999 6 * 7 * Copyright (C) 1999 VA Linux Systems 8 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> ··· 48 49 #define efi_call_virt(f, args...) (*(f))(args) 50 51 + #define STUB_GET_TIME(prefix, adjust_arg) \ 52 + static efi_status_t \ 53 + prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \ 54 + { \ 55 + struct ia64_fpreg fr[6]; \ 56 + efi_time_cap_t *atc = NULL; \ 57 + efi_status_t ret; \ 58 + \ 59 + if (tc) \ 60 + atc = adjust_arg(tc); \ 61 + ia64_save_scratch_fpregs(fr); \ 62 + ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), \ 63 + adjust_arg(tm), atc); \ 64 + ia64_load_scratch_fpregs(fr); \ 65 + return ret; \ 66 } 67 68 + #define STUB_SET_TIME(prefix, adjust_arg) \ 69 + static efi_status_t \ 70 + prefix##_set_time (efi_time_t *tm) \ 71 + { \ 72 + struct ia64_fpreg fr[6]; \ 73 + efi_status_t ret; \ 74 + \ 75 + ia64_save_scratch_fpregs(fr); \ 76 + ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), \ 77 + adjust_arg(tm)); \ 78 + ia64_load_scratch_fpregs(fr); \ 79 + return ret; \ 80 } 81 82 + #define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \ 83 + static efi_status_t \ 84 + prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, \ 85 + efi_time_t *tm) \ 86 + { \ 87 + struct ia64_fpreg fr[6]; \ 88 + efi_status_t ret; \ 89 + \ 90 + ia64_save_scratch_fpregs(fr); \ 91 + ret = efi_call_##prefix( \ 92 + (efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \ 93 + adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \ 94 + ia64_load_scratch_fpregs(fr); \ 95 + return ret; \ 96 } 97 98 + #define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \ 99 + static efi_status_t \ 100 + prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \ 101 + { \ 102 + struct ia64_fpreg fr[6]; \ 103 + efi_time_t *atm = NULL; \ 104 + efi_status_t ret; \ 105 + \ 106 + if (tm) \ 107 + atm = adjust_arg(tm); \ 108 + ia64_save_scratch_fpregs(fr); \ 109 + ret = efi_call_##prefix( \ 110 + (efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \ 111 + enabled, atm); \ 112 + ia64_load_scratch_fpregs(fr); \ 113 + return ret; \ 114 } 115 116 + #define STUB_GET_VARIABLE(prefix, adjust_arg) \ 117 + static efi_status_t \ 118 + prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \ 119 + unsigned long *data_size, void *data) \ 120 + { \ 121 + struct ia64_fpreg fr[6]; \ 122 + u32 *aattr = NULL; \ 123 + efi_status_t ret; \ 124 + \ 125 + if (attr) \ 126 + aattr = adjust_arg(attr); \ 127 + ia64_save_scratch_fpregs(fr); \ 128 + ret = efi_call_##prefix( \ 129 + (efi_get_variable_t *) __va(runtime->get_variable), \ 130 + adjust_arg(name), adjust_arg(vendor), aattr, \ 131 + adjust_arg(data_size), adjust_arg(data)); \ 132 + ia64_load_scratch_fpregs(fr); \ 133 + return ret; \ 134 } 135 136 + #define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \ 137 + static efi_status_t \ 138 + prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, \ 139 + efi_guid_t *vendor) \ 140 + { \ 141 + struct ia64_fpreg fr[6]; \ 142 + efi_status_t ret; \ 143 + \ 144 + ia64_save_scratch_fpregs(fr); \ 145 + ret = efi_call_##prefix( \ 146 + (efi_get_next_variable_t *) __va(runtime->get_next_variable), \ 147 + adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \ 148 + ia64_load_scratch_fpregs(fr); \ 149 + return ret; \ 150 } 151 152 + #define STUB_SET_VARIABLE(prefix, adjust_arg) \ 153 + static efi_status_t \ 154 + 
prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, \ 155 + unsigned long attr, unsigned long data_size, \ 156 + void *data) \ 157 + { \ 158 + struct ia64_fpreg fr[6]; \ 159 + efi_status_t ret; \ 160 + \ 161 + ia64_save_scratch_fpregs(fr); \ 162 + ret = efi_call_##prefix( \ 163 + (efi_set_variable_t *) __va(runtime->set_variable), \ 164 + adjust_arg(name), adjust_arg(vendor), attr, data_size, \ 165 + adjust_arg(data)); \ 166 + ia64_load_scratch_fpregs(fr); \ 167 + return ret; \ 168 } 169 170 + #define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \ 171 + static efi_status_t \ 172 + prefix##_get_next_high_mono_count (u32 *count) \ 173 + { \ 174 + struct ia64_fpreg fr[6]; \ 175 + efi_status_t ret; \ 176 + \ 177 + ia64_save_scratch_fpregs(fr); \ 178 + ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \ 179 + __va(runtime->get_next_high_mono_count), \ 180 + adjust_arg(count)); \ 181 + ia64_load_scratch_fpregs(fr); \ 182 + return ret; \ 183 } 184 185 + #define STUB_RESET_SYSTEM(prefix, adjust_arg) \ 186 + static void \ 187 + prefix##_reset_system (int reset_type, efi_status_t status, \ 188 + unsigned long data_size, efi_char16_t *data) \ 189 + { \ 190 + struct ia64_fpreg fr[6]; \ 191 + efi_char16_t *adata = NULL; \ 192 + \ 193 + if (data) \ 194 + adata = adjust_arg(data); \ 195 + \ 196 + ia64_save_scratch_fpregs(fr); \ 197 + efi_call_##prefix( \ 198 + (efi_reset_system_t *) __va(runtime->reset_system), \ 199 + reset_type, status, data_size, adata); \ 200 + /* should not return, but just in case... */ \ 201 + ia64_load_scratch_fpregs(fr); \ 202 } 203 204 #define phys_ptr(arg) ((__typeof__(arg)) ia64_tpa(arg)) ··· 223 return; 224 } 225 226 + ts->tv_sec = mktime(tm.year, tm.month, tm.day, 227 + tm.hour, tm.minute, tm.second); 228 ts->tv_nsec = tm.nanosecond; 229 } 230 ··· 297 } 298 299 /* 300 + * Walk the EFI memory map and call CALLBACK once for each EFI memory 301 + * descriptor that has memory that is available for OS use. 302 */ 303 void 304 efi_memmap_walk (efi_freemem_callback_t callback, void *arg) ··· 307 } 308 309 /* 310 + * Walk the EFI memory map and call CALLBACK once for each EFI memory 311 + * descriptor that has memory that is available for uncached allocator. 312 */ 313 void 314 efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg) ··· 317 } 318 319 /* 320 + * Look for the PAL_CODE region reported by EFI and map it using an 321 * ITR to enable safe PAL calls in virtual mode. See IA-64 Processor 322 * Abstraction Layer chapter 11 in ADAG 323 */ 324 void * 325 efi_get_pal_addr (void) 326 { ··· 341 continue; 342 343 if (++pal_code_count > 1) { 344 + printk(KERN_ERR "Too many EFI Pal Code memory ranges, " 345 + "dropped @ %lx\n", md->phys_addr); 346 continue; 347 } 348 /* 349 + * The only ITLB entry in region 7 that is used is the one 350 + * installed by __start(). That entry covers a 64MB range. 351 */ 352 mask = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1); 353 vaddr = PAGE_OFFSET + md->phys_addr; 354 355 /* 356 + * We must check that the PAL mapping won't overlap with the 357 + * kernel mapping. 358 * 359 + * PAL code is guaranteed to be aligned on a power of 2 between 360 + * 4k and 256KB and that only one ITR is needed to map it. This 361 + * implies that the PAL code is always aligned on its size, 362 + * i.e., the closest matching page size supported by the TLB. 363 + * Therefore PAL code is guaranteed never to cross a 64MB unless 364 + * it is bigger than 64MB (very unlikely!). 
So for now the 365 + * following test is enough to determine whether or not we need 366 + * a dedicated ITR for the PAL code. 367 */ 368 if ((vaddr & mask) == (KERNEL_START & mask)) { 369 + printk(KERN_INFO "%s: no need to install ITR for " 370 + "PAL code\n", __FUNCTION__); 371 continue; 372 } 373 374 if (efi_md_size(md) > IA64_GRANULE_SIZE) 375 + panic("Whoa! PAL code size bigger than a granule!"); 376 377 #if EFI_DEBUG 378 mask = ~((1 << IA64_GRANULE_SHIFT) - 1); 379 380 + printk(KERN_INFO "CPU %d: mapping PAL code " 381 + "[0x%lx-0x%lx) into [0x%lx-0x%lx)\n", 382 + smp_processor_id(), md->phys_addr, 383 + md->phys_addr + efi_md_size(md), 384 + vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE); 385 #endif 386 return __va(md->phys_addr); 387 } ··· 401 * Cannot write to CRx with PSR.ic=1 402 */ 403 psr = ia64_clear_ic(); 404 + ia64_itr(0x1, IA64_TR_PALCODE, 405 + GRANULEROUNDDOWN((unsigned long) pal_vaddr), 406 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)), 407 IA64_GRANULE_SHIFT); 408 ia64_set_psr(psr); /* restore psr */ 409 } 410 411 void __init ··· 418 char *cp, vendor[100] = "unknown"; 419 int i; 420 421 + /* 422 + * It's too early to be able to use the standard kernel command line 423 + * support... 424 + */ 425 for (cp = boot_command_line; *cp; ) { 426 if (memcmp(cp, "mem=", 4) == 0) { 427 mem_limit = memparse(cp + 4, &cp); ··· 434 } 435 } 436 if (min_addr != 0UL) 437 + printk(KERN_INFO "Ignoring memory below %luMB\n", 438 + min_addr >> 20); 439 if (max_addr != ~0UL) 440 + printk(KERN_INFO "Ignoring memory above %luMB\n", 441 + max_addr >> 20); 442 443 efi.systab = __va(ia64_boot_param->efi_systab); 444 ··· 444 * Verify the EFI Table 445 */ 446 if (efi.systab == NULL) 447 + panic("Whoa! Can't find EFI system table.\n"); 448 if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) 449 + panic("Whoa! EFI system table signature incorrect\n"); 450 if ((efi.systab->hdr.revision >> 16) == 0) 451 printk(KERN_WARNING "Warning: EFI system table version " 452 "%d.%02d, expected 1.00 or greater\n", ··· 464 } 465 466 printk(KERN_INFO "EFI v%u.%.02u by %s:", 467 + efi.systab->hdr.revision >> 16, 468 + efi.systab->hdr.revision & 0xffff, vendor); 469 470 efi.mps = EFI_INVALID_TABLE_ADDR; 471 efi.acpi = EFI_INVALID_TABLE_ADDR; ··· 519 efi_memory_desc_t *md; 520 void *p; 521 522 + for (i = 0, p = efi_map_start; p < efi_map_end; 523 + ++i, p += efi_desc_size) 524 + { 525 md = p; 526 + printk("mem%02u: type=%u, attr=0x%lx, " 527 + "range=[0x%016lx-0x%016lx) (%luMB)\n", 528 i, md->type, md->attribute, md->phys_addr, 529 md->phys_addr + efi_md_size(md), 530 md->num_pages >> (20 - EFI_PAGE_SHIFT)); ··· 549 md = p; 550 if (md->attribute & EFI_MEMORY_RUNTIME) { 551 /* 552 + * Some descriptors have multiple bits set, so the 553 + * order of the tests is relevant. 
554 */ 555 if (md->attribute & EFI_MEMORY_WB) { 556 md->virt_addr = (u64) __va(md->phys_addr); ··· 558 md->virt_addr = (u64) ioremap(md->phys_addr, 0); 559 } else if (md->attribute & EFI_MEMORY_WC) { 560 #if 0 561 + md->virt_addr = ia64_remap(md->phys_addr, 562 + (_PAGE_A | 563 + _PAGE_P | 564 + _PAGE_D | 565 + _PAGE_MA_WC | 566 + _PAGE_PL_0 | 567 + _PAGE_AR_RW)); 568 #else 569 printk(KERN_INFO "EFI_MEMORY_WC mapping\n"); 570 md->virt_addr = (u64) ioremap(md->phys_addr, 0); 571 #endif 572 } else if (md->attribute & EFI_MEMORY_WT) { 573 #if 0 574 + md->virt_addr = ia64_remap(md->phys_addr, 575 + (_PAGE_A | 576 + _PAGE_P | 577 + _PAGE_D | 578 + _PAGE_MA_WT | 579 + _PAGE_PL_0 | 580 + _PAGE_AR_RW)); 581 #else 582 printk(KERN_INFO "EFI_MEMORY_WT mapping\n"); 583 md->virt_addr = (u64) ioremap(md->phys_addr, 0); ··· 583 584 status = efi_call_phys(__va(runtime->set_virtual_address_map), 585 ia64_boot_param->efi_memmap_size, 586 + efi_desc_size, 587 + ia64_boot_param->efi_memdesc_version, 588 ia64_boot_param->efi_memmap); 589 if (status != EFI_SUCCESS) { 590 + printk(KERN_WARNING "warning: unable to switch EFI into " 591 + "virtual mode (status=%lu)\n", status); 592 return; 593 } 594 595 /* 596 + * Now that EFI is in virtual mode, we call the EFI functions more 597 + * efficiently: 598 */ 599 efi.get_time = virt_get_time; 600 efi.set_time = virt_set_time; ··· 606 } 607 608 /* 609 + * Walk the EFI memory map looking for the I/O port range. There can only be 610 + * one entry of this type, other I/O port ranges should be described via ACPI. 611 */ 612 u64 613 efi_get_iobase (void) ··· 678 679 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { 680 md = p; 681 if (md->phys_addr < end && efi_md_end(md) > phys_addr) 682 return 1; 683 } ··· 731 if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr) 732 return 0; 733 } while (md); 734 + return 0; /* never reached */ 735 } 736 737 u64 ··· 767 if (!md || md->attribute != attr) 768 return 0; 769 } while (md); 770 + return 0; /* never reached */ 771 } 772 EXPORT_SYMBOL(kern_mem_attribute); 773 ··· 883 return 1; 884 uart = 0; 885 } 886 + hdr = (struct efi_generic_dev_path *)((u8 *) hdr + hdr->length); 887 } 888 printk(KERN_ERR "Malformed %s value\n", name); 889 return 0; ··· 921 if (!efi_wb(md)) { 922 continue; 923 } 924 + if (pmd == NULL || !efi_wb(pmd) || 925 + efi_md_end(pmd) != md->phys_addr) { 926 contig_low = GRANULEROUNDUP(md->phys_addr); 927 contig_high = efi_md_end(md); 928 + for (q = p + efi_desc_size; q < efi_map_end; 929 + q += efi_desc_size) { 930 check_md = q; 931 if (!efi_wb(check_md)) 932 break; ··· 988 for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) { 989 md = p; 990 if (!efi_wb(md)) { 991 + if (efi_uc(md) && 992 + (md->type == EFI_CONVENTIONAL_MEMORY || 993 + md->type == EFI_BOOT_SERVICES_DATA)) { 994 k->attribute = EFI_MEMORY_UC; 995 k->start = md->phys_addr; 996 k->num_pages = md->num_pages; ··· 997 } 998 continue; 999 } 1000 + if (pmd == NULL || !efi_wb(pmd) || 1001 + efi_md_end(pmd) != md->phys_addr) { 1002 contig_low = GRANULEROUNDUP(md->phys_addr); 1003 contig_high = efi_md_end(md); 1004 + for (q = p + efi_desc_size; q < efi_map_end; 1005 + q += efi_desc_size) { 1006 check_md = q; 1007 if (!efi_wb(check_md)) 1008 break; ··· 1025 if (md->phys_addr < contig_low) { 1026 lim = min(efi_md_end(md), contig_low); 1027 if (efi_uc(md)) { 1028 + if (k > kern_memmap && 1029 + (k-1)->attribute == EFI_MEMORY_UC && 1030 kmd_end(k-1) == md->phys_addr) { 1031 + (k-1)->num_pages += 1032 + (lim - md->phys_addr) 1033 + 
>> EFI_PAGE_SHIFT; 1034 } else { 1035 k->attribute = EFI_MEMORY_UC; 1036 k->start = md->phys_addr; 1037 + k->num_pages = (lim - md->phys_addr) 1038 + >> EFI_PAGE_SHIFT; 1039 k++; 1040 } 1041 } ··· 1049 } else { 1050 k->attribute = EFI_MEMORY_UC; 1051 k->start = lim; 1052 + k->num_pages = (efi_md_end(md) - lim) 1053 + >> EFI_PAGE_SHIFT; 1054 k++; 1055 } 1056 } ··· 1151 break; 1152 } 1153 1154 + if ((res = kzalloc(sizeof(struct resource), 1155 + GFP_KERNEL)) == NULL) { 1156 + printk(KERN_ERR 1157 + "failed to allocate resource for iomem\n"); 1158 return; 1159 } 1160 ··· 1187 rsvd_regions are sorted 1188 */ 1189 unsigned long __init 1190 + kdump_find_rsvd_region (unsigned long size, struct rsvd_region *r, int n) 1191 { 1192 + int i; 1193 + u64 start, end; 1194 + u64 alignment = 1UL << _PAGE_SIZE_64M; 1195 + void *efi_map_start, *efi_map_end, *p; 1196 + efi_memory_desc_t *md; 1197 + u64 efi_desc_size; 1198 1199 + efi_map_start = __va(ia64_boot_param->efi_memmap); 1200 + efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; 1201 + efi_desc_size = ia64_boot_param->efi_memdesc_size; 1202 1203 + for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { 1204 + md = p; 1205 + if (!efi_wb(md)) 1206 + continue; 1207 + start = ALIGN(md->phys_addr, alignment); 1208 + end = efi_md_end(md); 1209 + for (i = 0; i < n; i++) { 1210 + if (__pa(r[i].start) >= start && __pa(r[i].end) < end) { 1211 + if (__pa(r[i].start) > start + size) 1212 + return start; 1213 + start = ALIGN(__pa(r[i].end), alignment); 1214 + if (i < n-1 && 1215 + __pa(r[i+1].start) < start + size) 1216 + continue; 1217 + else 1218 + break; 1219 + } 1220 } 1221 + if (end > start + size) 1222 + return start; 1223 + } 1224 1225 + printk(KERN_WARNING 1226 + "Cannot reserve 0x%lx byte of memory for crashdump\n", size); 1227 + return ~0UL; 1228 } 1229 #endif 1230
+2 -2
arch/ia64/kernel/fsyscall_gtod_data.h
··· 14 u32 clk_shift; 15 void *clk_fsys_mmio; 16 cycle_t clk_cycle_last; 17 - } __attribute__ ((aligned (L1_CACHE_BYTES))); 18 19 struct itc_jitter_data_t { 20 int itc_jitter; 21 cycle_t itc_lastcycle; 22 - } __attribute__ ((aligned (L1_CACHE_BYTES))); 23
··· 14 u32 clk_shift; 15 void *clk_fsys_mmio; 16 cycle_t clk_cycle_last; 17 + } ____cacheline_aligned; 18 19 struct itc_jitter_data_t { 20 int itc_jitter; 21 cycle_t itc_lastcycle; 22 + } ____cacheline_aligned; 23
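____cacheline_aligned expands to the same attribute but routes the alignment through one central, per-architecture definition; from include/linux/cache.h of this era (quoted from memory, treat as illustrative):

#ifndef ____cacheline_aligned
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#endif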
+3
arch/ia64/kernel/ia64_ksyms.c
··· 12 EXPORT_SYMBOL(memcpy); 13 EXPORT_SYMBOL(strlen); 14 15 #include <asm/checksum.h> 16 EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */ 17 EXPORT_SYMBOL(csum_ipv6_magic);
··· 12 EXPORT_SYMBOL(memcpy); 13 EXPORT_SYMBOL(strlen); 14 15 + #include <asm/pgtable.h> 16 + EXPORT_SYMBOL_GPL(empty_zero_page); 17 + 18 #include <asm/checksum.h> 19 EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */ 20 EXPORT_SYMBOL(csum_ipv6_magic);
+4 -3
arch/ia64/kernel/kprobes.c
··· 381 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) 382 { 383 unsigned int i; 384 - i = atomic_sub_return(1, &kcb->prev_kprobe_index); 385 - __get_cpu_var(current_kprobe) = kcb->prev_kprobe[i].kp; 386 - kcb->kprobe_status = kcb->prev_kprobe[i].status; 387 } 388 389 static void __kprobes set_current_kprobe(struct kprobe *p,
··· 381 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) 382 { 383 unsigned int i; 384 + i = atomic_read(&kcb->prev_kprobe_index); 385 + __get_cpu_var(current_kprobe) = kcb->prev_kprobe[i-1].kp; 386 + kcb->kprobe_status = kcb->prev_kprobe[i-1].status; 387 + atomic_sub(1, &kcb->prev_kprobe_index); 388 } 389 390 static void __kprobes set_current_kprobe(struct kprobe *p,
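The reordering closes a re-entrancy window: atomic_sub_return() published the save slot as free before its contents were consumed, so a nested kprobe trap taken right after the decrement could overwrite prev_kprobe[i] while it was still being restored. The fixed sequence consumes the entry first and only then releases the index. Schematically (a sketch of the hazard, not code from the patch):

/*
 * old: i = atomic_sub_return(1, &idx);    slot i marked free here
 *      <-- nested kprobe may re-save into prev_kprobe[i] -->
 *      restore from prev_kprobe[i];       may read clobbered state
 *
 * new: i = atomic_read(&idx);
 *      restore from prev_kprobe[i-1];     consume the entry first,
 *      atomic_sub(1, &idx);               then release the slot
 */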
+37 -29
arch/ia64/kernel/mca.c
··· 2 * File: mca.c 3 * Purpose: Generic MCA handling layer 4 * 5 - * Updated for latest kernel 6 * Copyright (C) 2003 Hewlett-Packard Co 7 * David Mosberger-Tang <davidm@hpl.hp.com> 8 * 9 * Copyright (C) 2002 Dell Inc. 10 - * Copyright (C) Matt Domsch (Matt_Domsch@dell.com) 11 * 12 * Copyright (C) 2002 Intel 13 - * Copyright (C) Jenna Hall (jenna.s.hall@intel.com) 14 * 15 * Copyright (C) 2001 Intel 16 - * Copyright (C) Fred Lewis (frederick.v.lewis@intel.com) 17 * 18 * Copyright (C) 2000 Intel 19 - * Copyright (C) Chuck Fleckenstein (cfleck@co.intel.com) 20 * 21 * Copyright (C) 1999, 2004 Silicon Graphics, Inc. 22 - * Copyright (C) Vijay Chander(vijay@engr.sgi.com) 23 * 24 - * 03/04/15 D. Mosberger Added INIT backtrace support. 25 - * 02/03/25 M. Domsch GUID cleanups 26 * 27 - * 02/01/04 J. Hall Aligned MCA stack to 16 bytes, added platform vs. CPU 28 - * error flag, set SAL default return values, changed 29 - * error record structure to linked list, added init call 30 - * to sal_get_state_info_size(). 31 * 32 - * 01/01/03 F. Lewis Added setup of CMCI and CPEI IRQs, logging of corrected 33 - * platform errors, completed code for logging of 34 - * corrected & uncorrected machine check errors, and 35 - * updated for conformance with Nov. 2000 revision of the 36 - * SAL 3.0 spec. 37 - * 00/03/29 C. Fleckenstein Fixed PAL/SAL update issues, began MCA bug fixes, logging issues, 38 - * added min save state dump, added INIT handler. 39 * 40 * 2003-12-08 Keith Owens <kaos@sgi.com> 41 - * smp_call_function() must not be called from interrupt context (can 42 - * deadlock on tasklist_lock). Use keventd to call smp_call_function(). 43 * 44 * 2004-02-01 Keith Owens <kaos@sgi.com> 45 - * Avoid deadlock when using printk() for MCA and INIT records. 46 - * Delete all record printing code, moved to salinfo_decode in user space. 47 - * Mark variables and functions static where possible. 48 - * Delete dead variables and functions. 49 - * Reorder to remove the need for forward declarations and to consolidate 50 - * related code. 51 * 52 * 2005-08-12 Keith Owens <kaos@sgi.com> 53 - * Convert MCA/INIT handlers to use per event stacks and SAL/OS state. 54 * 55 * 2005-10-07 Keith Owens <kaos@sgi.com> 56 * Add notify_die() hooks. 57 * 58 * 2006-09-15 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com> 59 - * Add printing support for MCA/INIT. 60 * 61 * 2007-04-27 Russ Anderson <rja@sgi.com> 62 * Support multiple cpus going through OS_MCA in the same event.
··· 2 * File: mca.c 3 * Purpose: Generic MCA handling layer 4 * 5 * Copyright (C) 2003 Hewlett-Packard Co 6 * David Mosberger-Tang <davidm@hpl.hp.com> 7 * 8 * Copyright (C) 2002 Dell Inc. 9 + * Copyright (C) Matt Domsch <Matt_Domsch@dell.com> 10 * 11 * Copyright (C) 2002 Intel 12 + * Copyright (C) Jenna Hall <jenna.s.hall@intel.com> 13 * 14 * Copyright (C) 2001 Intel 15 + * Copyright (C) Fred Lewis <frederick.v.lewis@intel.com> 16 * 17 * Copyright (C) 2000 Intel 18 + * Copyright (C) Chuck Fleckenstein <cfleck@co.intel.com> 19 * 20 * Copyright (C) 1999, 2004 Silicon Graphics, Inc. 21 + * Copyright (C) Vijay Chander <vijay@engr.sgi.com> 22 * 23 + * Copyright (C) 2006 FUJITSU LIMITED 24 + * Copyright (C) Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com> 25 * 26 + * 2000-03-29 Chuck Fleckenstein <cfleck@co.intel.com> 27 + * Fixed PAL/SAL update issues, began MCA bug fixes, logging issues, 28 + * added min save state dump, added INIT handler. 29 * 30 + * 2001-01-03 Fred Lewis <frederick.v.lewis@intel.com> 31 + * Added setup of CMCI and CPEI IRQs, logging of corrected platform 32 + * errors, completed code for logging of corrected & uncorrected 33 + * machine check errors, and updated for conformance with Nov. 2000 34 + * revision of the SAL 3.0 spec. 35 + * 36 + * 2002-01-04 Jenna Hall <jenna.s.hall@intel.com> 37 + * Aligned MCA stack to 16 bytes, added platform vs. CPU error flag, 38 + * set SAL default return values, changed error record structure to 39 + * linked list, added init call to sal_get_state_info_size(). 40 + * 41 + * 2002-03-25 Matt Domsch <Matt_Domsch@dell.com> 42 + * GUID cleanups. 43 + * 44 + * 2003-04-15 David Mosberger-Tang <davidm@hpl.hp.com> 45 + * Added INIT backtrace support. 46 * 47 * 2003-12-08 Keith Owens <kaos@sgi.com> 48 + * smp_call_function() must not be called from interrupt context 49 + * (can deadlock on tasklist_lock). 50 + * Use keventd to call smp_call_function(). 51 * 52 * 2004-02-01 Keith Owens <kaos@sgi.com> 53 + * Avoid deadlock when using printk() for MCA and INIT records. 54 + * Delete all record printing code, moved to salinfo_decode in user 55 + * space. Mark variables and functions static where possible. 56 + * Delete dead variables and functions. Reorder to remove the need 57 + * for forward declarations and to consolidate related code. 58 * 59 * 2005-08-12 Keith Owens <kaos@sgi.com> 60 + * Convert MCA/INIT handlers to use per event stacks and SAL/OS 61 + * state. 62 * 63 * 2005-10-07 Keith Owens <kaos@sgi.com> 64 * Add notify_die() hooks. 65 * 66 * 2006-09-15 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com> 67 + * Add printing support for MCA/INIT. 68 * 69 * 2007-04-27 Russ Anderson <rja@sgi.com> 70 * Support multiple cpus going through OS_MCA in the same event.
+25 -21
arch/ia64/kernel/mca_asm.S
··· 1 - // 2 - // assembly portion of the IA64 MCA handling 3 - // 4 - // Mods by cfleck to integrate into kernel build 5 - // 00/03/15 davidm Added various stop bits to get a clean compile 6 - // 7 - // 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp 8 - // kstack, switch modes, jump to C INIT handler 9 - // 10 - // 02/01/04 J.Hall <jenna.s.hall@intel.com> 11 - // Before entering virtual mode code: 12 - // 1. Check for TLB CPU error 13 - // 2. Restore current thread pointer to kr6 14 - // 3. Move stack ptr 16 bytes to conform to C calling convention 15 - // 16 - // 04/11/12 Russ Anderson <rja@sgi.com> 17 - // Added per cpu MCA/INIT stack save areas. 18 - // 19 - // 12/08/05 Keith Owens <kaos@sgi.com> 20 - // Use per cpu MCA/INIT stacks for all data. 21 - // 22 #include <linux/threads.h> 23 24 #include <asm/asmmacro.h>
··· 1 + /* 2 + * File: mca_asm.S 3 + * Purpose: assembly portion of the IA64 MCA handling 4 + * 5 + * Mods by cfleck to integrate into kernel build 6 + * 7 + * 2000-03-15 David Mosberger-Tang <davidm@hpl.hp.com> 8 + * Added various stop bits to get a clean compile 9 + * 10 + * 2000-03-29 Chuck Fleckenstein <cfleck@co.intel.com> 11 + * Added code to save INIT handoff state in pt_regs format, 12 + * switch to temp kstack, switch modes, jump to C INIT handler 13 + * 14 + * 2002-01-04 J.Hall <jenna.s.hall@intel.com> 15 + * Before entering virtual mode code: 16 + * 1. Check for TLB CPU error 17 + * 2. Restore current thread pointer to kr6 18 + * 3. Move stack ptr 16 bytes to conform to C calling convention 19 + * 20 + * 2004-11-12 Russ Anderson <rja@sgi.com> 21 + * Added per cpu MCA/INIT stack save areas. 22 + * 23 + * 2005-12-08 Keith Owens <kaos@sgi.com> 24 + * Use per cpu MCA/INIT stacks for all data. 25 + */ 26 #include <linux/threads.h> 27 28 #include <asm/asmmacro.h>
+1 -1
arch/ia64/kernel/mca_drv.c
··· 3 * Purpose: Generic MCA handling layer 4 * 5 * Copyright (C) 2004 FUJITSU LIMITED 6 - * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com) 7 * Copyright (C) 2005 Silicon Graphics, Inc 8 * Copyright (C) 2005 Keith Owens <kaos@sgi.com> 9 * Copyright (C) 2006 Russ Anderson <rja@sgi.com>
··· 3 * Purpose: Generic MCA handling layer 4 * 5 * Copyright (C) 2004 FUJITSU LIMITED 6 + * Copyright (C) 2004 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com> 7 * Copyright (C) 2005 Silicon Graphics, Inc 8 * Copyright (C) 2005 Keith Owens <kaos@sgi.com> 9 * Copyright (C) 2006 Russ Anderson <rja@sgi.com>
+1 -1
arch/ia64/kernel/mca_drv.h
··· 3 * Purpose: Define helpers for Generic MCA handling 4 * 5 * Copyright (C) 2004 FUJITSU LIMITED 6 - * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com) 7 */ 8 /* 9 * Processor error section:
··· 3 * Purpose: Define helpers for Generic MCA handling 4 * 5 * Copyright (C) 2004 FUJITSU LIMITED 6 + * Copyright (C) 2004 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com> 7 */ 8 /* 9 * Processor error section:
+1 -1
arch/ia64/kernel/mca_drv_asm.S
··· 3 * Purpose: Assembly portion of Generic MCA handling 4 * 5 * Copyright (C) 2004 FUJITSU LIMITED 6 - * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com) 7 */ 8 #include <linux/threads.h> 9
··· 3 * Purpose: Assembly portion of Generic MCA handling 4 * 5 * Copyright (C) 2004 FUJITSU LIMITED 6 + * Copyright (C) 2004 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com> 7 */ 8 #include <linux/threads.h> 9
+3 -3
arch/ia64/kernel/perfmon.c
··· 2654 /* XXX: need to add more checks here */ 2655 if (pid < 2) return -EPERM; 2656 2657 - if (pid != current->pid) { 2658 2659 read_lock(&tasklist_lock); 2660 2661 - p = find_task_by_pid(pid); 2662 2663 /* make sure task cannot go away while we operate on it */ 2664 if (p) get_task_struct(p); ··· 5795 return 0; 5796 } 5797 5798 - struct seq_operations pfm_seq_ops = { 5799 .start = pfm_proc_start, 5800 .next = pfm_proc_next, 5801 .stop = pfm_proc_stop,
··· 2654 /* XXX: need to add more checks here */ 2655 if (pid < 2) return -EPERM; 2656 2657 + if (pid != task_pid_vnr(current)) { 2658 2659 read_lock(&tasklist_lock); 2660 2661 + p = find_task_by_vpid(pid); 2662 2663 /* make sure task cannot go away while we operate on it */ 2664 if (p) get_task_struct(p); ··· 5795 return 0; 5796 } 5797 5798 + const struct seq_operations pfm_seq_ops = { 5799 .start = pfm_proc_start, 5800 .next = pfm_proc_next, 5801 .stop = pfm_proc_stop,
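With PID namespaces, current->pid is the global pid while task_pid_vnr(current) is the value the calling process knows itself by, i.e. what userspace actually passed in; find_task_by_vpid() does the matching lookup in the caller's namespace. The conversion pattern in one piece (condensed from the hunk above):

	if (pid != task_pid_vnr(current)) {	/* pid as the caller sees it */
		read_lock(&tasklist_lock);
		p = find_task_by_vpid(pid);	/* look up in caller's ns */
		if (p)
			get_task_struct(p);	/* pin it across the operation */
		read_unlock(&tasklist_lock);
	}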
+14
arch/ia64/kernel/sal.c
··· 284 SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0); 285 return isrv.status; 286 } 287 288 void __init 289 ia64_sal_init (struct ia64_sal_systab *systab) ··· 373 return 0; 374 } 375 EXPORT_SYMBOL(ia64_sal_oemcall_reentrant);
··· 284 SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0); 285 return isrv.status; 286 } 287 + EXPORT_SYMBOL_GPL(ia64_sal_cache_flush); 288 289 void __init 290 ia64_sal_init (struct ia64_sal_systab *systab) ··· 372 return 0; 373 } 374 EXPORT_SYMBOL(ia64_sal_oemcall_reentrant); 375 + 376 + long 377 + ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second, 378 + unsigned long *drift_info) 379 + { 380 + struct ia64_sal_retval isrv; 381 + 382 + SAL_CALL(isrv, SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0); 383 + *ticks_per_second = isrv.v0; 384 + *drift_info = isrv.v1; 385 + return isrv.status; 386 + } 387 + EXPORT_SYMBOL_GPL(ia64_sal_freq_base);
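The new ia64_sal_freq_base() wrapper (exported for kvm/ia64, which needs platform clock ratios from module context) hides the raw SAL_CALL. A hypothetical module-side caller, assuming the SAL_FREQ_BASE_PLATFORM selector from <asm/sal.h>:

	unsigned long ticks_per_sec, drift;

	if (ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
			       &ticks_per_sec, &drift) == 0)
		printk(KERN_INFO "platform base frequency: %lu Hz\n",
		       ticks_per_sec);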
+1 -1
arch/ia64/kernel/setup.c
··· 654 { 655 } 656 657 - struct seq_operations cpuinfo_op = { 658 .start = c_start, 659 .next = c_next, 660 .stop = c_stop,
··· 654 { 655 } 656 657 + const struct seq_operations cpuinfo_op = { 658 .start = c_start, 659 .next = c_next, 660 .stop = c_stop,
-11
arch/ia64/kernel/smpboot.c
··· 767 } 768 printk(KERN_ERR "CPU %u didn't die...\n", cpu); 769 } 770 - #else /* !CONFIG_HOTPLUG_CPU */ 771 - int __cpu_disable(void) 772 - { 773 - return -ENOSYS; 774 - } 775 - 776 - void __cpu_die(unsigned int cpu) 777 - { 778 - /* We said "no" in __cpu_disable */ 779 - BUG(); 780 - } 781 #endif /* CONFIG_HOTPLUG_CPU */ 782 783 void
··· 767 } 768 printk(KERN_ERR "CPU %u didn't die...\n", cpu); 769 } 770 #endif /* CONFIG_HOTPLUG_CPU */ 771 772 void
+23 -12
arch/ia64/kernel/traps.c
··· 35 fpswa_interface = __va(ia64_boot_param->fpswa); 36 } 37 38 - void 39 die (const char *str, struct pt_regs *regs, long err) 40 { 41 static struct { ··· 62 if (++die.lock_owner_depth < 3) { 63 printk("%s[%d]: %s %ld [%d]\n", 64 current->comm, task_pid_nr(current), str, err, ++die_counter); 65 - (void) notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV); 66 - show_regs(regs); 67 } else 68 printk(KERN_ERR "Recursive die() failure, output suppressed\n"); 69 ··· 75 add_taint(TAINT_DIE); 76 spin_unlock_irq(&die.lock); 77 78 if (panic_on_oops) 79 panic("Fatal exception"); 80 81 do_exit(SIGSEGV); 82 } 83 84 - void 85 die_if_kernel (char *str, struct pt_regs *regs, long err) 86 { 87 if (!user_mode(regs)) 88 - die(str, regs, err); 89 } 90 91 void ··· 110 if (notify_die(DIE_BREAK, "break 0", regs, break_num, TRAP_BRKPT, SIGTRAP) 111 == NOTIFY_STOP) 112 return; 113 - die_if_kernel("bugcheck!", regs, break_num); 114 sig = SIGILL; code = ILL_ILLOPC; 115 break; 116 ··· 164 break; 165 166 default: 167 - if (break_num < 0x40000 || break_num > 0x100000) 168 - die_if_kernel("Bad break", regs, break_num); 169 170 if (break_num < 0x80000) { 171 sig = SIGILL; code = __ILL_BREAK; ··· 412 #endif 413 414 sprintf(buf, "IA-64 Illegal operation fault"); 415 - die_if_kernel(buf, &regs, 0); 416 417 memset(&si, 0, sizeof(si)); 418 si.si_signo = SIGILL; 419 si.si_code = ILL_ILLOPC; 420 si.si_addr = (void __user *) (regs.cr_iip + ia64_psr(&regs)->ri); 421 force_sig_info(SIGILL, &si, current); 422 - rv.fkt = 0; 423 return rv; 424 } 425 ··· 655 sprintf(buf, "Fault %lu", vector); 656 break; 657 } 658 - die_if_kernel(buf, &regs, error); 659 - force_sig(SIGILL, current); 660 }
··· 35 fpswa_interface = __va(ia64_boot_param->fpswa); 36 } 37 38 + int 39 die (const char *str, struct pt_regs *regs, long err) 40 { 41 static struct { ··· 62 if (++die.lock_owner_depth < 3) { 63 printk("%s[%d]: %s %ld [%d]\n", 64 current->comm, task_pid_nr(current), str, err, ++die_counter); 65 + if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) 66 + != NOTIFY_STOP) 67 + show_regs(regs); 68 + else 69 + regs = NULL; 70 } else 71 printk(KERN_ERR "Recursive die() failure, output suppressed\n"); 72 ··· 72 add_taint(TAINT_DIE); 73 spin_unlock_irq(&die.lock); 74 75 + if (!regs) 76 + return 1; 77 + 78 if (panic_on_oops) 79 panic("Fatal exception"); 80 81 do_exit(SIGSEGV); 82 + return 0; 83 } 84 85 + int 86 die_if_kernel (char *str, struct pt_regs *regs, long err) 87 { 88 if (!user_mode(regs)) 89 + return die(str, regs, err); 90 + return 0; 91 } 92 93 void ··· 102 if (notify_die(DIE_BREAK, "break 0", regs, break_num, TRAP_BRKPT, SIGTRAP) 103 == NOTIFY_STOP) 104 return; 105 + if (die_if_kernel("bugcheck!", regs, break_num)) 106 + return; 107 sig = SIGILL; code = ILL_ILLOPC; 108 break; 109 ··· 155 break; 156 157 default: 158 + if ((break_num < 0x40000 || break_num > 0x100000) 159 + && die_if_kernel("Bad break", regs, break_num)) 160 + return; 161 162 if (break_num < 0x80000) { 163 sig = SIGILL; code = __ILL_BREAK; ··· 402 #endif 403 404 sprintf(buf, "IA-64 Illegal operation fault"); 405 + rv.fkt = 0; 406 + if (die_if_kernel(buf, &regs, 0)) 407 + return rv; 408 409 memset(&si, 0, sizeof(si)); 410 si.si_signo = SIGILL; 411 si.si_code = ILL_ILLOPC; 412 si.si_addr = (void __user *) (regs.cr_iip + ia64_psr(&regs)->ri); 413 force_sig_info(SIGILL, &si, current); 414 return rv; 415 } 416 ··· 644 sprintf(buf, "Fault %lu", vector); 645 break; 646 } 647 + if (!die_if_kernel(buf, &regs, error)) 648 + force_sig(SIGILL, current); 649 }
+8 -5
arch/ia64/kernel/unaligned.c
··· 23 #include <asm/uaccess.h> 24 #include <asm/unaligned.h> 25 26 - extern void die_if_kernel(char *str, struct pt_regs *regs, long err); 27 28 #undef DEBUG_UNALIGNED_TRAP 29 ··· 675 */ 676 if (ld.x6_op == 1 || ld.x6_op == 3) { 677 printk(KERN_ERR "%s: register update on speculative load, error\n", __FUNCTION__); 678 - die_if_kernel("unaligned reference on speculative load with register update\n", 679 - regs, 30); 680 } 681 682 ··· 1318 1319 if (ia64_psr(regs)->be) { 1320 /* we don't support big-endian accesses */ 1321 - die_if_kernel("big-endian unaligned accesses are not supported", regs, 0); 1322 goto force_sigbus; 1323 } 1324 ··· 1536 ia64_handle_exception(regs, eh); 1537 goto done; 1538 } 1539 - die_if_kernel("error during unaligned kernel access\n", regs, ret); 1540 /* NOT_REACHED */ 1541 } 1542 force_sigbus:
··· 23 #include <asm/uaccess.h> 24 #include <asm/unaligned.h> 25 26 + extern int die_if_kernel(char *str, struct pt_regs *regs, long err); 27 28 #undef DEBUG_UNALIGNED_TRAP 29 ··· 675 */ 676 if (ld.x6_op == 1 || ld.x6_op == 3) { 677 printk(KERN_ERR "%s: register update on speculative load, error\n", __FUNCTION__); 678 + if (die_if_kernel("unaligned reference on speculative load with register update\n", 679 + regs, 30)) 680 + return; 681 } 682 683 ··· 1317 1318 if (ia64_psr(regs)->be) { 1319 /* we don't support big-endian accesses */ 1320 + if (die_if_kernel("big-endian unaligned accesses are not supported", regs, 0)) 1321 + return; 1322 goto force_sigbus; 1323 } 1324 ··· 1534 ia64_handle_exception(regs, eh); 1535 goto done; 1536 } 1537 + if (die_if_kernel("error during unaligned kernel access\n", regs, ret)) 1538 + return; 1539 /* NOT_REACHED */ 1540 } 1541 force_sigbus:
+5 -3
arch/ia64/mm/fault.c
··· 16 #include <asm/system.h> 17 #include <asm/uaccess.h> 18 19 - extern void die (char *, struct pt_regs *, long); 20 21 #ifdef CONFIG_KPROBES 22 static inline int notify_page_fault(struct pt_regs *regs, int trap) ··· 267 else 268 printk(KERN_ALERT "Unable to handle kernel paging request at " 269 "virtual address %016lx\n", address); 270 - die("Oops", regs, isr); 271 bust_spinlocks(0); 272 - do_exit(SIGKILL); 273 return; 274 275 out_of_memory:
··· 16 #include <asm/system.h> 17 #include <asm/uaccess.h> 18 19 + extern int die(char *, struct pt_regs *, long); 20 21 #ifdef CONFIG_KPROBES 22 static inline int notify_page_fault(struct pt_regs *regs, int trap) ··· 267 else 268 printk(KERN_ALERT "Unable to handle kernel paging request at " 269 "virtual address %016lx\n", address); 270 + if (die("Oops", regs, isr)) 271 + regs = NULL; 272 bust_spinlocks(0); 273 + if (regs) 274 + do_exit(SIGKILL); 275 return; 276 277 out_of_memory:
+1 -1
arch/ia64/sn/kernel/sn2/sn2_smp.c
··· 523 return count; 524 } 525 526 - static struct seq_operations sn2_ptc_seq_ops = { 527 .start = sn2_ptc_seq_start, 528 .next = sn2_ptc_seq_next, 529 .stop = sn2_ptc_seq_stop,
··· 523 return count; 524 } 525 526 + static const struct seq_operations sn2_ptc_seq_ops = { 527 .start = sn2_ptc_seq_start, 528 .next = sn2_ptc_seq_next, 529 .stop = sn2_ptc_seq_stop,
+6 -5
arch/ia64/sn/kernel/sn2/sn_hwperf.c
··· 33 #include <linux/smp_lock.h> 34 #include <linux/nodemask.h> 35 #include <linux/smp.h> 36 37 #include <asm/processor.h> 38 #include <asm/topology.h> ··· 51 static int sn_hwperf_obj_cnt = 0; 52 static nasid_t sn_hwperf_master_nasid = INVALID_NASID; 53 static int sn_hwperf_init(void); 54 - static DECLARE_MUTEX(sn_hwperf_init_mutex); 55 56 #define cnode_possible(n) ((n) < num_cnodes) 57 ··· 578 /* 579 * /proc/sgi_sn/sn_topology, read-only using seq_file 580 */ 581 - static struct seq_operations sn_topology_seq_ops = { 582 .start = sn_topology_start, 583 .next = sn_topology_next, 584 .stop = sn_topology_stop, ··· 885 int e = 0; 886 887 /* single threaded, once-only initialization */ 888 - down(&sn_hwperf_init_mutex); 889 890 if (sn_hwperf_salheap) { 891 - up(&sn_hwperf_init_mutex); 892 return e; 893 } 894 ··· 937 sn_hwperf_salheap = NULL; 938 sn_hwperf_obj_cnt = 0; 939 } 940 - up(&sn_hwperf_init_mutex); 941 return e; 942 } 943
··· 33 #include <linux/smp_lock.h> 34 #include <linux/nodemask.h> 35 #include <linux/smp.h> 36 + #include <linux/mutex.h> 37 38 #include <asm/processor.h> 39 #include <asm/topology.h> ··· 50 static int sn_hwperf_obj_cnt = 0; 51 static nasid_t sn_hwperf_master_nasid = INVALID_NASID; 52 static int sn_hwperf_init(void); 53 + static DEFINE_MUTEX(sn_hwperf_init_mutex); 54 55 #define cnode_possible(n) ((n) < num_cnodes) 56 ··· 577 /* 578 * /proc/sgi_sn/sn_topology, read-only using seq_file 579 */ 580 + static const struct seq_operations sn_topology_seq_ops = { 581 .start = sn_topology_start, 582 .next = sn_topology_next, 583 .stop = sn_topology_stop, ··· 884 int e = 0; 885 886 /* single threaded, once-only initialization */ 887 + mutex_lock(&sn_hwperf_init_mutex); 888 889 if (sn_hwperf_salheap) { 890 + mutex_unlock(&sn_hwperf_init_mutex); 891 return e; 892 } 893 ··· 936 sn_hwperf_salheap = NULL; 937 sn_hwperf_obj_cnt = 0; 938 } 939 + mutex_unlock(&sn_hwperf_init_mutex); 940 return e; 941 } 942
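The semaphore-to-mutex conversion is mechanical: a DECLARE_MUTEX() semaphore (a counting semaphore initialized to 1) guarding the once-only initialization becomes a real struct mutex, which is cheaper on the fast path and visible to lockdep. In outline (hypothetical names):

#include <linux/mutex.h>

static DEFINE_MUTEX(init_mutex);	/* was: static DECLARE_MUTEX(init_mutex); */

static int init_once(void)
{
	mutex_lock(&init_mutex);	/* was: down(&init_mutex); */
	/* ... single-threaded initialization ... */
	mutex_unlock(&init_mutex);	/* was: up(&init_mutex);   */
	return 0;
}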
+28 -22
include/asm-ia64/bitops.h
··· 122 } 123 124 /** 125 - * __clear_bit_unlock - Non-atomically clear a bit with release 126 * 127 - * This is like clear_bit_unlock, but the implementation uses a store 128 * with release semantics. See also __raw_spin_unlock(). 129 */ 130 static __inline__ void 131 - __clear_bit_unlock(int nr, volatile void *addr) 132 { 133 - __u32 mask, new; 134 - volatile __u32 *m; 135 136 - m = (volatile __u32 *)addr + (nr >> 5); 137 - mask = ~(1 << (nr & 31)); 138 - new = *m & mask; 139 - barrier(); 140 ia64_st4_rel_nta(m, new); 141 } 142 143 /** 144 * __clear_bit - Clears a bit in memory (non-atomic version) 145 */ 146 static __inline__ void 147 __clear_bit (int nr, volatile void *addr) 148 { 149 - volatile __u32 *p = (__u32 *) addr + (nr >> 5); 150 - __u32 m = 1 << (nr & 31); 151 - *p &= ~m; 152 } 153 154 /** 155 * change_bit - Toggle a bit in memory 156 - * @nr: Bit to clear 157 * @addr: Address to start counting from 158 * 159 * change_bit() is atomic and may not be reordered. ··· 180 181 /** 182 * __change_bit - Toggle a bit in memory 183 - * @nr: the bit to set 184 * @addr: the address to start counting from 185 * 186 * Unlike change_bit(), this function is non-atomic and may be reordered. ··· 199 * @addr: Address to count from 200 * 201 * This operation is atomic and cannot be reordered. 202 - * It also implies a memory barrier. 203 */ 204 static __inline__ int 205 test_and_set_bit (int nr, volatile void *addr) ··· 249 250 /** 251 * test_and_clear_bit - Clear a bit and return its old value 252 - * @nr: Bit to set 253 * @addr: Address to count from 254 * 255 * This operation is atomic and cannot be reordered. 256 - * It also implies a memory barrier. 257 */ 258 static __inline__ int 259 test_and_clear_bit (int nr, volatile void *addr) ··· 274 275 /** 276 * __test_and_clear_bit - Clear a bit and return its old value 277 - * @nr: Bit to set 278 * @addr: Address to count from 279 * 280 * This operation is non-atomic and can be reordered. ··· 294 295 /** 296 * test_and_change_bit - Change a bit and return its old value 297 - * @nr: Bit to set 298 * @addr: Address to count from 299 * 300 * This operation is atomic and cannot be reordered. 301 - * It also implies a memory barrier. 302 */ 303 static __inline__ int 304 test_and_change_bit (int nr, volatile void *addr) ··· 317 return (old & bit) != 0; 318 } 319 320 - /* 321 - * WARNING: non atomic version. 322 */ 323 static __inline__ int 324 __test_and_change_bit (int nr, void *addr)
··· 122 } 123 124 /** 125 + * __clear_bit_unlock - Non-atomically clears a bit in memory with release 126 + * @nr: Bit to clear 127 + * @addr: Address to start counting from 128 * 129 + * Similarly to clear_bit_unlock, the implementation uses a store 130 * with release semantics. See also __raw_spin_unlock(). 131 */ 132 static __inline__ void 133 + __clear_bit_unlock(int nr, void *addr) 134 { 135 + __u32 * const m = (__u32 *) addr + (nr >> 5); 136 + __u32 const new = *m & ~(1 << (nr & 31)); 137 138 ia64_st4_rel_nta(m, new); 139 } 140 141 /** 142 * __clear_bit - Clears a bit in memory (non-atomic version) 143 + * @nr: the bit to clear 144 + * @addr: the address to start counting from 145 + * 146 + * Unlike clear_bit(), this function is non-atomic and may be reordered. 147 + * If it's called on the same region of memory simultaneously, the effect 148 + * may be that only one operation succeeds. 149 */ 150 static __inline__ void 151 __clear_bit (int nr, volatile void *addr) 152 { 153 + *((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31)); 154 } 155 156 /** 157 * change_bit - Toggle a bit in memory 158 + * @nr: Bit to toggle 159 * @addr: Address to start counting from 160 * 161 * change_bit() is atomic and may not be reordered. ··· 178 179 /** 180 * __change_bit - Toggle a bit in memory 181 + * @nr: the bit to toggle 182 * @addr: the address to start counting from 183 * 184 * Unlike change_bit(), this function is non-atomic and may be reordered. ··· 197 * @addr: Address to count from 198 * 199 * This operation is atomic and cannot be reordered. 200 + * It also implies the acquisition side of the memory barrier. 201 */ 202 static __inline__ int 203 test_and_set_bit (int nr, volatile void *addr) ··· 247 248 /** 249 * test_and_clear_bit - Clear a bit and return its old value 250 + * @nr: Bit to clear 251 * @addr: Address to count from 252 * 253 * This operation is atomic and cannot be reordered. 254 + * It also implies the acquisition side of the memory barrier. 255 */ 256 static __inline__ int 257 test_and_clear_bit (int nr, volatile void *addr) ··· 272 273 /** 274 * __test_and_clear_bit - Clear a bit and return its old value 275 + * @nr: Bit to clear 276 * @addr: Address to count from 277 * 278 * This operation is non-atomic and can be reordered. ··· 292 293 /** 294 * test_and_change_bit - Change a bit and return its old value 295 + * @nr: Bit to change 296 * @addr: Address to count from 297 * 298 * This operation is atomic and cannot be reordered. 299 + * It also implies the acquisition side of the memory barrier. 300 */ 301 static __inline__ int 302 test_and_change_bit (int nr, volatile void *addr) ··· 315 return (old & bit) != 0; 316 } 317 318 + /** 319 + * __test_and_change_bit - Change a bit and return its old value 320 + * @nr: Bit to change 321 + * @addr: Address to count from 322 + * 323 + * This operation is non-atomic and can be reordered. 324 */ 325 static __inline__ int 326 __test_and_change_bit (int nr, void *addr)
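__clear_bit_unlock() above is the release half of a lock built from a bit: test_and_set_bit() acquires (ia64 implements it with cmpxchg.acq), and the st4.rel store publishes every write from the critical section before the bit drops. The non-atomic read-modify-write is safe only because the lock holder is the sole writer to the word at unlock time. A usage sketch under that assumption; the bit index and word are illustrative:

    #define MY_LOCK_BIT     0

    static unsigned long my_word;

    static void my_lock(void)
    {
            while (test_and_set_bit(MY_LOCK_BIT, &my_word)) /* acquire */
                    cpu_relax();
    }

    static void my_unlock(void)
    {
            /* store with release semantics; no other writers may exist here */
            __clear_bit_unlock(MY_LOCK_BIT, &my_word);
    }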
+2
include/asm-ia64/gcc_intrin.h
··· 24 extern void ia64_bad_param_for_setreg (void); 25 extern void ia64_bad_param_for_getreg (void); 26 27 register unsigned long ia64_r13 asm ("r13") __used; 28 29 #define ia64_setreg(regnum, val) \ 30 ({ \
··· 24 extern void ia64_bad_param_for_setreg (void); 25 extern void ia64_bad_param_for_getreg (void); 26 27 + #ifdef __KERNEL__ 28 register unsigned long ia64_r13 asm ("r13") __used; 29 + #endif 30 31 #define ia64_setreg(regnum, val) \ 32 ({ \
+3 -3
include/asm-ia64/mca.h
··· 3 * Purpose: Machine check handling specific defines 4 * 5 * Copyright (C) 1999, 2004 Silicon Graphics, Inc. 6 - * Copyright (C) Vijay Chander (vijay@engr.sgi.com) 7 - * Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com) 8 - * Copyright (C) Russ Anderson (rja@sgi.com) 9 */ 10 11 #ifndef _ASM_IA64_MCA_H
··· 3 * Purpose: Machine check handling specific defines 4 * 5 * Copyright (C) 1999, 2004 Silicon Graphics, Inc. 6 + * Copyright (C) Vijay Chander <vijay@engr.sgi.com> 7 + * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com> 8 + * Copyright (C) Russ Anderson <rja@sgi.com> 9 */ 10 11 #ifndef _ASM_IA64_MCA_H
+2 -1
include/asm-ia64/mca_asm.h
··· 1 /* 2 * File: mca_asm.h 3 * 4 * Copyright (C) 1999 Silicon Graphics, Inc. 5 - * Copyright (C) Vijay Chander (vijay@engr.sgi.com) 6 * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com> 7 * Copyright (C) 2000 Hewlett-Packard Co. 8 * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
··· 1 /* 2 * File: mca_asm.h 3 + * Purpose: Machine check handling specific defines 4 * 5 * Copyright (C) 1999 Silicon Graphics, Inc. 6 + * Copyright (C) Vijay Chander <vijay@engr.sgi.com> 7 * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com> 8 * Copyright (C) 2000 Hewlett-Packard Co. 9 * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
+1 -1
include/asm-ia64/processor.h
··· 473 { 474 ia64_stop(); 475 ia64_setreg(_IA64_REG_PSR_L, psr); 476 - ia64_srlz_d(); 477 } 478 479 /*
··· 473 { 474 ia64_stop(); 475 ia64_setreg(_IA64_REG_PSR_L, psr); 476 + ia64_srlz_i(); 477 } 478 479 /*
+3 -11
include/asm-ia64/sal.h
··· 649 * Now define a couple of inline functions for improved type checking 650 * and convenience. 651 */ 652 - static inline long 653 - ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second, 654 - unsigned long *drift_info) 655 - { 656 - struct ia64_sal_retval isrv; 657 - 658 - SAL_CALL(isrv, SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0); 659 - *ticks_per_second = isrv.v0; 660 - *drift_info = isrv.v1; 661 - return isrv.status; 662 - } 663 664 extern s64 ia64_sal_cache_flush (u64 cache_type); 665 extern void __init check_sal_cache_flush (void); ··· 830 u64, u64, u64, u64, u64); 831 extern int ia64_sal_oemcall_reentrant(struct ia64_sal_retval *, u64, u64, u64, 832 u64, u64, u64, u64, u64); 833 #ifdef CONFIG_HOTPLUG_CPU 834 /* 835 * System Abstraction Layer Specification
··· 649 * Now define a couple of inline functions for improved type checking 650 * and convenience. 651 */ 652 653 extern s64 ia64_sal_cache_flush (u64 cache_type); 654 extern void __init check_sal_cache_flush (void); ··· 841 u64, u64, u64, u64, u64); 842 extern int ia64_sal_oemcall_reentrant(struct ia64_sal_retval *, u64, u64, u64, 843 u64, u64, u64, u64, u64); 844 + extern long 845 + ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second, 846 + unsigned long *drift_info); 847 #ifdef CONFIG_HOTPLUG_CPU 848 /* 849 * System Abstraction Layer Specification
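The sal.h hunk is the header half of the symbol-export change: modules cannot link against a static inline, so the body moves into a translation unit and is exported. A sketch of the .c side, assuming the body is exactly the one deleted above and that the export is the _GPL flavor (the real code lands in arch/ia64/kernel/sal.c):

    long
    ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
                        unsigned long *drift_info)
    {
            struct ia64_sal_retval isrv;

            SAL_CALL(isrv, SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
            *ticks_per_second = isrv.v0;
            *drift_info = isrv.v1;
            return isrv.status;
    }
    EXPORT_SYMBOL_GPL(ia64_sal_freq_base);  /* assumed export flavor */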