Merge branch 'release' of master.kernel.org:/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of master.kernel.org:/pub/scm/linux/kernel/git/aegl/linux-2.6:
[IA64] replace kmalloc+memset with kzalloc
[IA64] resolve name clash by renaming is_available_memory()
[IA64] Need export for csum_ipv6_magic
[IA64] Fix DISCONTIGMEM without VIRTUAL_MEM_MAP
[PATCH] Add support for type argument in PAL_GET_PSTATE
[IA64] tidy up return value of ip_fast_csum
[IA64] implement csum_ipv6_magic for ia64.
[IA64] More Itanium PAL spec updates
[IA64] Update processor_info features
[IA64] Add se bit to Processor State Parameter structure
[IA64] Add dp bit to cache and bus check structs
[IA64] SN: Correctly update smp_affinity mask
[IA64] sparse cleanups
[IA64] IA64 Kexec/kdump

+1146 -63
+23
arch/ia64/Kconfig
··· 434 435 source "drivers/sn/Kconfig" 436 437 source "drivers/firmware/Kconfig" 438 439 source "fs/Kconfig.binfmt"
··· 434 435 source "drivers/sn/Kconfig" 436 437 + config KEXEC 438 + bool "kexec system call (EXPERIMENTAL)" 439 + depends on EXPERIMENTAL && !IA64_HP_SIM && (!SMP || HOTPLUG_CPU) 440 + help 441 + kexec is a system call that implements the ability to shutdown your 442 + current kernel, and to start another kernel. It is like a reboot 443 + but it is indepedent of the system firmware. And like a reboot 444 + you can start any kernel with it, not just Linux. 445 + 446 + The name comes from the similiarity to the exec system call. 447 + 448 + It is an ongoing process to be certain the hardware in a machine 449 + is properly shutdown, so do not be surprised if this code does not 450 + initially work for you. It may help to enable device hotplugging 451 + support. As of this writing the exact hardware interface is 452 + strongly in flux, so no good recommendation can be made. 453 + 454 + config CRASH_DUMP 455 + bool "kernel crash dumps (EXPERIMENTAL)" 456 + depends on EXPERIMENTAL && IA64_MCA_RECOVERY && !IA64_HP_SIM && (!SMP || HOTPLUG_CPU) 457 + help 458 + Generate crash dump after being started by kexec. 459 + 460 source "drivers/firmware/Kconfig" 461 462 source "fs/Kconfig.binfmt"
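A kernel built with these options is driven from userspace through the kexec_load system call, in practice via the kexec-tools package. As a rough, hypothetical C sketch of that interface (the image buffer and load address below are placeholders, not values these patches prescribe; the call requires CAP_SYS_BOOT):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/kexec.h>        /* struct kexec_segment, KEXEC_ON_CRASH */

    int main(void)
    {
            static char image[4096];        /* placeholder: would hold a real kernel image */
            struct kexec_segment seg = {
                    .buf    = image,
                    .bufsz  = sizeof(image),
                    .mem    = (void *)0x04000000,   /* placeholder page-aligned load address */
                    .memsz  = sizeof(image),
            };

            /* KEXEC_ON_CRASH asks the kernel to load into the crashkernel= reservation */
            if (syscall(SYS_kexec_load, (unsigned long)seg.mem, 1UL, &seg,
                        KEXEC_ON_CRASH | KEXEC_ARCH_DEFAULT) != 0) {
                    perror("kexec_load");
                    return 1;
            }
            return 0;
    }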
+3 -7
arch/ia64/hp/common/sba_iommu.c
··· 1672 * SAC (single address cycle) addressable, so allocate a 1673 * pseudo-device to enforce that. 1674 */ 1675 - sac = kmalloc(sizeof(*sac), GFP_KERNEL); 1676 if (!sac) 1677 panic(PFX "Couldn't allocate struct pci_dev"); 1678 - memset(sac, 0, sizeof(*sac)); 1679 1680 - controller = kmalloc(sizeof(*controller), GFP_KERNEL); 1681 if (!controller) 1682 panic(PFX "Couldn't allocate struct pci_controller"); 1683 - memset(controller, 0, sizeof(*controller)); 1684 1685 controller->iommu = ioc; 1686 sac->sysdata = controller; ··· 1735 struct ioc *ioc; 1736 struct ioc_iommu *info; 1737 1738 - ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); 1739 if (!ioc) 1740 return NULL; 1741 - 1742 - memset(ioc, 0, sizeof(*ioc)); 1743 1744 ioc->next = ioc_list; 1745 ioc_list = ioc;
··· 1672 * SAC (single address cycle) addressable, so allocate a 1673 * pseudo-device to enforce that. 1674 */ 1675 + sac = kzalloc(sizeof(*sac), GFP_KERNEL); 1676 if (!sac) 1677 panic(PFX "Couldn't allocate struct pci_dev"); 1678 1679 + controller = kzalloc(sizeof(*controller), GFP_KERNEL); 1680 if (!controller) 1681 panic(PFX "Couldn't allocate struct pci_controller"); 1682 1683 controller->iommu = ioc; 1684 sac->sysdata = controller; ··· 1737 struct ioc *ioc; 1738 struct ioc_iommu *info; 1739 1740 + ioc = kzalloc(sizeof(*ioc), GFP_KERNEL); 1741 if (!ioc) 1742 return NULL; 1743 1744 ioc->next = ioc_list; 1745 ioc_list = ioc;
+1 -2
arch/ia64/hp/sim/simserial.c
··· 684 *ret_info = sstate->info; 685 return 0; 686 } 687 - info = kmalloc(sizeof(struct async_struct), GFP_KERNEL); 688 if (!info) { 689 sstate->count--; 690 return -ENOMEM; 691 } 692 - memset(info, 0, sizeof(struct async_struct)); 693 init_waitqueue_head(&info->open_wait); 694 init_waitqueue_head(&info->close_wait); 695 init_waitqueue_head(&info->delta_msr_wait);
··· 684 *ret_info = sstate->info; 685 return 0; 686 } 687 + info = kzalloc(sizeof(struct async_struct), GFP_KERNEL); 688 if (!info) { 689 sstate->count--; 690 return -ENOMEM; 691 } 692 init_waitqueue_head(&info->open_wait); 693 init_waitqueue_head(&info->close_wait); 694 init_waitqueue_head(&info->delta_msr_wait);
+1
arch/ia64/kernel/Makefile
··· 28 obj-$(CONFIG_CPU_FREQ) += cpufreq/ 29 obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o 30 obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o 31 obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o 32 obj-$(CONFIG_AUDIT) += audit.o 33 obj-$(CONFIG_PCI_MSI) += msi_ia64.o
··· 28 obj-$(CONFIG_CPU_FREQ) += cpufreq/ 29 obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o 30 obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o 31 + obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o 32 obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o 33 obj-$(CONFIG_AUDIT) += audit.o 34 obj-$(CONFIG_PCI_MSI) += msi_ia64.o
+4 -7
arch/ia64/kernel/cpufreq/acpi-cpufreq.c
··· 68 69 dprintk("processor_get_pstate\n"); 70 71 - retval = ia64_pal_get_pstate(&pstate_index); 72 *value = (u32) pstate_index; 73 74 if (retval) ··· 92 dprintk("extract_clock\n"); 93 94 for (i = 0; i < data->acpi_data.state_count; i++) { 95 - if (value >= data->acpi_data.states[i].control) 96 return data->acpi_data.states[i].core_frequency; 97 } 98 return data->acpi_data.states[i-1].core_frequency; ··· 118 goto migrate_end; 119 } 120 121 - /* 122 - * processor_get_pstate gets the average frequency since the 123 - * last get. So, do two PAL_get_freq()... 124 - */ 125 - ret = processor_get_pstate(&value); 126 ret = processor_get_pstate(&value); 127 128 if (ret) {
··· 68 69 dprintk("processor_get_pstate\n"); 70 71 + retval = ia64_pal_get_pstate(&pstate_index, 72 + PAL_GET_PSTATE_TYPE_INSTANT); 73 *value = (u32) pstate_index; 74 75 if (retval) ··· 91 dprintk("extract_clock\n"); 92 93 for (i = 0; i < data->acpi_data.state_count; i++) { 94 + if (value == data->acpi_data.states[i].status) 95 return data->acpi_data.states[i].core_frequency; 96 } 97 return data->acpi_data.states[i-1].core_frequency; ··· 117 goto migrate_end; 118 } 119 120 + /* processor_get_pstate gets the instantaneous frequency */ 121 ret = processor_get_pstate(&value); 122 123 if (ret) {
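Note on the new second argument: the pal.h hunk below adds four PAL_GET_PSTATE type values (PAL_GET_PSTATE_TYPE_LASTSET, _AVGANDRESET, _AVGNORESET, _INSTANT), and the driver now passes PAL_GET_PSTATE_TYPE_INSTANT, so a single call returns the instantaneous frequency rather than the old average-since-last-read semantics that forced processor_get_pstate() to be called twice.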
+245
arch/ia64/kernel/crash.c
···
··· 1 + /* 2 + * arch/ia64/kernel/crash.c 3 + * 4 + * Architecture specific (ia64) functions for kexec-based crash dumps. 5 + * 6 + * Created by: Khalid Aziz <khalid.aziz@hp.com> 7 + * Copyright (C) 2005 Hewlett-Packard Development Company, L.P. 8 + * Copyright (C) 2005 Intel Corp, Zou Nan hai <nanhai.zou@intel.com> 9 + * 10 + */ 11 + #include <linux/smp.h> 12 + #include <linux/delay.h> 13 + #include <linux/crash_dump.h> 14 + #include <linux/bootmem.h> 15 + #include <linux/kexec.h> 16 + #include <linux/elfcore.h> 17 + #include <linux/sysctl.h> 18 + #include <linux/init.h> 19 + 20 + #include <asm/kdebug.h> 21 + #include <asm/mca.h> 22 + #include <asm/uaccess.h> 23 + 24 + int kdump_status[NR_CPUS]; 25 + atomic_t kdump_cpu_freezed; 26 + atomic_t kdump_in_progress; 27 + int kdump_on_init = 1; 28 + ssize_t 29 + copy_oldmem_page(unsigned long pfn, char *buf, 30 + size_t csize, unsigned long offset, int userbuf) 31 + { 32 + void *vaddr; 33 + 34 + if (!csize) 35 + return 0; 36 + vaddr = __va(pfn<<PAGE_SHIFT); 37 + if (userbuf) { 38 + if (copy_to_user(buf, (vaddr + offset), csize)) { 39 + return -EFAULT; 40 + } 41 + } else 42 + memcpy(buf, (vaddr + offset), csize); 43 + return csize; 44 + } 45 + 46 + static inline Elf64_Word 47 + *append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data, 48 + size_t data_len) 49 + { 50 + struct elf_note *note = (struct elf_note *)buf; 51 + note->n_namesz = strlen(name) + 1; 52 + note->n_descsz = data_len; 53 + note->n_type = type; 54 + buf += (sizeof(*note) + 3)/4; 55 + memcpy(buf, name, note->n_namesz); 56 + buf += (note->n_namesz + 3)/4; 57 + memcpy(buf, data, data_len); 58 + buf += (data_len + 3)/4; 59 + return buf; 60 + } 61 + 62 + static void 63 + final_note(void *buf) 64 + { 65 + memset(buf, 0, sizeof(struct elf_note)); 66 + } 67 + 68 + extern void ia64_dump_cpu_regs(void *); 69 + 70 + static DEFINE_PER_CPU(struct elf_prstatus, elf_prstatus); 71 + 72 + void 73 + crash_save_this_cpu(void) 74 + { 75 + void *buf; 76 + unsigned long cfm, sof, sol; 77 + 78 + int cpu = smp_processor_id(); 79 + struct elf_prstatus *prstatus = &per_cpu(elf_prstatus, cpu); 80 + 81 + elf_greg_t *dst = (elf_greg_t *)&(prstatus->pr_reg); 82 + memset(prstatus, 0, sizeof(*prstatus)); 83 + prstatus->pr_pid = current->pid; 84 + 85 + ia64_dump_cpu_regs(dst); 86 + cfm = dst[43]; 87 + sol = (cfm >> 7) & 0x7f; 88 + sof = cfm & 0x7f; 89 + dst[46] = (unsigned long)ia64_rse_skip_regs((unsigned long *)dst[46], 90 + sof - sol); 91 + 92 + buf = (u64 *) per_cpu_ptr(crash_notes, cpu); 93 + if (!buf) 94 + return; 95 + buf = append_elf_note(buf, "CORE", NT_PRSTATUS, prstatus, 96 + sizeof(*prstatus)); 97 + final_note(buf); 98 + } 99 + 100 + static int 101 + kdump_wait_cpu_freeze(void) 102 + { 103 + int cpu_num = num_online_cpus() - 1; 104 + int timeout = 1000; 105 + while (timeout-- > 0) { 106 + if (atomic_read(&kdump_cpu_freezed) == cpu_num) 107 + return 0; 108 + udelay(1000); 109 + } 110 + return 1; 111 + } 112 + 113 + void 114 + machine_crash_shutdown(struct pt_regs *pt) 115 + { 116 + /* This function is only called after the system 117 + * has panicked or is otherwise in a critical state. 118 + * The minimum amount of code to allow a kexec'd kernel 119 + * to run successfully needs to happen here. 120 + * 121 + * In practice this means shooting down the other cpus in 122 + * an SMP system.
123 + */ 124 + kexec_disable_iosapic(); 125 + #ifdef CONFIG_SMP 126 + kdump_smp_send_stop(); 127 + if (kdump_wait_cpu_freeze() && kdump_on_init) { 128 + // not all cpus responded to the IPI; send INIT to freeze them 129 + kdump_smp_send_init(); 130 + } 131 + #endif 132 + } 133 + 134 + static void 135 + machine_kdump_on_init(void) 136 + { 137 + local_irq_disable(); 138 + kexec_disable_iosapic(); 139 + machine_kexec(ia64_kimage); 140 + } 141 + 142 + void 143 + kdump_cpu_freeze(struct unw_frame_info *info, void *arg) 144 + { 145 + int cpuid; 146 + local_irq_disable(); 147 + cpuid = smp_processor_id(); 148 + crash_save_this_cpu(); 149 + current->thread.ksp = (__u64)info->sw - 16; 150 + atomic_inc(&kdump_cpu_freezed); 151 + kdump_status[cpuid] = 1; 152 + mb(); 153 + if (cpuid == 0) { 154 + for (;;) 155 + cpu_relax(); 156 + } else 157 + ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]); 158 + } 159 + 160 + static int 161 + kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data) 162 + { 163 + struct ia64_mca_notify_die *nd; 164 + struct die_args *args = data; 165 + 166 + if (!kdump_on_init) 167 + return NOTIFY_DONE; 168 + 169 + if (val != DIE_INIT_MONARCH_ENTER && 170 + val != DIE_INIT_SLAVE_ENTER && 171 + val != DIE_MCA_RENDZVOUS_LEAVE && 172 + val != DIE_MCA_MONARCH_LEAVE) 173 + return NOTIFY_DONE; 174 + 175 + nd = (struct ia64_mca_notify_die *)args->err; 176 + /* Reason code 1 means machine check rendezvous */ 177 + if ((val == DIE_INIT_MONARCH_ENTER || val == DIE_INIT_SLAVE_ENTER) && 178 + nd->sos->rv_rc == 1) 179 + return NOTIFY_DONE; 180 + 181 + switch (val) { 182 + case DIE_INIT_MONARCH_ENTER: 183 + machine_kdump_on_init(); 184 + break; 185 + case DIE_INIT_SLAVE_ENTER: 186 + unw_init_running(kdump_cpu_freeze, NULL); 187 + break; 188 + case DIE_MCA_RENDZVOUS_LEAVE: 189 + if (atomic_read(&kdump_in_progress)) 190 + unw_init_running(kdump_cpu_freeze, NULL); 191 + break; 192 + case DIE_MCA_MONARCH_LEAVE: 193 + /* die_register->signr indicates whether MCA is recoverable */ 194 + if (!args->signr) 195 + machine_kdump_on_init(); 196 + break; 197 + } 198 + return NOTIFY_DONE; 199 + } 200 + 201 + #ifdef CONFIG_SYSCTL 202 + static ctl_table kdump_on_init_table[] = { 203 + { 204 + .ctl_name = CTL_UNNUMBERED, 205 + .procname = "kdump_on_init", 206 + .data = &kdump_on_init, 207 + .maxlen = sizeof(int), 208 + .mode = 0644, 209 + .proc_handler = &proc_dointvec, 210 + }, 211 + { .ctl_name = 0 } 212 + }; 213 + 214 + static ctl_table sys_table[] = { 215 + { 216 + .ctl_name = CTL_KERN, 217 + .procname = "kernel", 218 + .mode = 0555, 219 + .child = kdump_on_init_table, 220 + }, 221 + { .ctl_name = 0 } 222 + }; 223 + #endif 224 + 225 + static int 226 + machine_crash_setup(void) 227 + { 228 + char *from = strstr(saved_command_line, "elfcorehdr="); 229 + static struct notifier_block kdump_init_notifier_nb = { 230 + .notifier_call = kdump_init_notifier, 231 + }; 232 + int ret; 233 + if (from) 234 + elfcorehdr_addr = memparse(from+11, &from); 235 + saved_max_pfn = (unsigned long)-1; 236 + if ((ret = register_die_notifier(&kdump_init_notifier_nb)) != 0) 237 + return ret; 238 + #ifdef CONFIG_SYSCTL 239 + register_sysctl_table(sys_table, 0); 240 + #endif 241 + return 0; 242 + } 243 + 244 + __initcall(machine_crash_setup); 245 +
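As a concrete check of the note-packing arithmetic in append_elf_note() above: for the "CORE"/NT_PRSTATUS note written by crash_save_this_cpu(), n_namesz is strlen("CORE") + 1 = 5, so (n_namesz + 3)/4 advances buf by two 4-byte words (8 bytes of name plus padding), keeping the prstatus payload on the 4-byte boundary that the ELF note format requires.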
+65 -6
arch/ia64/kernel/efi.c
··· 26 #include <linux/types.h> 27 #include <linux/time.h> 28 #include <linux/efi.h> 29 30 #include <asm/io.h> 31 #include <asm/kregs.h> ··· 42 struct efi efi; 43 EXPORT_SYMBOL(efi); 44 static efi_runtime_services_t *runtime; 45 - static unsigned long mem_limit = ~0UL, max_addr = ~0UL; 46 47 #define efi_call_virt(f, args...) (*(f))(args) 48 ··· 225 } 226 227 static int 228 - is_available_memory (efi_memory_desc_t *md) 229 { 230 if (!(md->attribute & EFI_MEMORY_WB)) 231 return 0; ··· 422 mem_limit = memparse(cp + 4, &cp); 423 } else if (memcmp(cp, "max_addr=", 9) == 0) { 424 max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp)); 425 } else { 426 while (*cp != ' ' && *cp) 427 ++cp; ··· 431 ++cp; 432 } 433 } 434 if (max_addr != ~0UL) 435 printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20); 436 ··· 892 } 893 contig_high = GRANULEROUNDDOWN(contig_high); 894 } 895 - if (!is_available_memory(md) || md->type == EFI_LOADER_DATA) 896 continue; 897 898 /* Round ends inward to granule boundaries */ 899 as = max(contig_low, md->phys_addr); 900 ae = min(contig_high, efi_md_end(md)); 901 902 - /* keep within max_addr= command line arg */ 903 ae = min(ae, max_addr); 904 if (ae <= as) 905 continue; ··· 968 } 969 contig_high = GRANULEROUNDDOWN(contig_high); 970 } 971 - if (!is_available_memory(md)) 972 continue; 973 974 /* ··· 1010 } else 1011 ae = efi_md_end(md); 1012 1013 - /* keep within max_addr= command line arg */ 1014 ae = min(ae, max_addr); 1015 if (ae <= as) 1016 continue; ··· 1123 */ 1124 insert_resource(res, code_resource); 1125 insert_resource(res, data_resource); 1126 } 1127 } 1128 }
··· 26 #include <linux/types.h> 27 #include <linux/time.h> 28 #include <linux/efi.h> 29 + #include <linux/kexec.h> 30 31 #include <asm/io.h> 32 #include <asm/kregs.h> ··· 41 struct efi efi; 42 EXPORT_SYMBOL(efi); 43 static efi_runtime_services_t *runtime; 44 + static unsigned long mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL; 45 46 #define efi_call_virt(f, args...) (*(f))(args) 47 ··· 224 } 225 226 static int 227 + is_memory_available (efi_memory_desc_t *md) 228 { 229 if (!(md->attribute & EFI_MEMORY_WB)) 230 return 0; ··· 421 mem_limit = memparse(cp + 4, &cp); 422 } else if (memcmp(cp, "max_addr=", 9) == 0) { 423 max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp)); 424 + } else if (memcmp(cp, "min_addr=", 9) == 0) { 425 + min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp)); 426 } else { 427 while (*cp != ' ' && *cp) 428 ++cp; ··· 428 ++cp; 429 } 430 } 431 + if (min_addr != 0UL) 432 + printk(KERN_INFO "Ignoring memory below %luMB\n", min_addr >> 20); 433 if (max_addr != ~0UL) 434 printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20); 435 ··· 887 } 888 contig_high = GRANULEROUNDDOWN(contig_high); 889 } 890 + if (!is_memory_available(md) || md->type == EFI_LOADER_DATA) 891 continue; 892 893 /* Round ends inward to granule boundaries */ 894 as = max(contig_low, md->phys_addr); 895 ae = min(contig_high, efi_md_end(md)); 896 897 + /* keep within max_addr= and min_addr= command line args */ 898 + as = max(as, min_addr); 899 ae = min(ae, max_addr); 900 if (ae <= as) 901 continue; ··· 962 } 963 contig_high = GRANULEROUNDDOWN(contig_high); 964 } 965 + if (!is_memory_available(md)) 966 continue; 967 968 /* ··· 1004 } else 1005 ae = efi_md_end(md); 1006 1007 + /* keep within max_addr= and min_addr= command line args */ 1008 + as = max(as, min_addr); 1009 ae = min(ae, max_addr); 1010 if (ae <= as) 1011 continue; ··· 1116 */ 1117 insert_resource(res, code_resource); 1118 insert_resource(res, data_resource); 1119 + #ifdef CONFIG_KEXEC 1120 + insert_resource(res, &efi_memmap_res); 1121 + insert_resource(res, &boot_param_res); 1122 + if (crashk_res.end > crashk_res.start) 1123 + insert_resource(res, &crashk_res); 1124 + #endif 1125 } 1126 } 1127 } 1128 + 1129 + #ifdef CONFIG_KEXEC 1130 + /* find a block of memory aligned to 64M, excluding reserved regions; 1131 + rsvd_regions are sorted 1132 + */ 1133 + unsigned long 1134 + kdump_find_rsvd_region (unsigned long size, 1135 + struct rsvd_region *r, int n) 1136 + { 1137 + int i; 1138 + u64 start, end; 1139 + u64 alignment = 1UL << _PAGE_SIZE_64M; 1140 + void *efi_map_start, *efi_map_end, *p; 1141 + efi_memory_desc_t *md; 1142 + u64 efi_desc_size; 1143 + 1144 + efi_map_start = __va(ia64_boot_param->efi_memmap); 1145 + efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; 1146 + efi_desc_size = ia64_boot_param->efi_memdesc_size; 1147 + 1148 + for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { 1149 + md = p; 1150 + if (!efi_wb(md)) 1151 + continue; 1152 + start = ALIGN(md->phys_addr, alignment); 1153 + end = efi_md_end(md); 1154 + for (i = 0; i < n; i++) { 1155 + if (__pa(r[i].start) >= start && __pa(r[i].end) < end) { 1156 + if (__pa(r[i].start) > start + size) 1157 + return start; 1158 + start = ALIGN(__pa(r[i].end), alignment); 1159 + if (i < n-1 && __pa(r[i+1].start) < start + size) 1160 + continue; 1161 + else 1162 + break; 1163 + } 1164 + } 1165 + if (end > start + size) 1166 + return start; 1167 + } 1168 + 1169 + printk(KERN_WARNING "Cannot reserve 0x%lx bytes of memory for crashdump\n", 1170 + size); 1171 + return ~0UL;
1172 + } 1173 + #endif
+1 -1
arch/ia64/kernel/entry.S
··· 1575 data8 sys_mq_timedreceive // 1265 1576 data8 sys_mq_notify 1577 data8 sys_mq_getsetattr 1578 - data8 sys_ni_syscall // reserved for kexec_load 1579 data8 sys_ni_syscall // reserved for vserver 1580 data8 sys_waitid // 1270 1581 data8 sys_add_key
··· 1575 data8 sys_mq_timedreceive // 1265 1576 data8 sys_mq_notify 1577 data8 sys_mq_getsetattr 1578 + data8 sys_kexec_load 1579 data8 sys_ni_syscall // reserved for vserver 1580 data8 sys_waitid // 1270 1581 data8 sys_add_key
+1
arch/ia64/kernel/ia64_ksyms.c
··· 14 15 #include <asm/checksum.h> 16 EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */ 17 18 #include <asm/semaphore.h> 19 EXPORT_SYMBOL(__down);
··· 14 15 #include <asm/checksum.h> 16 EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */ 17 + EXPORT_SYMBOL(csum_ipv6_magic); 18 19 #include <asm/semaphore.h> 20 EXPORT_SYMBOL(__down);
+21
arch/ia64/kernel/iosapic.c
··· 288 /* do nothing... */ 289 } 290 291 static void 292 mask_irq (unsigned int irq) 293 {
··· 288 /* do nothing... */ 289 } 290 291 + 292 + #ifdef CONFIG_KEXEC 293 + void 294 + kexec_disable_iosapic(void) 295 + { 296 + struct iosapic_intr_info *info; 297 + struct iosapic_rte_info *rte; 298 + u8 vec = 0; 299 + for (info = iosapic_intr_info; info < 300 + iosapic_intr_info + IA64_NUM_VECTORS; ++info, ++vec) { 301 + list_for_each_entry(rte, &info->rtes, 302 + rte_list) { 303 + iosapic_write(rte->addr, 304 + IOSAPIC_RTE_LOW(rte->rte_index), 305 + IOSAPIC_MASK|vec); 306 + iosapic_eoi(rte->addr, vec); 307 + } 308 + } 309 + } 310 + #endif 311 + 312 static void 313 mask_irq (unsigned int irq) 314 {
+1 -1
arch/ia64/kernel/kprobes.c
··· 851 return; 852 } 853 } while (unw_unwind(info) >= 0); 854 - lp->bsp = 0; 855 lp->cfm = 0; 856 return; 857 }
··· 851 return; 852 } 853 } while (unw_unwind(info) >= 0); 854 + lp->bsp = NULL; 855 lp->cfm = 0; 856 return; 857 }
+133
arch/ia64/kernel/machine_kexec.c
···
··· 1 + /* 2 + * arch/ia64/kernel/machine_kexec.c 3 + * 4 + * Handle transition of Linux booting another kernel 5 + * Copyright (C) 2005 Hewlett-Packard Development Company, L.P. 6 + * Copyright (C) 2005 Khalid Aziz <khalid.aziz@hp.com> 7 + * Copyright (C) 2006 Intel Corp, Zou Nan hai <nanhai.zou@intel.com> 8 + * 9 + * This source code is licensed under the GNU General Public License, 10 + * Version 2. See the file COPYING for more details. 11 + */ 12 + 13 + #include <linux/mm.h> 14 + #include <linux/kexec.h> 15 + #include <linux/cpu.h> 16 + #include <linux/irq.h> 17 + #include <asm/mmu_context.h> 18 + #include <asm/setup.h> 19 + #include <asm/delay.h> 20 + #include <asm/meminit.h> 21 + 22 + typedef void (*relocate_new_kernel_t)(unsigned long, unsigned long, 23 + struct ia64_boot_param *, unsigned long); 24 + 25 + struct kimage *ia64_kimage; 26 + 27 + struct resource efi_memmap_res = { 28 + .name = "EFI Memory Map", 29 + .start = 0, 30 + .end = 0, 31 + .flags = IORESOURCE_BUSY | IORESOURCE_MEM 32 + }; 33 + 34 + struct resource boot_param_res = { 35 + .name = "Boot parameter", 36 + .start = 0, 37 + .end = 0, 38 + .flags = IORESOURCE_BUSY | IORESOURCE_MEM 39 + }; 40 + 41 + 42 + /* 43 + * Do whatever setup is needed on the image and the 44 + * reboot code buffer to allow us to avoid allocations 45 + * later. 46 + */ 47 + int machine_kexec_prepare(struct kimage *image) 48 + { 49 + void *control_code_buffer; 50 + const unsigned long *func; 51 + 52 + func = (unsigned long *)&relocate_new_kernel; 53 + /* Pre-load control code buffer to minimize work in kexec path */ 54 + control_code_buffer = page_address(image->control_code_page); 55 + memcpy((void *)control_code_buffer, (const void *)func[0], 56 + relocate_new_kernel_size); 57 + flush_icache_range((unsigned long)control_code_buffer, 58 + (unsigned long)control_code_buffer + relocate_new_kernel_size); 59 + ia64_kimage = image; 60 + 61 + return 0; 62 + } 63 + 64 + void machine_kexec_cleanup(struct kimage *image) 65 + { 66 + } 67 + 68 + void machine_shutdown(void) 69 + { 70 + int cpu; 71 + 72 + for_each_online_cpu(cpu) { 73 + if (cpu != smp_processor_id()) 74 + cpu_down(cpu); 75 + } 76 + kexec_disable_iosapic(); 77 + } 78 + 79 + /* 80 + * Do not allocate memory (or fail in any way) in machine_kexec(). 81 + * We are past the point of no return, committed to rebooting now.
82 + */ 83 + extern void *efi_get_pal_addr(void); 84 + static void ia64_machine_kexec(struct unw_frame_info *info, void *arg) 85 + { 86 + struct kimage *image = arg; 87 + relocate_new_kernel_t rnk; 88 + void *pal_addr = efi_get_pal_addr(); 89 + unsigned long code_addr = (unsigned long)page_address(image->control_code_page); 90 + unsigned long vector; 91 + int ii; 92 + 93 + if (image->type == KEXEC_TYPE_CRASH) { 94 + crash_save_this_cpu(); 95 + current->thread.ksp = (__u64)info->sw - 16; 96 + } 97 + 98 + /* Interrupts aren't acceptable while we reboot */ 99 + local_irq_disable(); 100 + 101 + /* Mask CMC and Performance Monitor interrupts */ 102 + ia64_setreg(_IA64_REG_CR_PMV, 1 << 16); 103 + ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16); 104 + 105 + /* Mask ITV and Local Redirect Registers */ 106 + ia64_set_itv(1 << 16); 107 + ia64_set_lrr0(1 << 16); 108 + ia64_set_lrr1(1 << 16); 109 + 110 + /* terminate possible nested in-service interrupts */ 111 + for (ii = 0; ii < 16; ii++) 112 + ia64_eoi(); 113 + 114 + /* unmask TPR and clear any pending interrupts */ 115 + ia64_setreg(_IA64_REG_CR_TPR, 0); 116 + ia64_srlz_d(); 117 + vector = ia64_get_ivr(); 118 + while (vector != IA64_SPURIOUS_INT_VECTOR) { 119 + ia64_eoi(); 120 + vector = ia64_get_ivr(); 121 + } 122 + platform_kernel_launch_event(); 123 + rnk = (relocate_new_kernel_t)&code_addr; 124 + (*rnk)(image->head, image->start, ia64_boot_param, 125 + GRANULEROUNDDOWN((unsigned long) pal_addr)); 126 + BUG(); 127 + } 128 + 129 + void machine_kexec(struct kimage *image) 130 + { 131 + unw_init_running(ia64_machine_kexec, image); 132 + for(;;); 133 + }
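One ia64-specific subtlety in the hunk above: an ia64 function pointer actually points at a function descriptor (entry address plus gp), so func[0] in machine_kexec_prepare() reads the real entry point of relocate_new_kernel out of its descriptor, and rnk = (relocate_new_kernel_t)&code_addr in ia64_machine_kexec() works because the address of code_addr serves as a makeshift descriptor whose first word holds the copied control-code buffer's address.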
+5
arch/ia64/kernel/mca.c
··· 82 #include <asm/system.h> 83 #include <asm/sal.h> 84 #include <asm/mca.h> 85 86 #include <asm/irq.h> 87 #include <asm/hw_irq.h> ··· 1239 } else { 1240 /* Dump buffered message to console */ 1241 ia64_mlogbuf_finish(1); 1242 } 1243 if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover) 1244 == NOTIFY_STOP)
··· 82 #include <asm/system.h> 83 #include <asm/sal.h> 84 #include <asm/mca.h> 85 + #include <asm/kexec.h> 86 87 #include <asm/irq.h> 88 #include <asm/hw_irq.h> ··· 1238 } else { 1239 /* Dump buffered message to console */ 1240 ia64_mlogbuf_finish(1); 1241 + #ifdef CONFIG_CRASH_DUMP 1242 + atomic_set(&kdump_in_progress, 1); 1243 + monarch_cpu = -1; 1244 + #endif 1245 } 1246 if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover) 1247 == NOTIFY_STOP)
+19 -3
arch/ia64/kernel/palinfo.c
··· 16 * 02/05/2001 S.Eranian fixed module support 17 * 10/23/2001 S.Eranian updated pal_perf_mon_info bug fixes 18 * 03/24/2004 Ashok Raj updated to work with CPU Hotplug 19 */ 20 #include <linux/types.h> 21 #include <linux/errno.h> ··· 315 "Protection Key Registers(PKR) : %d\n" 316 "Implemented bits in PKR.key : %d\n" 317 "Hash Tag ID : 0x%x\n" 318 - "Size of RR.rid : %d\n", 319 vm_info_1.pal_vm_info_1_s.phys_add_size, 320 vm_info_2.pal_vm_info_2_s.impl_va_msb+1, 321 vm_info_1.pal_vm_info_1_s.max_pkr+1, 322 vm_info_1.pal_vm_info_1_s.key_size, 323 vm_info_1.pal_vm_info_1_s.hash_tag_id, 324 vm_info_2.pal_vm_info_2_s.rid_size); 325 } 326 327 if (ia64_pal_mem_attrib(&attrib) == 0) { ··· 475 NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL, 476 NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL, 477 NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL, 478 - NULL,NULL,NULL,NULL,NULL, 479 "XIP,XPSR,XFS implemented", 480 "XR1-XR3 implemented", 481 "Disable dynamic predicate prediction", ··· 487 "Disable dynamic data cache prefetch", 488 "Disable dynamic inst cache prefetch", 489 "Disable dynamic branch prediction", 490 - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 491 "Disable BINIT on processor time-out", 492 "Disable dynamic power management (DPM)", 493 "Disable coherency",
··· 16 * 02/05/2001 S.Eranian fixed module support 17 * 10/23/2001 S.Eranian updated pal_perf_mon_info bug fixes 18 * 03/24/2004 Ashok Raj updated to work with CPU Hotplug 19 + * 10/26/2006 Russ Anderson updated processor features to rev 2.2 spec 20 */ 21 #include <linux/types.h> 22 #include <linux/errno.h> ··· 314 "Protection Key Registers(PKR) : %d\n" 315 "Implemented bits in PKR.key : %d\n" 316 "Hash Tag ID : 0x%x\n" 317 + "Size of RR.rid : %d\n" 318 + "Max Purges : ", 319 vm_info_1.pal_vm_info_1_s.phys_add_size, 320 vm_info_2.pal_vm_info_2_s.impl_va_msb+1, 321 vm_info_1.pal_vm_info_1_s.max_pkr+1, 322 vm_info_1.pal_vm_info_1_s.key_size, 323 vm_info_1.pal_vm_info_1_s.hash_tag_id, 324 vm_info_2.pal_vm_info_2_s.rid_size); 325 + if (vm_info_2.pal_vm_info_2_s.max_purges == PAL_MAX_PURGES) 326 + p += sprintf(p, "unlimited\n"); 327 + else 328 + p += sprintf(p, "%d\n", 329 + vm_info_2.pal_vm_info_2_s.max_purges ? 330 + vm_info_2.pal_vm_info_2_s.max_purges : 1); 331 } 332 333 if (ia64_pal_mem_attrib(&attrib) == 0) { ··· 467 NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL, 468 NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL, 469 NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL, 470 + "Unimplemented instruction address fault", 471 + "INIT, PMI, and LINT pins", 472 + "Simple unimplemented instr addresses", 473 + "Variable P-state performance", 474 + "Virtual machine features implemented", 475 "XIP,XPSR,XFS implemented", 476 "XR1-XR3 implemented", 477 "Disable dynamic predicate prediction", ··· 475 "Disable dynamic data cache prefetch", 476 "Disable dynamic inst cache prefetch", 477 "Disable dynamic branch prediction", 478 + NULL, NULL, NULL, NULL, 479 + "Disable P-states", 480 + "Enable MCA on Data Poisoning", 481 + "Enable vmsw instruction", 482 + "Enable extern environmental notification", 483 "Disable BINIT on processor time-out", 484 "Disable dynamic power management (DPM)", 485 "Disable coherency",
+1 -2
arch/ia64/kernel/perfmon.c
··· 853 * allocate context descriptor 854 * must be able to free with interrupts disabled 855 */ 856 - ctx = kmalloc(sizeof(pfm_context_t), GFP_KERNEL); 857 if (ctx) { 858 - memset(ctx, 0, sizeof(pfm_context_t)); 859 DPRINT(("alloc ctx @%p\n", ctx)); 860 } 861 return ctx;
··· 853 * allocate context descriptor 854 * must be able to free with interrupts disabled 855 */ 856 + ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL); 857 if (ctx) { 858 DPRINT(("alloc ctx @%p\n", ctx)); 859 } 860 return ctx;
+6 -6
arch/ia64/kernel/perfmon_montecito.h
··· 45 /* pmc29 */ { PFM_REG_NOTIMPL, }, 46 /* pmc30 */ { PFM_REG_NOTIMPL, }, 47 /* pmc31 */ { PFM_REG_NOTIMPL, }, 48 - /* pmc32 */ { PFM_REG_CONFIG, 0, 0x30f01ffffffffff, 0x30f01ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, 49 - /* pmc33 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, 50 - /* pmc34 */ { PFM_REG_CONFIG, 0, 0xf01ffffffffff, 0xf01ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, 51 - /* pmc35 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffff, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, 52 /* pmc36 */ { PFM_REG_CONFIG, 0, 0xfffffff0, 0xf, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, 53 /* pmc37 */ { PFM_REG_MONITOR, 4, 0x0, 0x3fff, NULL, pfm_mont_pmc_check, {RDEP_MONT_IEAR, 0, 0, 0}, {0, 0, 0, 0}}, 54 /* pmc38 */ { PFM_REG_CONFIG, 0, 0xdb6, 0x2492, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, 55 /* pmc39 */ { PFM_REG_MONITOR, 6, 0x0, 0xffcf, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}}, 56 /* pmc40 */ { PFM_REG_MONITOR, 6, 0x2000000, 0xf01cf, NULL, pfm_mont_pmc_check, {RDEP_MONT_DEAR,0, 0, 0}, {0,0, 0, 0}}, 57 - /* pmc41 */ { PFM_REG_CONFIG, 0, 0x00002078fefefefe, 0x1e00018181818, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, 58 /* pmc42 */ { PFM_REG_MONITOR, 6, 0x0, 0x7ff4f, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}}, 59 { PFM_REG_END , 0, 0x0, -1, NULL, NULL, {0,}, {0,}}, /* end marker */ 60 }; ··· 185 DPRINT(("cnum=%u val=0x%lx, using_dbreg=%d loaded=%d\n", cnum, tmpval, ctx->ctx_fl_using_dbreg, is_loaded)); 186 187 if (cnum == 41 && is_loaded 188 - && (tmpval & 0x1e00000000000) && (tmpval & 0x18181818UL) != 0x18181818UL && ctx->ctx_fl_using_dbreg == 0) { 189 190 DPRINT(("pmc[%d]=0x%lx has active pmc41 settings, clearing dbr\n", cnum, tmpval)); 191
··· 45 /* pmc29 */ { PFM_REG_NOTIMPL, }, 46 /* pmc30 */ { PFM_REG_NOTIMPL, }, 47 /* pmc31 */ { PFM_REG_NOTIMPL, }, 48 + /* pmc32 */ { PFM_REG_CONFIG, 0, 0x30f01ffffffffffUL, 0x30f01ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, 49 + /* pmc33 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, 50 + /* pmc34 */ { PFM_REG_CONFIG, 0, 0xf01ffffffffffUL, 0xf01ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, 51 + /* pmc35 */ { PFM_REG_CONFIG, 0, 0x0, 0x1ffffffffffUL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, 52 /* pmc36 */ { PFM_REG_CONFIG, 0, 0xfffffff0, 0xf, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, 53 /* pmc37 */ { PFM_REG_MONITOR, 4, 0x0, 0x3fff, NULL, pfm_mont_pmc_check, {RDEP_MONT_IEAR, 0, 0, 0}, {0, 0, 0, 0}}, 54 /* pmc38 */ { PFM_REG_CONFIG, 0, 0xdb6, 0x2492, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, 55 /* pmc39 */ { PFM_REG_MONITOR, 6, 0x0, 0xffcf, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}}, 56 /* pmc40 */ { PFM_REG_MONITOR, 6, 0x2000000, 0xf01cf, NULL, pfm_mont_pmc_check, {RDEP_MONT_DEAR,0, 0, 0}, {0,0, 0, 0}}, 57 + /* pmc41 */ { PFM_REG_CONFIG, 0, 0x00002078fefefefeUL, 0x1e00018181818UL, NULL, pfm_mont_pmc_check, {0,0, 0, 0}, {0,0, 0, 0}}, 58 /* pmc42 */ { PFM_REG_MONITOR, 6, 0x0, 0x7ff4f, NULL, pfm_mont_pmc_check, {RDEP_MONT_ETB,0, 0, 0}, {0,0, 0, 0}}, 59 { PFM_REG_END , 0, 0x0, -1, NULL, NULL, {0,}, {0,}}, /* end marker */ 60 }; ··· 185 DPRINT(("cnum=%u val=0x%lx, using_dbreg=%d loaded=%d\n", cnum, tmpval, ctx->ctx_fl_using_dbreg, is_loaded)); 186 187 if (cnum == 41 && is_loaded 188 + && (tmpval & 0x1e00000000000UL) && (tmpval & 0x18181818UL) != 0x18181818UL && ctx->ctx_fl_using_dbreg == 0) { 189 190 DPRINT(("pmc[%d]=0x%lx has active pmc41 settings, clearing dbr\n", cnum, tmpval)); 191
+334
arch/ia64/kernel/relocate_kernel.S
···
··· 1 + /* 2 + * arch/ia64/kernel/relocate_kernel.S 3 + * 4 + * Relocate kexec'able kernel and start it 5 + * 6 + * Copyright (C) 2005 Hewlett-Packard Development Company, L.P. 7 + * Copyright (C) 2005 Khalid Aziz <khalid.aziz@hp.com> 8 + * Copyright (C) 2005 Intel Corp, Zou Nan hai <nanhai.zou@intel.com> 9 + * 10 + * This source code is licensed under the GNU General Public License, 11 + * Version 2. See the file COPYING for more details. 12 + */ 13 + #include <asm/asmmacro.h> 14 + #include <asm/kregs.h> 15 + #include <asm/page.h> 16 + #include <asm/pgtable.h> 17 + #include <asm/mca_asm.h> 18 + 19 + /* Must be relocatable PIC code callable as a C function 20 + */ 21 + GLOBAL_ENTRY(relocate_new_kernel) 22 + .prologue 23 + alloc r31=ar.pfs,4,0,0,0 24 + .body 25 + .reloc_entry: 26 + { 27 + rsm psr.i| psr.ic 28 + mov r2=ip 29 + } 30 + ;; 31 + { 32 + flushrs // must be first insn in group 33 + srlz.i 34 + } 35 + ;; 36 + dep r2=0,r2,61,3 //to physical address 37 + ;; 38 + //first switch to physical mode 39 + add r3=1f-.reloc_entry, r2 40 + movl r16 = IA64_PSR_AC|IA64_PSR_BN|IA64_PSR_IC 41 + mov ar.rsc=0 // put RSE in enforced lazy mode 42 + ;; 43 + add sp=(memory_stack_end - 16 - .reloc_entry),r2 44 + add r8=(register_stack - .reloc_entry),r2 45 + ;; 46 + mov r18=ar.rnat 47 + mov ar.bspstore=r8 48 + ;; 49 + mov cr.ipsr=r16 50 + mov cr.iip=r3 51 + mov cr.ifs=r0 52 + srlz.i 53 + ;; 54 + mov ar.rnat=r18 55 + rfi 56 + ;; 57 + 1: 58 + //physical mode code begin 59 + mov b6=in1 60 + dep r28=0,in2,61,3 //to physical address 61 + 62 + // purge all TC entries 63 + #define O(member) IA64_CPUINFO_##member##_OFFSET 64 + GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2 65 + ;; 66 + addl r17=O(PTCE_STRIDE),r2 67 + addl r2=O(PTCE_BASE),r2 68 + ;; 69 + ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));; // r18=ptce_base 70 + ld4 r19=[r2],4 // r19=ptce_count[0] 71 + ld4 r21=[r17],4 // r21=ptce_stride[0] 72 + ;; 73 + ld4 r20=[r2] // r20=ptce_count[1] 74 + ld4 r22=[r17] // r22=ptce_stride[1] 75 + mov r24=r0 76 + ;; 77 + adds r20=-1,r20 78 + ;; 79 + #undef O 80 + 2: 81 + cmp.ltu p6,p7=r24,r19 82 + (p7) br.cond.dpnt.few 4f 83 + mov ar.lc=r20 84 + 3: 85 + ptc.e r18 86 + ;; 87 + add r18=r22,r18 88 + br.cloop.sptk.few 3b 89 + ;; 90 + add r18=r21,r18 91 + add r24=1,r24 92 + ;; 93 + br.sptk.few 2b 94 + 4: 95 + srlz.i 96 + ;; 97 + //purge TR entry for kernel text and data 98 + movl r16=KERNEL_START 99 + mov r18=KERNEL_TR_PAGE_SHIFT<<2 100 + ;; 101 + ptr.i r16, r18 102 + ptr.d r16, r18 103 + ;; 104 + srlz.i 105 + ;; 106 + 107 + // purge TR entry for percpu data 108 + movl r16=PERCPU_ADDR 109 + mov r18=PERCPU_PAGE_SHIFT<<2 110 + ;; 111 + ptr.d r16,r18 112 + ;; 113 + srlz.d 114 + ;; 115 + 116 + // purge TR entry for pal code 117 + mov r16=in3 118 + mov r18=IA64_GRANULE_SHIFT<<2 119 + ;; 120 + ptr.i r16,r18 121 + ;; 122 + srlz.i 123 + ;; 124 + 125 + // purge TR entry for stack 126 + mov r16=IA64_KR(CURRENT_STACK) 127 + ;; 128 + shl r16=r16,IA64_GRANULE_SHIFT 129 + movl r19=PAGE_OFFSET 130 + ;; 131 + add r16=r19,r16 132 + mov r18=IA64_GRANULE_SHIFT<<2 133 + ;; 134 + ptr.d r16,r18 135 + ;; 136 + srlz.i 137 + ;; 138 + 139 + //copy segments 140 + movl r16=PAGE_MASK 141 + mov r30=in0 // in0 is page_list 142 + br.sptk.few .dest_page 143 + ;; 144 + .loop: 145 + ld8 r30=[in0], 8;; 146 + .dest_page: 147 + tbit.z p0, p6=r30, 0;; // 0x1 dest page 148 + (p6) and r17=r30, r16 149 + (p6) br.cond.sptk.few .loop;; 150 + 151 + tbit.z p0, p6=r30, 1;; // 0x2 indirect page 152 + (p6) and in0=r30, r16 153 + (p6) br.cond.sptk.few 
.loop;; 154 + 155 + tbit.z p0, p6=r30, 2;; // 0x4 end flag 156 + (p6) br.cond.sptk.few .end_loop;; 157 + 158 + tbit.z p6, p0=r30, 3;; // 0x8 source page 159 + (p6) br.cond.sptk.few .loop 160 + 161 + and r18=r30, r16 162 + 163 + // simple copy page, may optimize later 164 + movl r14=PAGE_SIZE/8 - 1;; 165 + mov ar.lc=r14;; 166 + 1: 167 + ld8 r14=[r18], 8;; 168 + st8 [r17]=r14;; 169 + fc.i r17 170 + add r17=8, r17 171 + br.ctop.sptk.few 1b 172 + br.sptk.few .loop 173 + ;; 174 + 175 + .end_loop: 176 + sync.i // for fc.i 177 + ;; 178 + srlz.i 179 + ;; 180 + srlz.d 181 + ;; 182 + br.call.sptk.many b0=b6;; 183 + 184 + .align 32 185 + memory_stack: 186 + .fill 8192, 1, 0 187 + memory_stack_end: 188 + register_stack: 189 + .fill 8192, 1, 0 190 + register_stack_end: 191 + relocate_new_kernel_end: 192 + END(relocate_new_kernel) 193 + 194 + .global relocate_new_kernel_size 195 + relocate_new_kernel_size: 196 + data8 relocate_new_kernel_end - relocate_new_kernel 197 + 198 + GLOBAL_ENTRY(ia64_dump_cpu_regs) 199 + .prologue 200 + alloc loc0=ar.pfs,1,2,0,0 201 + .body 202 + mov ar.rsc=0 // put RSE in enforced lazy mode 203 + add loc1=4*8, in0 // save r4 and r5 first 204 + ;; 205 + { 206 + flushrs // flush dirty regs to backing store 207 + srlz.i 208 + } 209 + st8 [loc1]=r4, 8 210 + ;; 211 + st8 [loc1]=r5, 8 212 + ;; 213 + add loc1=32*8, in0 214 + mov r4=ar.rnat 215 + ;; 216 + st8 [in0]=r0, 8 // r0 217 + st8 [loc1]=r4, 8 // rnat 218 + mov r5=pr 219 + ;; 220 + st8 [in0]=r1, 8 // r1 221 + st8 [loc1]=r5, 8 // pr 222 + mov r4=b0 223 + ;; 224 + st8 [in0]=r2, 8 // r2 225 + st8 [loc1]=r4, 8 // b0 226 + mov r5=b1; 227 + ;; 228 + st8 [in0]=r3, 24 // r3 229 + st8 [loc1]=r5, 8 // b1 230 + mov r4=b2 231 + ;; 232 + st8 [in0]=r6, 8 // r6 233 + st8 [loc1]=r4, 8 // b2 234 + mov r5=b3 235 + ;; 236 + st8 [in0]=r7, 8 // r7 237 + st8 [loc1]=r5, 8 // b3 238 + mov r4=b4 239 + ;; 240 + st8 [in0]=r8, 8 // r8 241 + st8 [loc1]=r4, 8 // b4 242 + mov r5=b5 243 + ;; 244 + st8 [in0]=r9, 8 // r9 245 + st8 [loc1]=r5, 8 // b5 246 + mov r4=b6 247 + ;; 248 + st8 [in0]=r10, 8 // r10 249 + st8 [loc1]=r5, 8 // b6 250 + mov r5=b7 251 + ;; 252 + st8 [in0]=r11, 8 // r11 253 + st8 [loc1]=r5, 8 // b7 254 + mov r4=b0 255 + ;; 256 + st8 [in0]=r12, 8 // r12 257 + st8 [loc1]=r4, 8 // ip 258 + mov r5=loc0 259 + ;; 260 + st8 [in0]=r13, 8 // r13 261 + extr.u r5=r5, 0, 38 // ar.pfs.pfm 262 + mov r4=r0 // user mask 263 + ;; 264 + st8 [in0]=r14, 8 // r14 265 + st8 [loc1]=r5, 8 // cfm 266 + ;; 267 + st8 [in0]=r15, 8 // r15 268 + st8 [loc1]=r4, 8 // user mask 269 + mov r5=ar.rsc 270 + ;; 271 + st8 [in0]=r16, 8 // r16 272 + st8 [loc1]=r5, 8 // ar.rsc 273 + mov r4=ar.bsp 274 + ;; 275 + st8 [in0]=r17, 8 // r17 276 + st8 [loc1]=r4, 8 // ar.bsp 277 + mov r5=ar.bspstore 278 + ;; 279 + st8 [in0]=r18, 8 // r18 280 + st8 [loc1]=r5, 8 // ar.bspstore 281 + mov r4=ar.rnat 282 + ;; 283 + st8 [in0]=r19, 8 // r19 284 + st8 [loc1]=r4, 8 // ar.rnat 285 + mov r5=ar.ccv 286 + ;; 287 + st8 [in0]=r20, 8 // r20 288 + st8 [loc1]=r5, 8 // ar.ccv 289 + mov r4=ar.unat 290 + ;; 291 + st8 [in0]=r21, 8 // r21 292 + st8 [loc1]=r4, 8 // ar.unat 293 + mov r5 = ar.fpsr 294 + ;; 295 + st8 [in0]=r22, 8 // r22 296 + st8 [loc1]=r5, 8 // ar.fpsr 297 + mov r4 = ar.unat 298 + ;; 299 + st8 [in0]=r23, 8 // r23 300 + st8 [loc1]=r4, 8 // unat 301 + mov r5 = ar.fpsr 302 + ;; 303 + st8 [in0]=r24, 8 // r24 304 + st8 [loc1]=r5, 8 // fpsr 305 + mov r4 = ar.pfs 306 + ;; 307 + st8 [in0]=r25, 8 // r25 308 + st8 [loc1]=r4, 8 // ar.pfs 309 + mov r5 = ar.lc 310 + ;; 311 + st8 [in0]=r26, 8 // r26 312 + st8 
[loc1]=r5, 8 // ar.lc 313 + mov r4 = ar.ec 314 + ;; 315 + st8 [in0]=r27, 8 // r27 316 + st8 [loc1]=r4, 8 // ar.ec 317 + mov r5 = ar.csd 318 + ;; 319 + st8 [in0]=r28, 8 // r28 320 + st8 [loc1]=r5, 8 // ar.csd 321 + mov r4 = ar.ssd 322 + ;; 323 + st8 [in0]=r29, 8 // r29 324 + st8 [loc1]=r4, 8 // ar.ssd 325 + ;; 326 + st8 [in0]=r30, 8 // r30 327 + ;; 328 + st8 [in0]=r31, 8 // r31 329 + mov ar.pfs=loc0 330 + ;; 331 + br.ret.sptk.many rp 332 + END(ia64_dump_cpu_regs) 333 + 334 +
+38
arch/ia64/kernel/setup.c
··· 43 #include <linux/initrd.h> 44 #include <linux/pm.h> 45 #include <linux/cpufreq.h> 46 47 #include <asm/ia32.h> 48 #include <asm/machvec.h> ··· 254 efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end); 255 n++; 256 257 /* end of memory marker */ 258 rsvd_region[n].start = ~0UL; 259 rsvd_region[n].end = ~0UL; ··· 299 300 sort_regions(rsvd_region, num_rsvd_regions); 301 } 302 303 /** 304 * find_initrd - get initrd parameters from the boot parameter structure
··· 43 #include <linux/initrd.h> 44 #include <linux/pm.h> 45 #include <linux/cpufreq.h> 46 + #include <linux/kexec.h> 47 + #include <linux/crash_dump.h> 48 49 #include <asm/ia32.h> 50 #include <asm/machvec.h> ··· 252 efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end); 253 n++; 254 255 + #ifdef CONFIG_KEXEC 256 + /* crashkernel=size@offset specifies the size to reserve for a crash 257 + * kernel. (The offset is ignored, to keep compatibility with other archs.) 258 + * By reserving this memory we guarantee that Linux never sets it 259 + * up as a DMA target. Useful for holding code to do something 260 + * appropriate after a kernel panic. 261 + */ 262 + { 263 + char *from = strstr(saved_command_line, "crashkernel="); 264 + unsigned long base, size; 265 + if (from) { 266 + size = memparse(from + 12, &from); 267 + if (size) { 268 + sort_regions(rsvd_region, n); 269 + base = kdump_find_rsvd_region(size, 270 + rsvd_region, n); 271 + if (base != ~0UL) { 272 + rsvd_region[n].start = 273 + (unsigned long)__va(base); 274 + rsvd_region[n].end = 275 + (unsigned long)__va(base + size); 276 + n++; 277 + crashk_res.start = base; 278 + crashk_res.end = base + size - 1; 279 + } 280 + } 281 + } 282 + efi_memmap_res.start = ia64_boot_param->efi_memmap; 283 + efi_memmap_res.end = efi_memmap_res.start + 284 + ia64_boot_param->efi_memmap_size; 285 + boot_param_res.start = __pa(ia64_boot_param); 286 + boot_param_res.end = boot_param_res.start + 287 + sizeof(*ia64_boot_param); 288 + } 289 + #endif 290 /* end of memory marker */ 291 rsvd_region[n].start = ~0UL; 292 rsvd_region[n].end = ~0UL; ··· 262 263 sort_regions(rsvd_region, num_rsvd_regions); 264 } 265 + 266 267 /** 268 * find_initrd - get initrd parameters from the boot parameter structure
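For example (a hypothetical value), booting this kernel with crashkernel=256M reserves a 256M window at a 64M-aligned address chosen by kdump_find_rsvd_region() in the efi.c hunk above, and publishes it as crashk_res so that a crash kernel can later be loaded into it with kexec -p.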
+27 -1
arch/ia64/kernel/smp.c
··· 30 #include <linux/delay.h> 31 #include <linux/efi.h> 32 #include <linux/bitops.h> 33 34 #include <asm/atomic.h> 35 #include <asm/current.h> ··· 67 68 #define IPI_CALL_FUNC 0 69 #define IPI_CPU_STOP 1 70 71 /* This needs to be cacheline aligned because it is written to by *other* CPUs. */ 72 static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned; ··· 157 case IPI_CPU_STOP: 158 stop_this_cpu(); 159 break; 160 - 161 default: 162 printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which); 163 break; ··· 219 send_IPI_single(smp_processor_id(), op); 220 } 221 222 /* 223 * Called with preemption disabled. 224 */
··· 30 #include <linux/delay.h> 31 #include <linux/efi.h> 32 #include <linux/bitops.h> 33 + #include <linux/kexec.h> 34 35 #include <asm/atomic.h> 36 #include <asm/current.h> ··· 66 67 #define IPI_CALL_FUNC 0 68 #define IPI_CPU_STOP 1 69 + #define IPI_KDUMP_CPU_STOP 3 70 71 /* This needs to be cacheline aligned because it is written to by *other* CPUs. */ 72 static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned; ··· 155 case IPI_CPU_STOP: 156 stop_this_cpu(); 157 break; 158 + #ifdef CONFIG_CRASH_DUMP 159 + case IPI_KDUMP_CPU_STOP: 160 + unw_init_running(kdump_cpu_freeze, NULL); 161 + break; 162 + #endif 163 default: 164 printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which); 165 break; ··· 213 send_IPI_single(smp_processor_id(), op); 214 } 215 216 + #ifdef CONFIG_CRASH_DUMP 217 + void 218 + kdump_smp_send_stop(void) 219 + { 220 + send_IPI_allbutself(IPI_KDUMP_CPU_STOP); 221 + } 222 + 223 + void 224 + kdump_smp_send_init(void) 225 + { 226 + unsigned int cpu, self_cpu; 227 + self_cpu = smp_processor_id(); 228 + for_each_online_cpu(cpu) { 229 + if (cpu != self_cpu) { 230 + if (kdump_status[cpu] == 0) 231 + platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0); 232 + } 233 + } 234 + } 235 + #endif 236 /* 237 * Called with preemption disabled. 238 */
+55 -3
arch/ia64/lib/ip_fast_csum.S
··· 8 * in0: address of buffer to checksum (char *) 9 * in1: length of the buffer (int) 10 * 11 - * Copyright (C) 2002 Intel Corp. 12 - * Copyright (C) 2002 Ken Chen <kenneth.w.chen@intel.com> 13 */ 14 15 #include <asm/asmmacro.h> ··· 25 26 #define in0 r32 27 #define in1 r33 28 #define ret0 r8 29 30 GLOBAL_ENTRY(ip_fast_csum) ··· 68 zxt2 r20=r20 69 ;; 70 add r20=ret0,r20 71 ;; 72 - andcm ret0=-1,r20 73 .restore sp // reset frame state 74 br.ret.sptk.many b0 75 ;; ··· 92 mov b0=r34 93 br.ret.sptk.many b0 94 END(ip_fast_csum)
··· 8 * in0: address of buffer to checksum (char *) 9 * in1: length of the buffer (int) 10 * 11 + * Copyright (C) 2002, 2006 Intel Corp. 12 + * Copyright (C) 2002, 2006 Ken Chen <kenneth.w.chen@intel.com> 13 */ 14 15 #include <asm/asmmacro.h> ··· 25 26 #define in0 r32 27 #define in1 r33 28 + #define in2 r34 29 + #define in3 r35 30 + #define in4 r36 31 #define ret0 r8 32 33 GLOBAL_ENTRY(ip_fast_csum) ··· 65 zxt2 r20=r20 66 ;; 67 add r20=ret0,r20 68 + mov r9=0xffff 69 ;; 70 + andcm ret0=r9,r20 71 .restore sp // reset frame state 72 br.ret.sptk.many b0 73 ;; ··· 88 mov b0=r34 89 br.ret.sptk.many b0 90 END(ip_fast_csum) 91 + 92 + GLOBAL_ENTRY(csum_ipv6_magic) 93 + ld4 r20=[in0],4 94 + ld4 r21=[in1],4 95 + dep r15=in3,in2,32,16 96 + ;; 97 + ld4 r22=[in0],4 98 + ld4 r23=[in1],4 99 + mux1 r15=r15,@rev 100 + ;; 101 + ld4 r24=[in0],4 102 + ld4 r25=[in1],4 103 + shr.u r15=r15,16 104 + add r16=r20,r21 105 + add r17=r22,r23 106 + ;; 107 + ld4 r26=[in0],4 108 + ld4 r27=[in1],4 109 + add r18=r24,r25 110 + add r8=r16,r17 111 + ;; 112 + add r19=r26,r27 113 + add r8=r8,r18 114 + ;; 115 + add r8=r8,r19 116 + add r15=r15,in4 117 + ;; 118 + add r8=r8,r15 119 + ;; 120 + shr.u r10=r8,32 // now fold sum into short 121 + zxt4 r11=r8 122 + ;; 123 + add r8=r10,r11 124 + ;; 125 + shr.u r10=r8,16 // yeah, keep it rolling 126 + zxt2 r11=r8 127 + ;; 128 + add r8=r10,r11 129 + ;; 130 + shr.u r10=r8,16 // three times lucky 131 + zxt2 r11=r8 132 + ;; 133 + add r8=r10,r11 134 + mov r9=0xffff 135 + ;; 136 + andcm r8=r9,r8 137 + br.ret.sptk.many b0 138 + END(csum_ipv6_magic)
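For readers who don't speak ia64 assembly, here is a rough C model of what csum_ipv6_magic computes: the 16-bit one's-complement sum over the IPv6 pseudo-header (source and destination addresses, upper-layer length, next-header value) folded together with the partial payload checksum. The helper name and the big-endian word extraction are illustrative only, not kernel API:

    #include <stdint.h>

    static uint16_t csum_ipv6_magic_model(const uint8_t saddr[16],
                                          const uint8_t daddr[16],
                                          uint32_t len, uint32_t proto,
                                          uint32_t payload_csum)
    {
            uint64_t sum = payload_csum;
            int i;

            /* add both 128-bit addresses as big-endian 16-bit words */
            for (i = 0; i < 16; i += 2) {
                    sum += (uint32_t)((saddr[i] << 8) | saddr[i + 1]);
                    sum += (uint32_t)((daddr[i] << 8) | daddr[i + 1]);
            }
            sum += len;     /* upper-layer packet length */
            sum += proto;   /* next-header (protocol) value */

            /* fold carries back into 16 bits, as the shr.u/zxt2/add pairs do */
            while (sum >> 16)
                    sum = (sum & 0xffff) + (sum >> 16);

            return (uint16_t)~sum;  /* one's complement, as the final andcm does */
    }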
+1 -2
arch/ia64/pci/pci.c
··· 125 { 126 struct pci_controller *controller; 127 128 - controller = kmalloc(sizeof(*controller), GFP_KERNEL); 129 if (!controller) 130 return NULL; 131 132 - memset(controller, 0, sizeof(*controller)); 133 controller->segment = seg; 134 controller->node = -1; 135 return controller;
··· 125 { 126 struct pci_controller *controller; 127 128 + controller = kzalloc(sizeof(*controller), GFP_KERNEL); 129 if (!controller) 130 return NULL; 131 132 controller->segment = seg; 133 controller->node = -1; 134 return controller;
+15 -3
arch/ia64/sn/kernel/irq.c
··· 117 nasid_t nasid, int slice) 118 { 119 int vector; 120 int cpuphys; 121 int64_t bridge; 122 int local_widget, status; 123 nasid_t local_nasid; ··· 149 vector = sn_irq_info->irq_irq; 150 /* Free the old PROM new_irq_info structure */ 151 sn_intr_free(local_nasid, local_widget, new_irq_info); 152 - /* Update kernels new_irq_info with new target info */ 153 unregister_intr_pda(new_irq_info); 154 155 /* allocate a new PROM new_irq_info struct */ ··· 162 return NULL; 163 } 164 165 - cpuphys = nasid_slice_to_cpuid(nasid, slice); 166 - new_irq_info->irq_cpuid = cpuphys; 167 register_intr_pda(new_irq_info); 168 169 pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type]; ··· 184 call_rcu(&sn_irq_info->rcu, sn_irq_info_free); 185 186 #ifdef CONFIG_SMP 187 set_irq_affinity_info((vector & 0xff), cpuphys, 0); 188 #endif 189 ··· 304 nasid_t nasid = sn_irq_info->irq_nasid; 305 int slice = sn_irq_info->irq_slice; 306 int cpu = nasid_slice_to_cpuid(nasid, slice); 307 308 pci_dev_get(pci_dev); 309 sn_irq_info->irq_cpuid = cpu; ··· 319 spin_unlock(&sn_irq_info_lock); 320 321 register_intr_pda(sn_irq_info); 322 } 323 324 void sn_irq_unfixup(struct pci_dev *pci_dev)
··· 117 nasid_t nasid, int slice) 118 { 119 int vector; 120 + int cpuid; 121 + #ifdef CONFIG_SMP 122 int cpuphys; 123 + #endif 124 int64_t bridge; 125 int local_widget, status; 126 nasid_t local_nasid; ··· 146 vector = sn_irq_info->irq_irq; 147 /* Free the old PROM new_irq_info structure */ 148 sn_intr_free(local_nasid, local_widget, new_irq_info); 149 unregister_intr_pda(new_irq_info); 150 151 /* allocate a new PROM new_irq_info struct */ ··· 160 return NULL; 161 } 162 163 + /* Update kernels new_irq_info with new target info */ 164 + cpuid = nasid_slice_to_cpuid(new_irq_info->irq_nasid, 165 + new_irq_info->irq_slice); 166 + new_irq_info->irq_cpuid = cpuid; 167 register_intr_pda(new_irq_info); 168 169 pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type]; ··· 180 call_rcu(&sn_irq_info->rcu, sn_irq_info_free); 181 182 #ifdef CONFIG_SMP 183 + cpuphys = cpu_physical_id(cpuid); 184 set_irq_affinity_info((vector & 0xff), cpuphys, 0); 185 #endif 186 ··· 299 nasid_t nasid = sn_irq_info->irq_nasid; 300 int slice = sn_irq_info->irq_slice; 301 int cpu = nasid_slice_to_cpuid(nasid, slice); 302 + #ifdef CONFIG_SMP 303 + int cpuphys; 304 + #endif 305 306 pci_dev_get(pci_dev); 307 sn_irq_info->irq_cpuid = cpu; ··· 311 spin_unlock(&sn_irq_info_lock); 312 313 register_intr_pda(sn_irq_info); 314 + #ifdef CONFIG_SMP 315 + cpuphys = cpu_physical_id(cpu); 316 + set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0); 317 + #endif 318 } 319 320 void sn_irq_unfixup(struct pci_dev *pci_dev)
-4
arch/ia64/sn/kernel/msi_sn.c
··· 136 */ 137 msg.data = 0x100 + irq; 138 139 - #ifdef CONFIG_SMP 140 - set_irq_affinity_info(irq, sn_irq_info->irq_cpuid, 0); 141 - #endif 142 - 143 write_msi_msg(irq, &msg); 144 set_irq_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq); 145
··· 136 */ 137 msg.data = 0x100 + irq; 138 139 write_msi_msg(irq, &msg); 140 set_irq_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq); 141
+8
arch/ia64/sn/kernel/setup.c
··· 769 return 0; 770 return test_bit(id, sn_prom_features); 771 } 772 EXPORT_SYMBOL(sn_prom_feature_available); 773
··· 769 return 0; 770 return test_bit(id, sn_prom_features); 771 } 772 + 773 + void 774 + sn_kernel_launch_event(void) 775 + { 776 + /* ignore status until we understand possible failure, if any */ 777 + if (ia64_sn_kernel_launch_event()) 778 + printk(KERN_ERR "kexec is not supported in this PROM; please update the PROM.\n"); 779 + } 780 EXPORT_SYMBOL(sn_prom_feature_available); 781
+6
include/asm-ia64/checksum.h
··· 70 return (__force __sum16)~sum; 71 } 72 73 #endif /* _ASM_IA64_CHECKSUM_H */
··· 70 return (__force __sum16)~sum; 71 } 72 73 + #define _HAVE_ARCH_IPV6_CSUM 1 74 + struct in6_addr; 75 + extern unsigned short int csum_ipv6_magic(struct in6_addr *saddr, 76 + struct in6_addr *daddr, __u32 len, unsigned short proto, 77 + unsigned int csum); 78 + 79 #endif /* _ASM_IA64_CHECKSUM_H */
+47
include/asm-ia64/kexec.h
···
··· 1 + #ifndef _ASM_IA64_KEXEC_H 2 + #define _ASM_IA64_KEXEC_H 3 + 4 + 5 + /* Maximum physical address we can use pages from */ 6 + #define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) 7 + /* Maximum address we can reach in physical address mode */ 8 + #define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) 9 + /* Maximum address we can use for the control code buffer */ 10 + #define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE 11 + 12 + #define KEXEC_CONTROL_CODE_SIZE (8192 + 8192 + 4096) 13 + 14 + /* The native architecture */ 15 + #define KEXEC_ARCH KEXEC_ARCH_IA_64 16 + 17 + #define MAX_NOTE_BYTES 1024 18 + 19 + #define kexec_flush_icache_page(page) do { \ 20 + unsigned long page_addr = (unsigned long)page_address(page); \ 21 + flush_icache_range(page_addr, page_addr + PAGE_SIZE); \ 22 + } while (0) 23 + 24 + extern struct kimage *ia64_kimage; 25 + DECLARE_PER_CPU(u64, ia64_mca_pal_base); 26 + extern const unsigned int relocate_new_kernel_size; 27 + extern void relocate_new_kernel(unsigned long, unsigned long, 28 + struct ia64_boot_param *, unsigned long); 29 + static inline void 30 + crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs) 31 + { 32 + } 33 + extern struct resource efi_memmap_res; 34 + extern struct resource boot_param_res; 35 + extern void kdump_smp_send_stop(void); 36 + extern void kdump_smp_send_init(void); 37 + extern void kexec_disable_iosapic(void); 38 + extern void crash_save_this_cpu(void); 39 + struct rsvd_region; 40 + extern unsigned long kdump_find_rsvd_region(unsigned long size, 41 + struct rsvd_region *rsvd_regions, int n); 42 + extern void kdump_cpu_freeze(struct unw_frame_info *info, void *arg); 43 + extern int kdump_status[]; 44 + extern atomic_t kdump_cpu_freezed; 45 + extern atomic_t kdump_in_progress; 46 + 47 + #endif /* _ASM_IA64_KEXEC_H */
+5
include/asm-ia64/machvec.h
··· 37 u8 size); 38 typedef void ia64_mv_migrate_t(struct task_struct * task); 39 typedef void ia64_mv_pci_fixup_bus_t (struct pci_bus *); 40 41 /* DMA-mapping interface: */ 42 typedef void ia64_mv_dma_init (void); ··· 219 ia64_mv_setup_msi_irq_t *setup_msi_irq; 220 ia64_mv_teardown_msi_irq_t *teardown_msi_irq; 221 ia64_mv_pci_fixup_bus_t *pci_fixup_bus; 222 } __attribute__((__aligned__(16))); /* align attrib? see above comment */ 223 224 #define MACHVEC_INIT(name) \ ··· 319 #endif 320 #ifndef platform_tlb_migrate_finish 321 # define platform_tlb_migrate_finish machvec_noop_mm 322 #endif 323 #ifndef platform_dma_init 324 # define platform_dma_init swiotlb_init
··· 37 u8 size); 38 typedef void ia64_mv_migrate_t(struct task_struct * task); 39 typedef void ia64_mv_pci_fixup_bus_t (struct pci_bus *); 40 + typedef void ia64_mv_kernel_launch_event_t(void); 41 42 /* DMA-mapping interface: */ 43 typedef void ia64_mv_dma_init (void); ··· 218 ia64_mv_setup_msi_irq_t *setup_msi_irq; 219 ia64_mv_teardown_msi_irq_t *teardown_msi_irq; 220 ia64_mv_pci_fixup_bus_t *pci_fixup_bus; 221 + ia64_mv_kernel_launch_event_t *kernel_launch_event; 222 } __attribute__((__aligned__(16))); /* align attrib? see above comment */ 223 224 #define MACHVEC_INIT(name) \ ··· 317 #endif 318 #ifndef platform_tlb_migrate_finish 319 # define platform_tlb_migrate_finish machvec_noop_mm 320 + #endif 321 + #ifndef platform_kernel_launch_event 322 + # define platform_kernel_launch_event machvec_noop 323 #endif 324 #ifndef platform_dma_init 325 # define platform_dma_init swiotlb_init
+2
include/asm-ia64/machvec_sn2.h
··· 67 extern ia64_mv_dma_mapping_error sn_dma_mapping_error; 68 extern ia64_mv_dma_supported sn_dma_supported; 69 extern ia64_mv_migrate_t sn_migrate; 70 extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq; 71 extern ia64_mv_teardown_msi_irq_t sn_teardown_msi_irq; 72 extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus; ··· 122 #define platform_dma_mapping_error sn_dma_mapping_error 123 #define platform_dma_supported sn_dma_supported 124 #define platform_migrate sn_migrate 125 #ifdef CONFIG_PCI_MSI 126 #define platform_setup_msi_irq sn_setup_msi_irq 127 #define platform_teardown_msi_irq sn_teardown_msi_irq
··· 67 extern ia64_mv_dma_mapping_error sn_dma_mapping_error; 68 extern ia64_mv_dma_supported sn_dma_supported; 69 extern ia64_mv_migrate_t sn_migrate; 70 + extern ia64_mv_kernel_launch_event_t sn_kernel_launch_event; 71 extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq; 72 extern ia64_mv_teardown_msi_irq_t sn_teardown_msi_irq; 73 extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus; ··· 121 #define platform_dma_mapping_error sn_dma_mapping_error 122 #define platform_dma_supported sn_dma_supported 123 #define platform_migrate sn_migrate 124 + #define platform_kernel_launch_event sn_kernel_launch_event 125 #ifdef CONFIG_PCI_MSI 126 #define platform_setup_msi_irq sn_setup_msi_irq 127 #define platform_teardown_msi_irq sn_teardown_msi_irq
+2 -1
include/asm-ia64/meminit.h
··· 15 * - initrd (optional) 16 * - command line string 17 * - kernel code & data 18 * - Kernel memory map built from EFI memory map 19 * 20 * More could be added if necessary 21 */ 22 - #define IA64_MAX_RSVD_REGIONS 6 23 24 struct rsvd_region { 25 unsigned long start; /* virtual address of beginning of element */
··· 15 * - initrd (optional) 16 * - command line string 17 * - kernel code & data 18 + * - crash dumping code reserved region 19 * - Kernel memory map built from EFI memory map 20 * 21 * More could be added if necessary 22 */ 23 + #define IA64_MAX_RSVD_REGIONS 7 24 25 struct rsvd_region { 26 unsigned long start; /* virtual address of beginning of element */
+5 -6
include/asm-ia64/page.h
··· 101 102 #ifdef CONFIG_VIRTUAL_MEM_MAP 103 extern int ia64_pfn_valid (unsigned long pfn); 104 - #elif defined(CONFIG_FLATMEM) 105 # define ia64_pfn_valid(pfn) 1 106 #endif 107 ··· 110 #ifdef CONFIG_DISCONTIGMEM 111 # define page_to_pfn(page) ((unsigned long) (page - vmem_map)) 112 # define pfn_to_page(pfn) (vmem_map + (pfn)) 113 #endif 114 - #endif 115 - 116 - #if defined(CONFIG_FLATMEM) || defined(CONFIG_SPARSEMEM) 117 - /* FLATMEM always configures mem_map (mem_map = vmem_map if necessary) */ 118 - #include <asm-generic/memory_model.h> 119 #endif 120 121 #ifdef CONFIG_FLATMEM
··· 101 102 #ifdef CONFIG_VIRTUAL_MEM_MAP 103 extern int ia64_pfn_valid (unsigned long pfn); 104 + #else 105 # define ia64_pfn_valid(pfn) 1 106 #endif 107 ··· 110 #ifdef CONFIG_DISCONTIGMEM 111 # define page_to_pfn(page) ((unsigned long) (page - vmem_map)) 112 # define pfn_to_page(pfn) (vmem_map + (pfn)) 113 + #else 114 + # include <asm-generic/memory_model.h> 115 #endif 116 + #else 117 + # include <asm-generic/memory_model.h> 118 #endif 119 120 #ifdef CONFIG_FLATMEM
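The rewritten conditionals let DISCONTIGMEM without VIRTUAL_MEM_MAP fall through to asm-generic/memory_model.h instead of losing page_to_pfn/pfn_to_page entirely, and ia64_pfn_valid() now degrades to a constant 1 in every non-VIRTUAL_MEM_MAP configuration. A small illustrative pfn walk that relies on both properties (not a call site from this merge):

	#include <asm/page.h>

	/*
	 * Counts pfns backed by real memory; holes can only be detected
	 * when CONFIG_VIRTUAL_MEM_MAP provides a real ia64_pfn_valid().
	 */
	static unsigned long count_valid_pfns(unsigned long start, unsigned long end)
	{
		unsigned long pfn, n = 0;

		for (pfn = start; pfn < end; pfn++)
			if (ia64_pfn_valid(pfn))
				n++;
		return n;
	}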
+56 -8
include/asm-ia64/pal.h
··· 20 * 00/05/24 eranian Updated to latest PAL spec, fix structures bugs, added 21 * 00/05/25 eranian Support for stack calls, and static physical calls 22 * 00/06/18 eranian Support for stacked physical calls 23 */ 24 25 /* ··· 71 #define PAL_PREFETCH_VISIBILITY 41 /* Make Processor Prefetches Visible */ 72 #define PAL_LOGICAL_TO_PHYSICAL 42 /* returns information on logical to physical processor mapping */ 73 #define PAL_CACHE_SHARED_INFO 43 /* returns information on caches shared by logical processor */ 74 75 #define PAL_COPY_PAL 256 /* relocate PAL procedures and PAL PMI */ 76 #define PAL_HALT_INFO 257 /* return the low power capabilities of processor */ ··· 83 #define PAL_GET_PSTATE 262 /* get the current P-state */ 84 #define PAL_SET_PSTATE 263 /* set the P-state */ 85 #define PAL_BRAND_INFO 274 /* Processor branding information */ 86 87 #ifndef __ASSEMBLY__ 88 ··· 111 * cache without sideeffects 112 * and "restrict" was 1 113 */ 114 115 /* Processor cache level in the heirarchy */ 116 typedef u64 pal_cache_level_t; ··· 466 * by the processor 467 */ 468 469 - reserved2 : 11, 470 cc : 1, /* Cache check */ 471 tc : 1, /* TLB check */ 472 bc : 1, /* Bus check */ ··· 499 * error occurred 500 */ 501 wiv : 1, /* Way field valid */ 502 - reserved2 : 10, 503 504 index : 20, /* Cache line index */ 505 - reserved3 : 2, 506 507 is : 1, /* instruction set (1 == ia32) */ 508 iv : 1, /* instruction set field valid */ ··· 571 type : 8, /* Bus xaction type*/ 572 sev : 5, /* Bus error severity*/ 573 hier : 2, /* Bus hierarchy level */ 574 - reserved1 : 1, 575 bsi : 8, /* Bus error status 576 * info 577 */ ··· 848 u64 pbf_req_bus_parking : 1; 849 u64 pbf_bus_lock_mask : 1; 850 u64 pbf_enable_half_xfer_rate : 1; 851 - u64 pbf_reserved2 : 22; 852 u64 pbf_disable_xaction_queueing : 1; 853 u64 pbf_disable_resp_err_check : 1; 854 u64 pbf_disable_berr_check : 1; ··· 1093 return iprv.status; 1094 } 1095 1096 /* Make the processor enter HALT or one of the implementation dependent low 1097 * power states where prefetching and execution are suspended and cache and 1098 * TLB coherency is not maintained. ··· 1146 1147 /* Get the current P-state information */ 1148 static inline s64 1149 - ia64_pal_get_pstate (u64 *pstate_index) 1150 { 1151 struct ia64_pal_retval iprv; 1152 - PAL_CALL_STK(iprv, PAL_GET_PSTATE, 0, 0, 0); 1153 *pstate_index = iprv.v0; 1154 return iprv.status; 1155 } ··· 1435 return iprv.status; 1436 } 1437 1438 /* Cause the processor to enter SHUTDOWN state, where prefetching and execution are 1439 * suspended, but cause cache and TLB coherency to be maintained. 1440 * This is usually called in IA-32 mode. ··· 1569 } pal_vm_info_1_s; 1570 } pal_vm_info_1_u_t; 1571 1572 typedef union pal_vm_info_2_u { 1573 u64 pvi2_val; 1574 struct { 1575 u64 impl_va_msb : 8, 1576 rid_size : 8, 1577 - reserved : 48; 1578 } pal_vm_info_2_s; 1579 } pal_vm_info_2_u_t; 1580
··· 20 * 00/05/24 eranian Updated to latest PAL spec, fix structures bugs, added 21 * 00/05/25 eranian Support for stack calls, and static physical calls 22 * 00/06/18 eranian Support for stacked physical calls 23 + * 06/10/26 rja Support for Intel Itanium Architecture Software Developer's 24 + * Manual Rev 2.2 (Jan 2006) 25 */ 26 27 /* ··· 69 #define PAL_PREFETCH_VISIBILITY 41 /* Make Processor Prefetches Visible */ 70 #define PAL_LOGICAL_TO_PHYSICAL 42 /* returns information on logical to physical processor mapping */ 71 #define PAL_CACHE_SHARED_INFO 43 /* returns information on caches shared by logical processor */ 72 + #define PAL_GET_HW_POLICY 48 /* Get current hardware resource sharing policy */ 73 + #define PAL_SET_HW_POLICY 49 /* Set current hardware resource sharing policy */ 74 75 #define PAL_COPY_PAL 256 /* relocate PAL procedures and PAL PMI */ 76 #define PAL_HALT_INFO 257 /* return the low power capabilities of processor */ ··· 79 #define PAL_GET_PSTATE 262 /* get the current P-state */ 80 #define PAL_SET_PSTATE 263 /* set the P-state */ 81 #define PAL_BRAND_INFO 274 /* Processor branding information */ 82 + 83 + #define PAL_GET_PSTATE_TYPE_LASTSET 0 84 + #define PAL_GET_PSTATE_TYPE_AVGANDRESET 1 85 + #define PAL_GET_PSTATE_TYPE_AVGNORESET 2 86 + #define PAL_GET_PSTATE_TYPE_INSTANT 3 87 88 #ifndef __ASSEMBLY__ 89 ··· 102 * cache without sideeffects 103 * and "restrict" was 1 104 */ 105 + #define PAL_STATUS_REQUIRES_MEMORY (-9) /* Call requires PAL memory buffer */ 106 107 /* Processor cache level in the heirarchy */ 108 typedef u64 pal_cache_level_t; ··· 456 * by the processor 457 */ 458 459 + se : 1, /* Shared error. MCA in a 460 + shared structure */ 461 + reserved2 : 10, 462 cc : 1, /* Cache check */ 463 tc : 1, /* TLB check */ 464 bc : 1, /* Bus check */ ··· 487 * error occurred 488 */ 489 wiv : 1, /* Way field valid */ 490 + reserved2 : 1, 491 + dp : 1, /* Data poisoned on MBE */ 492 + reserved3 : 8, 493 494 index : 20, /* Cache line index */ 495 + reserved4 : 2, 496 497 is : 1, /* instruction set (1 == ia32) */ 498 iv : 1, /* instruction set field valid */ ··· 557 type : 8, /* Bus xaction type*/ 558 sev : 5, /* Bus error severity*/ 559 hier : 2, /* Bus hierarchy level */ 560 + dp : 1, /* Data poisoned on MBE */ 561 bsi : 8, /* Bus error status 562 * info 563 */ ··· 834 u64 pbf_req_bus_parking : 1; 835 u64 pbf_bus_lock_mask : 1; 836 u64 pbf_enable_half_xfer_rate : 1; 837 + u64 pbf_reserved2 : 20; 838 + u64 pbf_enable_shared_line_replace : 1; 839 + u64 pbf_enable_exclusive_line_replace : 1; 840 u64 pbf_disable_xaction_queueing : 1; 841 u64 pbf_disable_resp_err_check : 1; 842 u64 pbf_disable_berr_check : 1; ··· 1077 return iprv.status; 1078 } 1079 1080 + /* 1081 + * Get the current hardware resource sharing policy of the processor 1082 + */ 1083 + static inline s64 1084 + ia64_pal_get_hw_policy (u64 proc_num, u64 *cur_policy, u64 *num_impacted, 1085 + u64 *la) 1086 + { 1087 + struct ia64_pal_retval iprv; 1088 + PAL_CALL(iprv, PAL_GET_HW_POLICY, proc_num, 0, 0); 1089 + if (cur_policy) 1090 + *cur_policy = iprv.v0; 1091 + if (num_impacted) 1092 + *num_impacted = iprv.v1; 1093 + if (la) 1094 + *la = iprv.v2; 1095 + return iprv.status; 1096 + } 1097 + 1098 /* Make the processor enter HALT or one of the implementation dependent low 1099 * power states where prefetching and execution are suspended and cache and 1100 * TLB coherency is not maintained. 
··· 1112 1113 /* Get the current P-state information */ 1114 static inline s64 1115 + ia64_pal_get_pstate (u64 *pstate_index, unsigned long type) 1116 { 1117 struct ia64_pal_retval iprv; 1118 + PAL_CALL_STK(iprv, PAL_GET_PSTATE, type, 0, 0); 1119 *pstate_index = iprv.v0; 1120 return iprv.status; 1121 } ··· 1401 return iprv.status; 1402 } 1403 1404 + /* 1405 + * Set the current hardware resource sharing policy of the processor 1406 + */ 1407 + static inline s64 1408 + ia64_pal_set_hw_policy (u64 policy) 1409 + { 1410 + struct ia64_pal_retval iprv; 1411 + PAL_CALL(iprv, PAL_SET_HW_POLICY, policy, 0, 0); 1412 + return iprv.status; 1413 + } 1414 + 1415 /* Cause the processor to enter SHUTDOWN state, where prefetching and execution are 1416 * suspended, but cause cache and TLB coherency to be maintained. 1417 * This is usually called in IA-32 mode. ··· 1524 } pal_vm_info_1_s; 1525 } pal_vm_info_1_u_t; 1526 1527 + #define PAL_MAX_PURGES 0xFFFF /* all ones means unlimited */ 1528 + 1529 typedef union pal_vm_info_2_u { 1530 u64 pvi2_val; 1531 struct { 1532 u64 impl_va_msb : 8, 1533 rid_size : 8, 1534 + max_purges : 16, 1535 + reserved : 32; 1536 } pal_vm_info_2_s; 1537 } pal_vm_info_2_u_t; 1538
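Callers of ia64_pal_get_pstate() must now pass one of the four PAL_GET_PSTATE_TYPE_* selectors. A minimal sketch exercising the new signature plus the GET_HW_POLICY call; the printk text is illustrative and the casts assume u64 is unsigned long on ia64:

	#include <linux/kernel.h>
	#include <asm/pal.h>

	static void pal_query_demo(void)
	{
		u64 pstate, policy, impacted, la;

		/* Instantaneous P-state, per the new type argument. */
		if (ia64_pal_get_pstate(&pstate, PAL_GET_PSTATE_TYPE_INSTANT) == 0)
			printk(KERN_INFO "P-state index: %lu\n",
			       (unsigned long) pstate);

		/* Current hardware resource sharing policy for CPU 0. */
		if (ia64_pal_get_hw_policy(0, &policy, &impacted, &la) == 0)
			printk(KERN_INFO "hw policy: %lu\n",
			       (unsigned long) policy);
	}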
+9
include/asm-ia64/sn/sn_sal.h
··· 88 #define SN_SAL_INJECT_ERROR 0x02000067 89 #define SN_SAL_SET_CPU_NUMBER 0x02000068 90 91 /* 92 * Service-specific constants 93 */ ··· 1155 struct ia64_sal_retval rv; 1156 1157 SAL_CALL_NOLOCK(rv, SN_SAL_SET_CPU_NUMBER, cpu, 0, 0, 0, 0, 0, 0); 1158 return rv.status; 1159 } 1160 #endif /* _ASM_IA64_SN_SN_SAL_H */
··· 88 #define SN_SAL_INJECT_ERROR 0x02000067 89 #define SN_SAL_SET_CPU_NUMBER 0x02000068 90 91 + #define SN_SAL_KERNEL_LAUNCH_EVENT 0x02000069 92 + 93 /* 94 * Service-specific constants 95 */ ··· 1153 struct ia64_sal_retval rv; 1154 1155 SAL_CALL_NOLOCK(rv, SN_SAL_SET_CPU_NUMBER, cpu, 0, 0, 0, 0, 0, 0); 1156 + return rv.status; 1157 + } 1158 + static inline int 1159 + ia64_sn_kernel_launch_event(void) 1160 + { 1161 + struct ia64_sal_retval rv; 1162 + SAL_CALL_NOLOCK(rv, SN_SAL_KERNEL_LAUNCH_EVENT, 0, 0, 0, 0, 0, 0, 0); 1163 return rv.status; 1164 } 1165 #endif /* _ASM_IA64_SN_SN_SAL_H */
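ia64_sn_kernel_launch_event() is the SAL back end for the sn_kernel_launch_event machvec hook declared in machvec_sn2.h above. A sketch of how the hook might forward to it, assuming a negative status indicates a PROM that predates SN_SAL_KERNEL_LAUNCH_EVENT; the real implementation in this merge may differ:

	#include <linux/kernel.h>
	#include <asm/sn/sn_sal.h>

	void sn_kernel_launch_event(void)
	{
		/* Tell the PROM a new kernel (e.g. kexec'd) is starting. */
		if (ia64_sn_kernel_launch_event() < 0)
			printk(KERN_WARNING "PROM lacks "
			       "SN_SAL_KERNEL_LAUNCH_EVENT support\n");
	}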
+5
include/linux/kexec.h
··· 109 extern struct kimage *kexec_image; 110 extern struct kimage *kexec_crash_image; 111 112 #define KEXEC_ON_CRASH 0x00000001 113 #define KEXEC_ARCH_MASK 0xffff0000 114 ··· 137 extern struct resource crashk_res; 138 typedef u32 note_buf_t[MAX_NOTE_BYTES/4]; 139 extern note_buf_t *crash_notes; 140 141 #else /* !CONFIG_KEXEC */ 142 struct pt_regs;
··· 109 extern struct kimage *kexec_image; 110 extern struct kimage *kexec_crash_image; 111 112 + #ifndef kexec_flush_icache_page 113 + #define kexec_flush_icache_page(page) 114 + #endif 115 + 116 #define KEXEC_ON_CRASH 0x00000001 117 #define KEXEC_ARCH_MASK 0xffff0000 118 ··· 133 extern struct resource crashk_res; 134 typedef u32 note_buf_t[MAX_NOTE_BYTES/4]; 135 extern note_buf_t *crash_notes; 136 + 137 138 #else /* !CONFIG_KEXEC */ 139 struct pt_regs;
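kexec_flush_icache_page() is a new per-page hook that defaults to a no-op; an architecture whose instruction caches need explicit flushing overrides it in asm/kexec.h before this header is included, so the segment loader (see kernel/kexec.c below) hands coherent text to the new kernel. A sketch of such an override, assuming no highmem so page_address() suffices (as on ia64):

	/* Hypothetical asm/kexec.h override; uses flush_icache_range(). */
	#define kexec_flush_icache_page(page)					\
	do {									\
		unsigned long __addr = (unsigned long) page_address(page);	\
		flush_icache_range(__addr, __addr + PAGE_SIZE);			\
	} while (0)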
+1
kernel/kexec.c
··· 852 memset(ptr + uchunk, 0, mchunk - uchunk); 853 } 854 result = copy_from_user(ptr, buf, uchunk); 855 kunmap(page); 856 if (result) { 857 result = (result < 0) ? result : -EIO;
··· 852 memset(ptr + uchunk, 0, mchunk - uchunk); 853 } 854 result = copy_from_user(ptr, buf, uchunk); 855 + kexec_flush_icache_page(page); 856 kunmap(page); 857 if (result) { 858 result = (result < 0) ? result : -EIO;