Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6: (29 commits)
[IA64] BUG to BUG_ON changes
[IA64] Fix typo/thinko in arch/ia64/sn/kernel/sn2/sn2_smp.c
ia64: remove some warnings.
ia64/xen: fix the link error.
ia64/pv_ops/bp/xen: implemented binary patchable pv_cpu_ops.
ia64/pv_ops/binary patch: define paravirt_dv_serialize_data() and suppress false positive warning.
ia64/pv_ops/bp/module: support binary patching for kernel module.
ia64/pv_ops: implement binary patching optimization for native.
ia64/pv_op/binarypatch: add helper functions to support binary patching for paravirt_ops.
ia64/pv_ops/xen/gate.S: xen gate page paravirtualization
ia64/pv_ops: paravirtualize gate.S.
ia64/pv_ops: move down __kernel_syscall_via_epc.
ia64/pv_ops/xen: define xen specific gate page.
ia64/pv_ops: gate page paravirtualization.
ia64/pv_ops/xen/pv_time_ops: implement sched_clock.
ia64/pv_ops/pv_time_ops: add sched_clock hook.
ia64/pv_ops/xen: paravirtualize read/write ar.itc and ar.itm
ia64/pv_ops: paravirtualize mov = ar.itc.
ia64/pv_ops/pvchecker: support mov = ar.itc paravirtualization
ia64/pv_ops: paravirtualize fsys.S.
...

+3188 -290
+5 -1
arch/ia64/include/asm/intrinsics.h
··· 202 202 203 203 #ifndef __ASSEMBLY__ 204 204 #if defined(CONFIG_PARAVIRT) && defined(__KERNEL__) 205 - #define IA64_INTRINSIC_API(name) pv_cpu_ops.name 205 + #ifdef ASM_SUPPORTED 206 + # define IA64_INTRINSIC_API(name) paravirt_ ## name 207 + #else 208 + # define IA64_INTRINSIC_API(name) pv_cpu_ops.name 209 + #endif 206 210 #define IA64_INTRINSIC_MACRO(name) paravirt_ ## name 207 211 #else 208 212 #define IA64_INTRINSIC_API(name) ia64_native_ ## name
+3 -3
arch/ia64/include/asm/mmu_context.h
··· 87 87 /* re-check, now that we've got the lock: */ 88 88 context = mm->context; 89 89 if (context == 0) { 90 - cpus_clear(mm->cpu_vm_mask); 90 + cpumask_clear(mm_cpumask(mm)); 91 91 if (ia64_ctx.next >= ia64_ctx.limit) { 92 92 ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap, 93 93 ia64_ctx.max_ctx, ia64_ctx.next); ··· 166 166 167 167 do { 168 168 context = get_mmu_context(mm); 169 - if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) 170 - cpu_set(smp_processor_id(), mm->cpu_vm_mask); 169 + if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) 170 + cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); 171 171 reload_context(context); 172 172 /* 173 173 * in the unlikely event of a TLB-flush by another thread,
+6
arch/ia64/include/asm/module.h
··· 16 16 struct elf64_shdr *got; /* global offset table */ 17 17 struct elf64_shdr *opd; /* official procedure descriptors */ 18 18 struct elf64_shdr *unwind; /* unwind-table section */ 19 + #ifdef CONFIG_PARAVIRT 20 + struct elf64_shdr *paravirt_bundles; 21 + /* paravirt_alt_bundle_patch table */ 22 + struct elf64_shdr *paravirt_insts; 23 + /* paravirt_alt_inst_patch table */ 24 + #endif 19 25 unsigned long gp; /* global-pointer for module */ 20 26 21 27 void *core_unw_table; /* core unwind-table cookie returned by unwinder */
+13
arch/ia64/include/asm/native/inst.h
··· 30 30 #define __paravirt_work_processed_syscall_target \ 31 31 ia64_work_processed_syscall 32 32 33 + #define paravirt_fsyscall_table ia64_native_fsyscall_table 34 + #define paravirt_fsys_bubble_down ia64_native_fsys_bubble_down 35 + 33 36 #ifdef CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK 34 37 # define PARAVIRT_POISON 0xdeadbeefbaadf00d 35 38 # define CLOBBER(clob) \ ··· 76 73 #define MOV_FROM_PSR(pred, reg, clob) \ 77 74 (pred) mov reg = psr \ 78 75 CLOBBER(clob) 76 + 77 + #define MOV_FROM_ITC(pred, pred_clob, reg, clob) \ 78 + (pred) mov reg = ar.itc \ 79 + CLOBBER(clob) \ 80 + CLOBBER_PRED(pred_clob) 79 81 80 82 #define MOV_TO_IFA(reg, clob) \ 81 83 mov cr.ifa = reg \ ··· 165 157 166 158 #define RSM_PSR_DT \ 167 159 rsm psr.dt 160 + 161 + #define RSM_PSR_BE_I(clob0, clob1) \ 162 + rsm psr.be | psr.i \ 163 + CLOBBER(clob0) \ 164 + CLOBBER(clob1) 168 165 169 166 #define SSM_PSR_DT_AND_SRLZ_I \ 170 167 ssm psr.dt \
+38
arch/ia64/include/asm/native/patchlist.h
··· 1 + /****************************************************************************** 2 + * arch/ia64/include/asm/native/patchlist.h 3 + * 4 + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> 5 + * VA Linux Systems Japan K.K. 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License as published by 9 + * the Free Software Foundation; either version 2 of the License, or 10 + * (at your option) any later version. 11 + * 12 + * This program is distributed in the hope that it will be useful, 13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 + * GNU General Public License for more details. 16 + * 17 + * You should have received a copy of the GNU General Public License 18 + * along with this program; if not, write to the Free Software 19 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 + * 21 + */ 22 + 23 + #define __paravirt_start_gate_fsyscall_patchlist \ 24 + __ia64_native_start_gate_fsyscall_patchlist 25 + #define __paravirt_end_gate_fsyscall_patchlist \ 26 + __ia64_native_end_gate_fsyscall_patchlist 27 + #define __paravirt_start_gate_brl_fsys_bubble_down_patchlist \ 28 + __ia64_native_start_gate_brl_fsys_bubble_down_patchlist 29 + #define __paravirt_end_gate_brl_fsys_bubble_down_patchlist \ 30 + __ia64_native_end_gate_brl_fsys_bubble_down_patchlist 31 + #define __paravirt_start_gate_vtop_patchlist \ 32 + __ia64_native_start_gate_vtop_patchlist 33 + #define __paravirt_end_gate_vtop_patchlist \ 34 + __ia64_native_end_gate_vtop_patchlist 35 + #define __paravirt_start_gate_mckinley_e9_patchlist \ 36 + __ia64_native_start_gate_mckinley_e9_patchlist 37 + #define __paravirt_end_gate_mckinley_e9_patchlist \ 38 + __ia64_native_end_gate_mckinley_e9_patchlist
+8
arch/ia64/include/asm/native/pvchk_inst.h
··· 180 180 IS_PRED_IN(pred) \ 181 181 IS_RREG_OUT(reg) \ 182 182 IS_RREG_CLOB(clob) 183 + #define MOV_FROM_ITC(pred, pred_clob, reg, clob) \ 184 + IS_PRED_IN(pred) \ 185 + IS_PRED_CLOB(pred_clob) \ 186 + IS_RREG_OUT(reg) \ 187 + IS_RREG_CLOB(clob) 183 188 #define MOV_TO_IFA(reg, clob) \ 184 189 IS_RREG_IN(reg) \ 185 190 IS_RREG_CLOB(clob) ··· 251 246 IS_RREG_CLOB(clob2) 252 247 #define RSM_PSR_DT \ 253 248 nop 0 249 + #define RSM_PSR_BE_I(clob0, clob1) \ 250 + IS_RREG_CLOB(clob0) \ 251 + IS_RREG_CLOB(clob1) 254 252 #define SSM_PSR_DT_AND_SRLZ_I \ 255 253 nop 0 256 254 #define BSW_0(clob0, clob1, clob2) \
+65
arch/ia64/include/asm/paravirt.h
··· 22 22 #ifndef __ASM_PARAVIRT_H 23 23 #define __ASM_PARAVIRT_H 24 24 25 + #ifndef __ASSEMBLY__ 26 + /****************************************************************************** 27 + * fsys related addresses 28 + */ 29 + struct pv_fsys_data { 30 + unsigned long *fsyscall_table; 31 + void *fsys_bubble_down; 32 + }; 33 + 34 + extern struct pv_fsys_data pv_fsys_data; 35 + 36 + unsigned long *paravirt_get_fsyscall_table(void); 37 + char *paravirt_get_fsys_bubble_down(void); 38 + 39 + /****************************************************************************** 40 + * patchlist addresses for gate page 41 + */ 42 + enum pv_gate_patchlist { 43 + PV_GATE_START_FSYSCALL, 44 + PV_GATE_END_FSYSCALL, 45 + 46 + PV_GATE_START_BRL_FSYS_BUBBLE_DOWN, 47 + PV_GATE_END_BRL_FSYS_BUBBLE_DOWN, 48 + 49 + PV_GATE_START_VTOP, 50 + PV_GATE_END_VTOP, 51 + 52 + PV_GATE_START_MCKINLEY_E9, 53 + PV_GATE_END_MCKINLEY_E9, 54 + }; 55 + 56 + struct pv_patchdata { 57 + unsigned long start_fsyscall_patchlist; 58 + unsigned long end_fsyscall_patchlist; 59 + unsigned long start_brl_fsys_bubble_down_patchlist; 60 + unsigned long end_brl_fsys_bubble_down_patchlist; 61 + unsigned long start_vtop_patchlist; 62 + unsigned long end_vtop_patchlist; 63 + unsigned long start_mckinley_e9_patchlist; 64 + unsigned long end_mckinley_e9_patchlist; 65 + 66 + void *gate_section; 67 + }; 68 + 69 + extern struct pv_patchdata pv_patchdata; 70 + 71 + unsigned long paravirt_get_gate_patchlist(enum pv_gate_patchlist type); 72 + void *paravirt_get_gate_section(void); 73 + #endif 74 + 25 75 #ifdef CONFIG_PARAVIRT_GUEST 26 76 27 77 #define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0 ··· 118 68 int (*arch_setup_nomca)(void); 119 69 120 70 void (*post_smp_prepare_boot_cpu)(void); 71 + 72 + #ifdef ASM_SUPPORTED 73 + unsigned long (*patch_bundle)(void *sbundle, void *ebundle, 74 + unsigned long type); 75 + unsigned long (*patch_inst)(unsigned long stag, unsigned long etag, 76 + unsigned long type); 77 + #endif 78 + void 
(*patch_branch)(unsigned long tag, unsigned long type); 121 79 }; 122 80 123 81 extern struct pv_init_ops pv_init_ops; ··· 268 210 int (*do_steal_accounting)(unsigned long *new_itm); 269 211 270 212 void (*clocksource_resume)(void); 213 + 214 + unsigned long long (*sched_clock)(void); 271 215 }; 272 216 273 217 extern struct pv_time_ops pv_time_ops; ··· 285 225 paravirt_do_steal_accounting(unsigned long *new_itm) 286 226 { 287 227 return pv_time_ops.do_steal_accounting(new_itm); 228 + } 229 + 230 + static inline unsigned long long paravirt_sched_clock(void) 231 + { 232 + return pv_time_ops.sched_clock(); 288 233 } 289 234 290 235 #endif /* !__ASSEMBLY__ */
+143
arch/ia64/include/asm/paravirt_patch.h
··· 1 + /****************************************************************************** 2 + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> 3 + * VA Linux Systems Japan K.K. 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License as published by 7 + * the Free Software Foundation; either version 2 of the License, or 8 + * (at your option) any later version. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program; if not, write to the Free Software 17 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 + * 19 + */ 20 + 21 + #ifndef __ASM_PARAVIRT_PATCH_H 22 + #define __ASM_PARAVIRT_PATCH_H 23 + 24 + #ifdef __ASSEMBLY__ 25 + 26 + .section .paravirt_branches, "a" 27 + .previous 28 + #define PARAVIRT_PATCH_SITE_BR(type) \ 29 + { \ 30 + [1:] ; \ 31 + br.cond.sptk.many 2f ; \ 32 + nop.b 0 ; \ 33 + nop.b 0;; ; \ 34 + } ; \ 35 + 2: \ 36 + .xdata8 ".paravirt_branches", 1b, type 37 + 38 + #else 39 + 40 + #include <linux/stringify.h> 41 + #include <asm/intrinsics.h> 42 + 43 + /* for binary patch */ 44 + struct paravirt_patch_site_bundle { 45 + void *sbundle; 46 + void *ebundle; 47 + unsigned long type; 48 + }; 49 + 50 + /* label means the beginning of new bundle */ 51 + #define paravirt_alt_bundle(instr, privop) \ 52 + "\t998:\n" \ 53 + "\t" instr "\n" \ 54 + "\t999:\n" \ 55 + "\t.pushsection .paravirt_bundles, \"a\"\n" \ 56 + "\t.popsection\n" \ 57 + "\t.xdata8 \".paravirt_bundles\", 998b, 999b, " \ 58 + __stringify(privop) "\n" 59 + 60 + 61 + struct paravirt_patch_bundle_elem { 62 + const void *sbundle; 63 + const void *ebundle; 64 + 
unsigned long type; 65 + }; 66 + 67 + 68 + struct paravirt_patch_site_inst { 69 + unsigned long stag; 70 + unsigned long etag; 71 + unsigned long type; 72 + }; 73 + 74 + #define paravirt_alt_inst(instr, privop) \ 75 + "\t[998:]\n" \ 76 + "\t" instr "\n" \ 77 + "\t[999:]\n" \ 78 + "\t.pushsection .paravirt_insts, \"a\"\n" \ 79 + "\t.popsection\n" \ 80 + "\t.xdata8 \".paravirt_insts\", 998b, 999b, " \ 81 + __stringify(privop) "\n" 82 + 83 + struct paravirt_patch_site_branch { 84 + unsigned long tag; 85 + unsigned long type; 86 + }; 87 + 88 + struct paravirt_patch_branch_target { 89 + const void *entry; 90 + unsigned long type; 91 + }; 92 + 93 + void 94 + __paravirt_patch_apply_branch( 95 + unsigned long tag, unsigned long type, 96 + const struct paravirt_patch_branch_target *entries, 97 + unsigned int nr_entries); 98 + 99 + void 100 + paravirt_patch_reloc_br(unsigned long tag, const void *target); 101 + 102 + void 103 + paravirt_patch_reloc_brl(unsigned long tag, const void *target); 104 + 105 + 106 + #if defined(ASM_SUPPORTED) && defined(CONFIG_PARAVIRT) 107 + unsigned long 108 + ia64_native_patch_bundle(void *sbundle, void *ebundle, unsigned long type); 109 + 110 + unsigned long 111 + __paravirt_patch_apply_bundle(void *sbundle, void *ebundle, unsigned long type, 112 + const struct paravirt_patch_bundle_elem *elems, 113 + unsigned long nelems, 114 + const struct paravirt_patch_bundle_elem **found); 115 + 116 + void 117 + paravirt_patch_apply_bundle(const struct paravirt_patch_site_bundle *start, 118 + const struct paravirt_patch_site_bundle *end); 119 + 120 + void 121 + paravirt_patch_apply_inst(const struct paravirt_patch_site_inst *start, 122 + const struct paravirt_patch_site_inst *end); 123 + 124 + void paravirt_patch_apply(void); 125 + #else 126 + #define paravirt_patch_apply_bundle(start, end) do { } while (0) 127 + #define paravirt_patch_apply_inst(start, end) do { } while (0) 128 + #define paravirt_patch_apply() do { } while (0) 129 + #endif 130 + 131 + 
#endif /* !__ASSEMBLY__ */ 132 + 133 + #endif /* __ASM_PARAVIRT_PATCH_H */ 134 + 135 + /* 136 + * Local variables: 137 + * mode: C 138 + * c-set-style: "linux" 139 + * c-basic-offset: 8 140 + * tab-width: 8 141 + * indent-tabs-mode: t 142 + * End: 143 + */
+361 -4
arch/ia64/include/asm/paravirt_privop.h
··· 33 33 */ 34 34 35 35 struct pv_cpu_ops { 36 - void (*fc)(unsigned long addr); 36 + void (*fc)(void *addr); 37 37 unsigned long (*thash)(unsigned long addr); 38 38 unsigned long (*get_cpuid)(int index); 39 39 unsigned long (*get_pmd)(int index); ··· 60 60 /* Instructions paravirtualized for performance */ 61 61 /************************************************/ 62 62 63 + #ifndef ASM_SUPPORTED 64 + #define paravirt_ssm_i() pv_cpu_ops.ssm_i() 65 + #define paravirt_rsm_i() pv_cpu_ops.rsm_i() 66 + #define __paravirt_getreg() pv_cpu_ops.getreg() 67 + #endif 68 + 63 69 /* mask for ia64_native_ssm/rsm() must be constant.("i" constraing). 64 70 * static inline function doesn't satisfy it. */ 65 71 #define paravirt_ssm(mask) \ 66 72 do { \ 67 73 if ((mask) == IA64_PSR_I) \ 68 - pv_cpu_ops.ssm_i(); \ 74 + paravirt_ssm_i(); \ 69 75 else \ 70 76 ia64_native_ssm(mask); \ 71 77 } while (0) ··· 79 73 #define paravirt_rsm(mask) \ 80 74 do { \ 81 75 if ((mask) == IA64_PSR_I) \ 82 - pv_cpu_ops.rsm_i(); \ 76 + paravirt_rsm_i(); \ 83 77 else \ 84 78 ia64_native_rsm(mask); \ 85 79 } while (0) ··· 92 86 if ((reg) == _IA64_REG_IP) \ 93 87 res = ia64_native_getreg(_IA64_REG_IP); \ 94 88 else \ 95 - res = pv_cpu_ops.getreg(reg); \ 89 + res = __paravirt_getreg(reg); \ 96 90 res; \ 97 91 }) 98 92 ··· 118 112 119 113 #endif /* CONFIG_PARAVIRT */ 120 114 115 + #if defined(CONFIG_PARAVIRT) && defined(ASM_SUPPORTED) 116 + #define paravirt_dv_serialize_data() ia64_dv_serialize_data() 117 + #else 118 + #define paravirt_dv_serialize_data() /* nothing */ 119 + #endif 120 + 121 121 /* these routines utilize privilege-sensitive or performance-sensitive 122 122 * privileged instructions so the code must be replaced with 123 123 * paravirtualized versions */ ··· 132 120 #define ia64_work_processed_syscall \ 133 121 IA64_PARAVIRT_ASM_FUNC(work_processed_syscall) 134 122 #define ia64_leave_kernel IA64_PARAVIRT_ASM_FUNC(leave_kernel) 123 + 124 + 125 + #if defined(CONFIG_PARAVIRT) 126 + 
/****************************************************************************** 127 + * binary patching infrastructure 128 + */ 129 + #define PARAVIRT_PATCH_TYPE_FC 1 130 + #define PARAVIRT_PATCH_TYPE_THASH 2 131 + #define PARAVIRT_PATCH_TYPE_GET_CPUID 3 132 + #define PARAVIRT_PATCH_TYPE_GET_PMD 4 133 + #define PARAVIRT_PATCH_TYPE_PTCGA 5 134 + #define PARAVIRT_PATCH_TYPE_GET_RR 6 135 + #define PARAVIRT_PATCH_TYPE_SET_RR 7 136 + #define PARAVIRT_PATCH_TYPE_SET_RR0_TO_RR4 8 137 + #define PARAVIRT_PATCH_TYPE_SSM_I 9 138 + #define PARAVIRT_PATCH_TYPE_RSM_I 10 139 + #define PARAVIRT_PATCH_TYPE_GET_PSR_I 11 140 + #define PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE 12 141 + 142 + /* PARAVIRT_PATY_TYPE_[GS]ETREG + _IA64_REG_xxx */ 143 + #define PARAVIRT_PATCH_TYPE_GETREG 0x10000000 144 + #define PARAVIRT_PATCH_TYPE_SETREG 0x20000000 145 + 146 + /* 147 + * struct task_struct* (*ia64_switch_to)(void* next_task); 148 + * void *ia64_leave_syscall; 149 + * void *ia64_work_processed_syscall 150 + * void *ia64_leave_kernel; 151 + */ 152 + 153 + #define PARAVIRT_PATCH_TYPE_BR_START 0x30000000 154 + #define PARAVIRT_PATCH_TYPE_BR_SWITCH_TO \ 155 + (PARAVIRT_PATCH_TYPE_BR_START + 0) 156 + #define PARAVIRT_PATCH_TYPE_BR_LEAVE_SYSCALL \ 157 + (PARAVIRT_PATCH_TYPE_BR_START + 1) 158 + #define PARAVIRT_PATCH_TYPE_BR_WORK_PROCESSED_SYSCALL \ 159 + (PARAVIRT_PATCH_TYPE_BR_START + 2) 160 + #define PARAVIRT_PATCH_TYPE_BR_LEAVE_KERNEL \ 161 + (PARAVIRT_PATCH_TYPE_BR_START + 3) 162 + 163 + #ifdef ASM_SUPPORTED 164 + #include <asm/paravirt_patch.h> 165 + 166 + /* 167 + * pv_cpu_ops calling stub. 168 + * normal function call convension can't be written by gcc 169 + * inline assembly. 170 + * 171 + * from the caller's point of view, 172 + * the following registers will be clobbered. 173 + * r2, r3 174 + * r8-r15 175 + * r16, r17 176 + * b6, b7 177 + * p6-p15 178 + * ar.ccv 179 + * 180 + * from the callee's point of view , 181 + * the following registers can be used. 
182 + * r2, r3: scratch 183 + * r8: scratch, input argument0 and return value 184 + * r0-r15: scratch, input argument1-5 185 + * b6: return pointer 186 + * b7: scratch 187 + * p6-p15: scratch 188 + * ar.ccv: scratch 189 + * 190 + * other registers must not be changed. especially 191 + * b0: rp: preserved. gcc ignores b0 in clobbered register. 192 + * r16: saved gp 193 + */ 194 + /* 5 bundles */ 195 + #define __PARAVIRT_BR \ 196 + ";;\n" \ 197 + "{ .mlx\n" \ 198 + "nop 0\n" \ 199 + "movl r2 = %[op_addr]\n"/* get function pointer address */ \ 200 + ";;\n" \ 201 + "}\n" \ 202 + "1:\n" \ 203 + "{ .mii\n" \ 204 + "ld8 r2 = [r2]\n" /* load function descriptor address */ \ 205 + "mov r17 = ip\n" /* get ip to calc return address */ \ 206 + "mov r16 = gp\n" /* save gp */ \ 207 + ";;\n" \ 208 + "}\n" \ 209 + "{ .mii\n" \ 210 + "ld8 r3 = [r2], 8\n" /* load entry address */ \ 211 + "adds r17 = 1f - 1b, r17\n" /* calculate return address */ \ 212 + ";;\n" \ 213 + "mov b7 = r3\n" /* set entry address */ \ 214 + "}\n" \ 215 + "{ .mib\n" \ 216 + "ld8 gp = [r2]\n" /* load gp value */ \ 217 + "mov b6 = r17\n" /* set return address */ \ 218 + "br.cond.sptk.few b7\n" /* intrinsics are very short isns */ \ 219 + "}\n" \ 220 + "1:\n" \ 221 + "{ .mii\n" \ 222 + "mov gp = r16\n" /* restore gp value */ \ 223 + "nop 0\n" \ 224 + "nop 0\n" \ 225 + ";;\n" \ 226 + "}\n" 227 + 228 + #define PARAVIRT_OP(op) \ 229 + [op_addr] "i"(&pv_cpu_ops.op) 230 + 231 + #define PARAVIRT_TYPE(type) \ 232 + PARAVIRT_PATCH_TYPE_ ## type 233 + 234 + #define PARAVIRT_REG_CLOBBERS0 \ 235 + "r2", "r3", /*"r8",*/ "r9", "r10", "r11", "r14", \ 236 + "r15", "r16", "r17" 237 + 238 + #define PARAVIRT_REG_CLOBBERS1 \ 239 + "r2","r3", /*"r8",*/ "r9", "r10", "r11", "r14", \ 240 + "r15", "r16", "r17" 241 + 242 + #define PARAVIRT_REG_CLOBBERS2 \ 243 + "r2", "r3", /*"r8", "r9",*/ "r10", "r11", "r14", \ 244 + "r15", "r16", "r17" 245 + 246 + #define PARAVIRT_REG_CLOBBERS5 \ 247 + "r2", "r3", /*"r8", "r9", "r10", "r11", "r14",*/ \ 
248 + "r15", "r16", "r17" 249 + 250 + #define PARAVIRT_BR_CLOBBERS \ 251 + "b6", "b7" 252 + 253 + #define PARAVIRT_PR_CLOBBERS \ 254 + "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15" 255 + 256 + #define PARAVIRT_AR_CLOBBERS \ 257 + "ar.ccv" 258 + 259 + #define PARAVIRT_CLOBBERS0 \ 260 + PARAVIRT_REG_CLOBBERS0, \ 261 + PARAVIRT_BR_CLOBBERS, \ 262 + PARAVIRT_PR_CLOBBERS, \ 263 + PARAVIRT_AR_CLOBBERS, \ 264 + "memory" 265 + 266 + #define PARAVIRT_CLOBBERS1 \ 267 + PARAVIRT_REG_CLOBBERS1, \ 268 + PARAVIRT_BR_CLOBBERS, \ 269 + PARAVIRT_PR_CLOBBERS, \ 270 + PARAVIRT_AR_CLOBBERS, \ 271 + "memory" 272 + 273 + #define PARAVIRT_CLOBBERS2 \ 274 + PARAVIRT_REG_CLOBBERS2, \ 275 + PARAVIRT_BR_CLOBBERS, \ 276 + PARAVIRT_PR_CLOBBERS, \ 277 + PARAVIRT_AR_CLOBBERS, \ 278 + "memory" 279 + 280 + #define PARAVIRT_CLOBBERS5 \ 281 + PARAVIRT_REG_CLOBBERS5, \ 282 + PARAVIRT_BR_CLOBBERS, \ 283 + PARAVIRT_PR_CLOBBERS, \ 284 + PARAVIRT_AR_CLOBBERS, \ 285 + "memory" 286 + 287 + #define PARAVIRT_BR0(op, type) \ 288 + register unsigned long ia64_clobber asm ("r8"); \ 289 + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ 290 + PARAVIRT_TYPE(type)) \ 291 + : "=r"(ia64_clobber) \ 292 + : PARAVIRT_OP(op) \ 293 + : PARAVIRT_CLOBBERS0) 294 + 295 + #define PARAVIRT_BR0_RET(op, type) \ 296 + register unsigned long ia64_intri_res asm ("r8"); \ 297 + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ 298 + PARAVIRT_TYPE(type)) \ 299 + : "=r"(ia64_intri_res) \ 300 + : PARAVIRT_OP(op) \ 301 + : PARAVIRT_CLOBBERS0) 302 + 303 + #define PARAVIRT_BR1(op, type, arg1) \ 304 + register unsigned long __##arg1 asm ("r8") = arg1; \ 305 + register unsigned long ia64_clobber asm ("r8"); \ 306 + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ 307 + PARAVIRT_TYPE(type)) \ 308 + : "=r"(ia64_clobber) \ 309 + : PARAVIRT_OP(op), "0"(__##arg1) \ 310 + : PARAVIRT_CLOBBERS1) 311 + 312 + #define PARAVIRT_BR1_RET(op, type, arg1) \ 313 + register unsigned long ia64_intri_res asm ("r8"); \ 314 + register 
unsigned long __##arg1 asm ("r8") = arg1; \ 315 + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ 316 + PARAVIRT_TYPE(type)) \ 317 + : "=r"(ia64_intri_res) \ 318 + : PARAVIRT_OP(op), "0"(__##arg1) \ 319 + : PARAVIRT_CLOBBERS1) 320 + 321 + #define PARAVIRT_BR1_VOID(op, type, arg1) \ 322 + register void *__##arg1 asm ("r8") = arg1; \ 323 + register unsigned long ia64_clobber asm ("r8"); \ 324 + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ 325 + PARAVIRT_TYPE(type)) \ 326 + : "=r"(ia64_clobber) \ 327 + : PARAVIRT_OP(op), "0"(__##arg1) \ 328 + : PARAVIRT_CLOBBERS1) 329 + 330 + #define PARAVIRT_BR2(op, type, arg1, arg2) \ 331 + register unsigned long __##arg1 asm ("r8") = arg1; \ 332 + register unsigned long __##arg2 asm ("r9") = arg2; \ 333 + register unsigned long ia64_clobber1 asm ("r8"); \ 334 + register unsigned long ia64_clobber2 asm ("r9"); \ 335 + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ 336 + PARAVIRT_TYPE(type)) \ 337 + : "=r"(ia64_clobber1), "=r"(ia64_clobber2) \ 338 + : PARAVIRT_OP(op), "0"(__##arg1), "1"(__##arg2) \ 339 + : PARAVIRT_CLOBBERS2) 340 + 341 + 342 + #define PARAVIRT_DEFINE_CPU_OP0(op, type) \ 343 + static inline void \ 344 + paravirt_ ## op (void) \ 345 + { \ 346 + PARAVIRT_BR0(op, type); \ 347 + } 348 + 349 + #define PARAVIRT_DEFINE_CPU_OP0_RET(op, type) \ 350 + static inline unsigned long \ 351 + paravirt_ ## op (void) \ 352 + { \ 353 + PARAVIRT_BR0_RET(op, type); \ 354 + return ia64_intri_res; \ 355 + } 356 + 357 + #define PARAVIRT_DEFINE_CPU_OP1_VOID(op, type) \ 358 + static inline void \ 359 + paravirt_ ## op (void *arg1) \ 360 + { \ 361 + PARAVIRT_BR1_VOID(op, type, arg1); \ 362 + } 363 + 364 + #define PARAVIRT_DEFINE_CPU_OP1(op, type) \ 365 + static inline void \ 366 + paravirt_ ## op (unsigned long arg1) \ 367 + { \ 368 + PARAVIRT_BR1(op, type, arg1); \ 369 + } 370 + 371 + #define PARAVIRT_DEFINE_CPU_OP1_RET(op, type) \ 372 + static inline unsigned long \ 373 + paravirt_ ## op (unsigned long arg1) \ 374 + { \ 375 + 
PARAVIRT_BR1_RET(op, type, arg1); \ 376 + return ia64_intri_res; \ 377 + } 378 + 379 + #define PARAVIRT_DEFINE_CPU_OP2(op, type) \ 380 + static inline void \ 381 + paravirt_ ## op (unsigned long arg1, \ 382 + unsigned long arg2) \ 383 + { \ 384 + PARAVIRT_BR2(op, type, arg1, arg2); \ 385 + } 386 + 387 + 388 + PARAVIRT_DEFINE_CPU_OP1_VOID(fc, FC); 389 + PARAVIRT_DEFINE_CPU_OP1_RET(thash, THASH) 390 + PARAVIRT_DEFINE_CPU_OP1_RET(get_cpuid, GET_CPUID) 391 + PARAVIRT_DEFINE_CPU_OP1_RET(get_pmd, GET_PMD) 392 + PARAVIRT_DEFINE_CPU_OP2(ptcga, PTCGA) 393 + PARAVIRT_DEFINE_CPU_OP1_RET(get_rr, GET_RR) 394 + PARAVIRT_DEFINE_CPU_OP2(set_rr, SET_RR) 395 + PARAVIRT_DEFINE_CPU_OP0(ssm_i, SSM_I) 396 + PARAVIRT_DEFINE_CPU_OP0(rsm_i, RSM_I) 397 + PARAVIRT_DEFINE_CPU_OP0_RET(get_psr_i, GET_PSR_I) 398 + PARAVIRT_DEFINE_CPU_OP1(intrin_local_irq_restore, INTRIN_LOCAL_IRQ_RESTORE) 399 + 400 + static inline void 401 + paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1, 402 + unsigned long val2, unsigned long val3, 403 + unsigned long val4) 404 + { 405 + register unsigned long __val0 asm ("r8") = val0; 406 + register unsigned long __val1 asm ("r9") = val1; 407 + register unsigned long __val2 asm ("r10") = val2; 408 + register unsigned long __val3 asm ("r11") = val3; 409 + register unsigned long __val4 asm ("r14") = val4; 410 + 411 + register unsigned long ia64_clobber0 asm ("r8"); 412 + register unsigned long ia64_clobber1 asm ("r9"); 413 + register unsigned long ia64_clobber2 asm ("r10"); 414 + register unsigned long ia64_clobber3 asm ("r11"); 415 + register unsigned long ia64_clobber4 asm ("r14"); 416 + 417 + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, 418 + PARAVIRT_TYPE(SET_RR0_TO_RR4)) 419 + : "=r"(ia64_clobber0), 420 + "=r"(ia64_clobber1), 421 + "=r"(ia64_clobber2), 422 + "=r"(ia64_clobber3), 423 + "=r"(ia64_clobber4) 424 + : PARAVIRT_OP(set_rr0_to_rr4), 425 + "0"(__val0), "1"(__val1), "2"(__val2), 426 + "3"(__val3), "4"(__val4) 427 + : PARAVIRT_CLOBBERS5); 428 + } 
429 + 430 + /* unsigned long paravirt_getreg(int reg) */ 431 + #define __paravirt_getreg(reg) \ 432 + ({ \ 433 + register unsigned long ia64_intri_res asm ("r8"); \ 434 + register unsigned long __reg asm ("r8") = (reg); \ 435 + \ 436 + BUILD_BUG_ON(!__builtin_constant_p(reg)); \ 437 + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ 438 + PARAVIRT_TYPE(GETREG) \ 439 + + (reg)) \ 440 + : "=r"(ia64_intri_res) \ 441 + : PARAVIRT_OP(getreg), "0"(__reg) \ 442 + : PARAVIRT_CLOBBERS1); \ 443 + \ 444 + ia64_intri_res; \ 445 + }) 446 + 447 + /* void paravirt_setreg(int reg, unsigned long val) */ 448 + #define paravirt_setreg(reg, val) \ 449 + do { \ 450 + register unsigned long __val asm ("r8") = val; \ 451 + register unsigned long __reg asm ("r9") = reg; \ 452 + register unsigned long ia64_clobber1 asm ("r8"); \ 453 + register unsigned long ia64_clobber2 asm ("r9"); \ 454 + \ 455 + BUILD_BUG_ON(!__builtin_constant_p(reg)); \ 456 + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ 457 + PARAVIRT_TYPE(SETREG) \ 458 + + (reg)) \ 459 + : "=r"(ia64_clobber1), \ 460 + "=r"(ia64_clobber2) \ 461 + : PARAVIRT_OP(setreg), \ 462 + "1"(__reg), "0"(__val) \ 463 + : PARAVIRT_CLOBBERS2); \ 464 + } while (0) 465 + 466 + #endif /* ASM_SUPPORTED */ 467 + #endif /* CONFIG_PARAVIRT && ASM_SUPPOTED */ 135 468 136 469 #endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */
+2 -1
arch/ia64/include/asm/smp.h
··· 126 126 extern int is_multithreading_enabled(void); 127 127 128 128 extern void arch_send_call_function_single_ipi(int cpu); 129 - extern void arch_send_call_function_ipi(cpumask_t mask); 129 + extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 130 + #define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask 130 131 131 132 #else /* CONFIG_SMP */ 132 133
+1
arch/ia64/include/asm/timex.h
··· 40 40 } 41 41 42 42 extern void ia64_cpu_local_tick (void); 43 + extern unsigned long long ia64_native_sched_clock (void); 43 44 44 45 #endif /* _ASM_IA64_TIMEX_H */
-5
arch/ia64/include/asm/topology.h
··· 112 112 113 113 extern void arch_fix_phys_package_id(int num, u32 slot); 114 114 115 - #define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \ 116 - CPU_MASK_ALL : \ 117 - node_to_cpumask(pcibus_to_node(bus)) \ 118 - ) 119 - 120 115 #define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ 121 116 cpu_all_mask : \ 122 117 cpumask_of_node(pcibus_to_node(bus)))
+18 -21
arch/ia64/include/asm/xen/hypervisor.h
··· 33 33 #ifndef _ASM_IA64_XEN_HYPERVISOR_H 34 34 #define _ASM_IA64_XEN_HYPERVISOR_H 35 35 36 - #ifdef CONFIG_XEN 37 - 38 - #include <linux/init.h> 39 36 #include <xen/interface/xen.h> 40 37 #include <xen/interface/version.h> /* to compile feature.c */ 41 38 #include <xen/features.h> /* to comiple xen-netfront.c */ ··· 40 43 41 44 /* xen_domain_type is set before executing any C code by early_xen_setup */ 42 45 enum xen_domain_type { 43 - XEN_NATIVE, 44 - XEN_PV_DOMAIN, 45 - XEN_HVM_DOMAIN, 46 + XEN_NATIVE, /* running on bare hardware */ 47 + XEN_PV_DOMAIN, /* running in a PV domain */ 48 + XEN_HVM_DOMAIN, /* running in a Xen hvm domain*/ 46 49 }; 47 50 51 + #ifdef CONFIG_XEN 48 52 extern enum xen_domain_type xen_domain_type; 53 + #else 54 + #define xen_domain_type XEN_NATIVE 55 + #endif 49 56 50 57 #define xen_domain() (xen_domain_type != XEN_NATIVE) 51 - #define xen_pv_domain() (xen_domain_type == XEN_PV_DOMAIN) 52 - #define xen_initial_domain() (xen_pv_domain() && \ 58 + #define xen_pv_domain() (xen_domain() && \ 59 + xen_domain_type == XEN_PV_DOMAIN) 60 + #define xen_hvm_domain() (xen_domain() && \ 61 + xen_domain_type == XEN_HVM_DOMAIN) 62 + 63 + #ifdef CONFIG_XEN_DOM0 64 + #define xen_initial_domain() (xen_pv_domain() && \ 53 65 (xen_start_info->flags & SIF_INITDOMAIN)) 54 - #define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN) 66 + #else 67 + #define xen_initial_domain() (0) 68 + #endif 55 69 56 - /* deprecated. 
remove this */ 57 - #define is_running_on_xen() (xen_domain_type == XEN_PV_DOMAIN) 58 70 71 + #ifdef CONFIG_XEN 59 72 extern struct shared_info *HYPERVISOR_shared_info; 60 73 extern struct start_info *xen_start_info; 61 74 ··· 81 74 82 75 /* For setup_arch() in arch/ia64/kernel/setup.c */ 83 76 void xen_ia64_enable_opt_feature(void); 84 - 85 - #else /* CONFIG_XEN */ 86 - 87 - #define xen_domain() (0) 88 - #define xen_pv_domain() (0) 89 - #define xen_initial_domain() (0) 90 - #define xen_hvm_domain() (0) 91 - #define is_running_on_xen() (0) /* deprecated. remove this */ 92 77 #endif 93 - 94 - #define is_initial_xendomain() (0) /* deprecated. remove this */ 95 78 96 79 #endif /* _ASM_IA64_XEN_HYPERVISOR_H */
+28
arch/ia64/include/asm/xen/inst.h
··· 33 33 #define __paravirt_work_processed_syscall_target \ 34 34 xen_work_processed_syscall 35 35 36 + #define paravirt_fsyscall_table xen_fsyscall_table 37 + #define paravirt_fsys_bubble_down xen_fsys_bubble_down 38 + 36 39 #define MOV_FROM_IFA(reg) \ 37 40 movl reg = XSI_IFA; \ 38 41 ;; \ ··· 112 109 (\pred) mov r8 = \clob 113 110 .endm 114 111 #define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob 112 + 113 + /* assuming ar.itc is read with interrupt disabled. */ 114 + #define MOV_FROM_ITC(pred, pred_clob, reg, clob) \ 115 + (pred) movl clob = XSI_ITC_OFFSET; \ 116 + ;; \ 117 + (pred) ld8 clob = [clob]; \ 118 + (pred) mov reg = ar.itc; \ 119 + ;; \ 120 + (pred) add reg = reg, clob; \ 121 + ;; \ 122 + (pred) movl clob = XSI_ITC_LAST; \ 123 + ;; \ 124 + (pred) ld8 clob = [clob]; \ 125 + ;; \ 126 + (pred) cmp.geu.unc pred_clob, p0 = clob, reg; \ 127 + ;; \ 128 + (pred_clob) add reg = 1, clob; \ 129 + ;; \ 130 + (pred) movl clob = XSI_ITC_LAST; \ 131 + ;; \ 132 + (pred) st8 [clob] = reg 115 133 116 134 117 135 #define MOV_TO_IFA(reg, clob) \ ··· 385 361 386 362 #define RSM_PSR_DT \ 387 363 XEN_HYPER_RSM_PSR_DT 364 + 365 + #define RSM_PSR_BE_I(clob0, clob1) \ 366 + RSM_PSR_I(p0, clob0, clob1); \ 367 + rum psr.be 388 368 389 369 #define SSM_PSR_DT_AND_SRLZ_I \ 390 370 XEN_HYPER_SSM_PSR_DT
+9
arch/ia64/include/asm/xen/interface.h
··· 209 209 unsigned long krs[8]; /* kernel registers */ 210 210 unsigned long tmp[16]; /* temp registers 211 211 (e.g. for hyperprivops) */ 212 + 213 + /* itc paravirtualization 214 + * vAR.ITC = mAR.ITC + itc_offset 215 + * itc_last is one which was lastly passed to 216 + * the guest OS in order to prevent it from 217 + * going backwards. 218 + */ 219 + unsigned long itc_offset; 220 + unsigned long itc_last; 212 221 }; 213 222 }; 214 223 };
+10 -1
arch/ia64/include/asm/xen/minstate.h
··· 1 + 2 + #ifdef CONFIG_VIRT_CPU_ACCOUNTING 3 + /* read ar.itc in advance, and use it before leaving bank 0 */ 4 + #define XEN_ACCOUNT_GET_STAMP \ 5 + MOV_FROM_ITC(pUStk, p6, r20, r2); 6 + #else 7 + #define XEN_ACCOUNT_GET_STAMP 8 + #endif 9 + 1 10 /* 2 11 * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves 3 12 * the minimum state necessary that allows us to turn psr.ic back ··· 132 123 ;; \ 133 124 .mem.offset 0,0; st8.spill [r16]=r2,16; \ 134 125 .mem.offset 8,0; st8.spill [r17]=r3,16; \ 135 - ACCOUNT_GET_STAMP \ 126 + XEN_ACCOUNT_GET_STAMP \ 136 127 adds r2=IA64_PT_REGS_R16_OFFSET,r1; \ 137 128 ;; \ 138 129 EXTRA; \
+38
arch/ia64/include/asm/xen/patchlist.h
··· 1 + /****************************************************************************** 2 + * arch/ia64/include/asm/xen/patchlist.h 3 + * 4 + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> 5 + * VA Linux Systems Japan K.K. 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License as published by 9 + * the Free Software Foundation; either version 2 of the License, or 10 + * (at your option) any later version. 11 + * 12 + * This program is distributed in the hope that it will be useful, 13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 + * GNU General Public License for more details. 16 + * 17 + * You should have received a copy of the GNU General Public License 18 + * along with this program; if not, write to the Free Software 19 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 + * 21 + */ 22 + 23 + #define __paravirt_start_gate_fsyscall_patchlist \ 24 + __xen_start_gate_fsyscall_patchlist 25 + #define __paravirt_end_gate_fsyscall_patchlist \ 26 + __xen_end_gate_fsyscall_patchlist 27 + #define __paravirt_start_gate_brl_fsys_bubble_down_patchlist \ 28 + __xen_start_gate_brl_fsys_bubble_down_patchlist 29 + #define __paravirt_end_gate_brl_fsys_bubble_down_patchlist \ 30 + __xen_end_gate_brl_fsys_bubble_down_patchlist 31 + #define __paravirt_start_gate_vtop_patchlist \ 32 + __xen_start_gate_vtop_patchlist 33 + #define __paravirt_end_gate_vtop_patchlist \ 34 + __xen_end_gate_vtop_patchlist 35 + #define __paravirt_start_gate_mckinley_e9_patchlist \ 36 + __xen_start_gate_mckinley_e9_patchlist 37 + #define __paravirt_end_gate_mckinley_e9_patchlist \ 38 + __xen_end_gate_mckinley_e9_patchlist
+7 -1
arch/ia64/include/asm/xen/privop.h
··· 55 55 #define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS) 56 56 #define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS) 57 57 #define XSI_IHA (XSI_BASE + XSI_IHA_OFS) 58 + #define XSI_ITC_OFFSET (XSI_BASE + XSI_ITC_OFFSET_OFS) 59 + #define XSI_ITC_LAST (XSI_BASE + XSI_ITC_LAST_OFS) 58 60 #endif 59 61 60 62 #ifndef __ASSEMBLY__ ··· 69 67 * may have different semantics depending on whether they are executed 70 68 * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't 71 69 * be allowed to execute directly, lest incorrect semantics result. */ 72 - extern void xen_fc(unsigned long addr); 70 + extern void xen_fc(void *addr); 73 71 extern unsigned long xen_thash(unsigned long addr); 74 72 75 73 /* Note that "ttag" and "cover" are also privilege-sensitive; "ttag" ··· 82 80 extern unsigned long xen_get_cpuid(int index); 83 81 extern unsigned long xen_get_pmd(int index); 84 82 83 + #ifndef ASM_SUPPORTED 85 84 extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */ 86 85 extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */ 86 + #endif 87 87 88 88 /************************************************/ 89 89 /* Instructions paravirtualized for performance */ ··· 110 106 #define xen_get_virtual_pend() \ 111 107 (*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1)) 112 108 109 + #ifndef ASM_SUPPORTED 113 110 /* Although all privileged operations can be left to trap and will 114 111 * be properly handled by Xen, some are frequent enough that we use 115 112 * hyperprivops for performance. */ ··· 128 123 unsigned long val4); 129 124 extern void xen_set_kr(unsigned long index, unsigned long val); 130 125 extern void xen_ptcga(unsigned long addr, unsigned long size); 126 + #endif /* !ASM_SUPPORTED */ 131 127 132 128 #endif /* !__ASSEMBLY__ */ 133 129
+9 -30
arch/ia64/kernel/Makefile
··· 5 5 extra-y := head.o init_task.o vmlinux.lds 6 6 7 7 obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ 8 - irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \ 8 + irq_lsapic.o ivt.o machvec.o pal.o paravirt_patchlist.o patch.o process.o perfmon.o ptrace.o sal.o \ 9 9 salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ 10 10 unwind.o mca.o mca_asm.o topology.o dma-mapping.o 11 11 ··· 36 36 mca_recovery-y += mca_drv.o mca_drv_asm.o 37 37 obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o 38 38 39 - obj-$(CONFIG_PARAVIRT) += paravirt.o paravirtentry.o 39 + obj-$(CONFIG_PARAVIRT) += paravirt.o paravirtentry.o \ 40 + paravirt_patch.o 40 41 41 42 obj-$(CONFIG_IA64_ESI) += esi.o 42 43 ifneq ($(CONFIG_IA64_ESI),) ··· 46 45 obj-$(CONFIG_DMAR) += pci-dma.o 47 46 obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o 48 47 49 - # The gate DSO image is built using a special linker script. 50 - targets += gate.so gate-syms.o 51 - 52 - extra-y += gate.so gate-syms.o gate.lds gate.o 53 - 54 48 # fp_emulate() expects f2-f5,f16-f31 to contain the user-level state. 55 49 CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31 56 50 57 - CPPFLAGS_gate.lds := -P -C -U$(ARCH) 58 - 59 - quiet_cmd_gate = GATE $@ 60 - cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@ 61 - 62 - GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \ 63 - $(call ld-option, -Wl$(comma)--hash-style=sysv) 64 - $(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE 65 - $(call if_changed,gate) 66 - 67 - $(obj)/built-in.o: $(obj)/gate-syms.o 68 - $(obj)/built-in.o: ld_flags += -R $(obj)/gate-syms.o 69 - 70 - GATECFLAGS_gate-syms.o = -r 71 - $(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE 72 - $(call if_changed,gate) 73 - 74 - # gate-data.o contains the gate DSO image as data in section .data.gate. 75 - # We must build gate.so before we can assemble it. 
76 - # Note: kbuild does not track this dependency due to usage of .incbin 77 - $(obj)/gate-data.o: $(obj)/gate.so 51 + # The gate DSO image is built using a special linker script. 52 + include $(srctree)/arch/ia64/kernel/Makefile.gate 53 + # tell compiled for native 54 + CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_NATIVE 78 55 79 56 # Calculate NR_IRQ = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, ...) based on config 80 57 define sed-y ··· 88 109 clean-files += $(objtree)/include/asm-ia64/nr-irqs.h 89 110 90 111 # 91 - # native ivt.S and entry.S 112 + # native ivt.S, entry.S and fsys.S 92 113 # 93 - ASM_PARAVIRT_OBJS = ivt.o entry.o 114 + ASM_PARAVIRT_OBJS = ivt.o entry.o fsys.o 94 115 define paravirtualized_native 95 116 AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE 96 117 AFLAGS_pvchk-sed-$(1) += -D__IA64_ASM_PARAVIRTUALIZED_PVCHECK
+27
arch/ia64/kernel/Makefile.gate
··· 1 + # The gate DSO image is built using a special linker script. 2 + 3 + targets += gate.so gate-syms.o 4 + 5 + extra-y += gate.so gate-syms.o gate.lds gate.o 6 + 7 + CPPFLAGS_gate.lds := -P -C -U$(ARCH) 8 + 9 + quiet_cmd_gate = GATE $@ 10 + cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@ 11 + 12 + GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \ 13 + $(call ld-option, -Wl$(comma)--hash-style=sysv) 14 + $(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE 15 + $(call if_changed,gate) 16 + 17 + $(obj)/built-in.o: $(obj)/gate-syms.o 18 + $(obj)/built-in.o: ld_flags += -R $(obj)/gate-syms.o 19 + 20 + GATECFLAGS_gate-syms.o = -r 21 + $(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE 22 + $(call if_changed,gate) 23 + 24 + # gate-data.o contains the gate DSO image as data in section .data.gate. 25 + # We must build gate.so before we can assemble it. 26 + # Note: kbuild does not track this dependency due to usage of .incbin 27 + $(obj)/gate-data.o: $(obj)/gate.so
+4 -4
arch/ia64/kernel/acpi.c
··· 890 890 possible, max((possible - available_cpus), 0)); 891 891 892 892 for (i = 0; i < possible; i++) 893 - cpu_set(i, cpu_possible_map); 893 + set_cpu_possible(i, true); 894 894 } 895 895 896 896 int acpi_map_lsapic(acpi_handle handle, int *pcpu) ··· 928 928 buffer.length = ACPI_ALLOCATE_BUFFER; 929 929 buffer.pointer = NULL; 930 930 931 - cpus_complement(tmp_map, cpu_present_map); 932 - cpu = first_cpu(tmp_map); 933 - if (cpu >= NR_CPUS) 931 + cpumask_complement(&tmp_map, cpu_present_mask); 932 + cpu = cpumask_first(&tmp_map); 933 + if (cpu >= nr_cpu_ids) 934 934 return -EINVAL; 935 935 936 936 acpi_map_cpu2node(handle, cpu, physid);
+2
arch/ia64/kernel/asm-offsets.c
··· 316 316 DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]); 317 317 DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat); 318 318 DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat); 319 + DEFINE_MAPPED_REG_OFS(XSI_ITC_OFFSET_OFS, itc_offset); 320 + DEFINE_MAPPED_REG_OFS(XSI_ITC_LAST_OFS, itc_last); 319 321 #endif /* CONFIG_XEN */ 320 322 }
+1
arch/ia64/kernel/efi.c
··· 456 456 GRANULEROUNDDOWN((unsigned long) pal_vaddr), 457 457 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)), 458 458 IA64_GRANULE_SHIFT); 459 + paravirt_dv_serialize_data(); 459 460 ia64_set_psr(psr); /* restore psr */ 460 461 } 461 462
+2 -2
arch/ia64/kernel/entry.S
··· 735 735 __paravirt_work_processed_syscall: 736 736 #ifdef CONFIG_VIRT_CPU_ACCOUNTING 737 737 adds r2=PT(LOADRS)+16,r12 738 - (pUStk) mov.m r22=ar.itc // fetch time at leave 738 + MOV_FROM_ITC(pUStk, p9, r22, r19) // fetch time at leave 739 739 adds r18=TI_FLAGS+IA64_TASK_SIZE,r13 740 740 ;; 741 741 (p6) ld4 r31=[r18] // load current_thread_info()->flags ··· 984 984 #ifdef CONFIG_VIRT_CPU_ACCOUNTING 985 985 .pred.rel.mutex pUStk,pKStk 986 986 MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled 987 - (pUStk) mov.m r22=ar.itc // M fetch time at leave 987 + MOV_FROM_ITC(pUStk, p9, r22, r29) // M fetch time at leave 988 988 nop.i 0 989 989 ;; 990 990 #else
+18 -17
arch/ia64/kernel/fsys.S
··· 25 25 #include <asm/unistd.h> 26 26 27 27 #include "entry.h" 28 + #include "paravirt_inst.h" 28 29 29 30 /* 30 31 * See Documentation/ia64/fsys.txt for details on fsyscalls. ··· 280 279 (p9) cmp.eq p13,p0 = 0,r30 // if mmio_ptr, clear p13 jitter control 281 280 ;; 282 281 .pred.rel.mutex p8,p9 283 - (p8) mov r2 = ar.itc // CPU_TIMER. 36 clocks latency!!! 282 + MOV_FROM_ITC(p8, p6, r2, r10) // CPU_TIMER. 36 clocks latency!!! 284 283 (p9) ld8 r2 = [r30] // MMIO_TIMER. Could also have latency issues.. 285 284 (p13) ld8 r25 = [r19] // get itc_lastcycle value 286 285 ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET // tv_sec ··· 419 418 mov r17=(1 << (SIGKILL - 1)) | (1 << (SIGSTOP - 1)) 420 419 ;; 421 420 422 - rsm psr.i // mask interrupt delivery 421 + RSM_PSR_I(p0, r18, r19) // mask interrupt delivery 423 422 mov ar.ccv=0 424 423 andcm r14=r14,r17 // filter out SIGKILL & SIGSTOP 425 424 ··· 492 491 #ifdef CONFIG_SMP 493 492 st4.rel [r31]=r0 // release the lock 494 493 #endif 495 - ssm psr.i 494 + SSM_PSR_I(p0, p9, r31) 496 495 ;; 497 496 498 497 srlz.d // ensure psr.i is set again ··· 514 513 #ifdef CONFIG_SMP 515 514 st4.rel [r31]=r0 // release the lock 516 515 #endif 517 - ssm psr.i 516 + SSM_PSR_I(p0, p9, r17) 518 517 ;; 519 518 srlz.d 520 519 br.sptk.many fsys_fallback_syscall // with signal pending, do the heavy-weight syscall ··· 522 521 #ifdef CONFIG_SMP 523 522 .lock_contention: 524 523 /* Rather than spinning here, fall back on doing a heavy-weight syscall. 
*/ 525 - ssm psr.i 524 + SSM_PSR_I(p0, p9, r17) 526 525 ;; 527 526 srlz.d 528 527 br.sptk.many fsys_fallback_syscall ··· 593 592 adds r17=-1024,r15 594 593 movl r14=sys_call_table 595 594 ;; 596 - rsm psr.i 595 + RSM_PSR_I(p0, r26, r27) 597 596 shladd r18=r17,3,r14 598 597 ;; 599 598 ld8 r18=[r18] // load normal (heavy-weight) syscall entry-point 600 - mov r29=psr // read psr (12 cyc load latency) 599 + MOV_FROM_PSR(p0, r29, r26) // read psr (12 cyc load latency) 601 600 mov r27=ar.rsc 602 601 mov r21=ar.fpsr 603 602 mov r26=ar.pfs 604 603 END(fsys_fallback_syscall) 605 604 /* FALL THROUGH */ 606 - GLOBAL_ENTRY(fsys_bubble_down) 605 + GLOBAL_ENTRY(paravirt_fsys_bubble_down) 607 606 .prologue 608 607 .altrp b6 609 608 .body ··· 641 640 * 642 641 * PSR.BE : already is turned off in __kernel_syscall_via_epc() 643 642 * PSR.AC : don't care (kernel normally turns PSR.AC on) 644 - * PSR.I : already turned off by the time fsys_bubble_down gets 643 + * PSR.I : already turned off by the time paravirt_fsys_bubble_down gets 645 644 * invoked 646 645 * PSR.DFL: always 0 (kernel never turns it on) 647 646 * PSR.DFH: don't care --- kernel never touches f32-f127 on its own ··· 651 650 * PSR.DB : don't care --- kernel never enables kernel-level 652 651 * breakpoints 653 652 * PSR.TB : must be 0 already; if it wasn't zero on entry to 654 - * __kernel_syscall_via_epc, the branch to fsys_bubble_down 653 + * __kernel_syscall_via_epc, the branch to paravirt_fsys_bubble_down 655 654 * will trigger a taken branch; the taken-trap-handler then 656 655 * converts the syscall into a break-based system-call. 
657 656 */ ··· 684 683 ;; 685 684 mov ar.rsc=0 // M2 set enforced lazy mode, pl 0, LE, loadrs=0 686 685 #ifdef CONFIG_VIRT_CPU_ACCOUNTING 687 - mov.m r30=ar.itc // M get cycle for accounting 686 + MOV_FROM_ITC(p0, p6, r30, r23) // M get cycle for accounting 688 687 #else 689 688 nop.m 0 690 689 #endif ··· 735 734 mov rp=r14 // I0 set the real return addr 736 735 and r3=_TIF_SYSCALL_TRACEAUDIT,r3 // A 737 736 ;; 738 - ssm psr.i // M2 we're on kernel stacks now, reenable irqs 737 + SSM_PSR_I(p0, p6, r22) // M2 we're on kernel stacks now, reenable irqs 739 738 cmp.eq p8,p0=r3,r0 // A 740 739 (p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT 741 740 742 741 nop.m 0 743 742 (p8) br.call.sptk.many b6=b6 // B (ignore return address) 744 743 br.cond.spnt ia64_trace_syscall // B 745 - END(fsys_bubble_down) 744 + END(paravirt_fsys_bubble_down) 746 745 747 746 .rodata 748 747 .align 8 749 - .globl fsyscall_table 748 + .globl paravirt_fsyscall_table 750 749 751 - data8 fsys_bubble_down 752 - fsyscall_table: 750 + data8 paravirt_fsys_bubble_down 751 + paravirt_fsyscall_table: 753 752 data8 fsys_ni_syscall 754 753 data8 0 // exit // 1025 755 754 data8 0 // read ··· 1034 1033 1035 1034 // fill in zeros for the remaining entries 1036 1035 .zero: 1037 - .space fsyscall_table + 8*NR_syscalls - .zero, 0 1036 + .space paravirt_fsyscall_table + 8*NR_syscalls - .zero, 0
+90 -81
arch/ia64/kernel/gate.S
··· 13 13 #include <asm/sigcontext.h> 14 14 #include <asm/system.h> 15 15 #include <asm/unistd.h> 16 + #include "paravirt_inst.h" 16 17 17 18 /* 18 19 * We can't easily refer to symbols inside the kernel. To avoid full runtime relocation, ··· 48 47 br.ret.sptk.many b6 49 48 } 50 49 END(__kernel_syscall_via_break) 51 - 52 - /* 53 - * On entry: 54 - * r11 = saved ar.pfs 55 - * r15 = system call # 56 - * b0 = saved return address 57 - * b6 = return address 58 - * On exit: 59 - * r11 = saved ar.pfs 60 - * r15 = system call # 61 - * b0 = saved return address 62 - * all other "scratch" registers: undefined 63 - * all "preserved" registers: same as on entry 64 - */ 65 - 66 - GLOBAL_ENTRY(__kernel_syscall_via_epc) 67 - .prologue 68 - .altrp b6 69 - .body 70 - { 71 - /* 72 - * Note: the kernel cannot assume that the first two instructions in this 73 - * bundle get executed. The remaining code must be safe even if 74 - * they do not get executed. 75 - */ 76 - adds r17=-1024,r15 // A 77 - mov r10=0 // A default to successful syscall execution 78 - epc // B causes split-issue 79 - } 80 - ;; 81 - rsm psr.be | psr.i // M2 (5 cyc to srlz.d) 82 - LOAD_FSYSCALL_TABLE(r14) // X 83 - ;; 84 - mov r16=IA64_KR(CURRENT) // M2 (12 cyc) 85 - shladd r18=r17,3,r14 // A 86 - mov r19=NR_syscalls-1 // A 87 - ;; 88 - lfetch [r18] // M0|1 89 - mov r29=psr // M2 (12 cyc) 90 - // If r17 is a NaT, p6 will be zero 91 - cmp.geu p6,p7=r19,r17 // A (sysnr > 0 && sysnr < 1024+NR_syscalls)? 92 - ;; 93 - mov r21=ar.fpsr // M2 (12 cyc) 94 - tnat.nz p10,p9=r15 // I0 95 - mov.i r26=ar.pfs // I0 (would stall anyhow due to srlz.d...) 96 - ;; 97 - srlz.d // M0 (forces split-issue) ensure PSR.BE==0 98 - (p6) ld8 r18=[r18] // M0|1 99 - nop.i 0 100 - ;; 101 - nop.m 0 102 - (p6) tbit.z.unc p8,p0=r18,0 // I0 (dual-issues with "mov b7=r18"!) 
103 - nop.i 0 104 - ;; 105 - (p8) ssm psr.i 106 - (p6) mov b7=r18 // I0 107 - (p8) br.dptk.many b7 // B 108 - 109 - mov r27=ar.rsc // M2 (12 cyc) 110 - /* 111 - * brl.cond doesn't work as intended because the linker would convert this branch 112 - * into a branch to a PLT. Perhaps there will be a way to avoid this with some 113 - * future version of the linker. In the meantime, we just use an indirect branch 114 - * instead. 115 - */ 116 - #ifdef CONFIG_ITANIUM 117 - (p6) add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry 118 - ;; 119 - (p6) ld8 r14=[r14] // r14 <- fsys_bubble_down 120 - ;; 121 - (p6) mov b7=r14 122 - (p6) br.sptk.many b7 123 - #else 124 - BRL_COND_FSYS_BUBBLE_DOWN(p6) 125 - #endif 126 - ssm psr.i 127 - mov r10=-1 128 - (p10) mov r8=EINVAL 129 - (p9) mov r8=ENOSYS 130 - FSYS_RETURN 131 - END(__kernel_syscall_via_epc) 132 50 133 51 # define ARG0_OFF (16 + IA64_SIGFRAME_ARG0_OFFSET) 134 52 # define ARG1_OFF (16 + IA64_SIGFRAME_ARG1_OFFSET) ··· 294 374 // invala not necessary as that will happen when returning to user-mode 295 375 br.cond.sptk back_from_restore_rbs 296 376 END(__kernel_sigtramp) 377 + 378 + /* 379 + * On entry: 380 + * r11 = saved ar.pfs 381 + * r15 = system call # 382 + * b0 = saved return address 383 + * b6 = return address 384 + * On exit: 385 + * r11 = saved ar.pfs 386 + * r15 = system call # 387 + * b0 = saved return address 388 + * all other "scratch" registers: undefined 389 + * all "preserved" registers: same as on entry 390 + */ 391 + 392 + GLOBAL_ENTRY(__kernel_syscall_via_epc) 393 + .prologue 394 + .altrp b6 395 + .body 396 + { 397 + /* 398 + * Note: the kernel cannot assume that the first two instructions in this 399 + * bundle get executed. The remaining code must be safe even if 400 + * they do not get executed. 
401 + */ 402 + adds r17=-1024,r15 // A 403 + mov r10=0 // A default to successful syscall execution 404 + epc // B causes split-issue 405 + } 406 + ;; 407 + RSM_PSR_BE_I(r20, r22) // M2 (5 cyc to srlz.d) 408 + LOAD_FSYSCALL_TABLE(r14) // X 409 + ;; 410 + mov r16=IA64_KR(CURRENT) // M2 (12 cyc) 411 + shladd r18=r17,3,r14 // A 412 + mov r19=NR_syscalls-1 // A 413 + ;; 414 + lfetch [r18] // M0|1 415 + MOV_FROM_PSR(p0, r29, r8) // M2 (12 cyc) 416 + // If r17 is a NaT, p6 will be zero 417 + cmp.geu p6,p7=r19,r17 // A (sysnr > 0 && sysnr < 1024+NR_syscalls)? 418 + ;; 419 + mov r21=ar.fpsr // M2 (12 cyc) 420 + tnat.nz p10,p9=r15 // I0 421 + mov.i r26=ar.pfs // I0 (would stall anyhow due to srlz.d...) 422 + ;; 423 + srlz.d // M0 (forces split-issue) ensure PSR.BE==0 424 + (p6) ld8 r18=[r18] // M0|1 425 + nop.i 0 426 + ;; 427 + nop.m 0 428 + (p6) tbit.z.unc p8,p0=r18,0 // I0 (dual-issues with "mov b7=r18"!) 429 + nop.i 0 430 + ;; 431 + SSM_PSR_I(p8, p14, r25) 432 + (p6) mov b7=r18 // I0 433 + (p8) br.dptk.many b7 // B 434 + 435 + mov r27=ar.rsc // M2 (12 cyc) 436 + /* 437 + * brl.cond doesn't work as intended because the linker would convert this branch 438 + * into a branch to a PLT. Perhaps there will be a way to avoid this with some 439 + * future version of the linker. In the meantime, we just use an indirect branch 440 + * instead. 441 + */ 442 + #ifdef CONFIG_ITANIUM 443 + (p6) add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry 444 + ;; 445 + (p6) ld8 r14=[r14] // r14 <- fsys_bubble_down 446 + ;; 447 + (p6) mov b7=r14 448 + (p6) br.sptk.many b7 449 + #else 450 + BRL_COND_FSYS_BUBBLE_DOWN(p6) 451 + #endif 452 + SSM_PSR_I(p0, p14, r10) 453 + mov r10=-1 454 + (p10) mov r8=EINVAL 455 + (p9) mov r8=ENOSYS 456 + FSYS_RETURN 457 + 458 + #ifdef CONFIG_PARAVIRT 459 + /* 460 + * padd to make the size of this symbol constant 461 + * independent of paravirtualization. 462 + */ 463 + .align PAGE_SIZE / 8 464 + #endif 465 + END(__kernel_syscall_via_epc)
+9 -8
arch/ia64/kernel/gate.lds.S
··· 7 7 8 8 9 9 #include <asm/system.h> 10 + #include "paravirt_patchlist.h" 10 11 11 12 SECTIONS 12 13 { ··· 34 33 . = GATE_ADDR + 0x600; 35 34 36 35 .data.patch : { 37 - __start_gate_mckinley_e9_patchlist = .; 36 + __paravirt_start_gate_mckinley_e9_patchlist = .; 38 37 *(.data.patch.mckinley_e9) 39 - __end_gate_mckinley_e9_patchlist = .; 38 + __paravirt_end_gate_mckinley_e9_patchlist = .; 40 39 41 - __start_gate_vtop_patchlist = .; 40 + __paravirt_start_gate_vtop_patchlist = .; 42 41 *(.data.patch.vtop) 43 - __end_gate_vtop_patchlist = .; 42 + __paravirt_end_gate_vtop_patchlist = .; 44 43 45 - __start_gate_fsyscall_patchlist = .; 44 + __paravirt_start_gate_fsyscall_patchlist = .; 46 45 *(.data.patch.fsyscall_table) 47 - __end_gate_fsyscall_patchlist = .; 46 + __paravirt_end_gate_fsyscall_patchlist = .; 48 47 49 - __start_gate_brl_fsys_bubble_down_patchlist = .; 48 + __paravirt_start_gate_brl_fsys_bubble_down_patchlist = .; 50 49 *(.data.patch.brl_fsys_bubble_down) 51 - __end_gate_brl_fsys_bubble_down_patchlist = .; 50 + __paravirt_end_gate_brl_fsys_bubble_down_patchlist = .; 52 51 } :readable 53 52 54 53 .IA_64.unwind_info : { *(.IA_64.unwind_info*) }
+8 -2
arch/ia64/kernel/head.S
··· 1050 1050 * except that the multiplication and the shift are done with 128-bit 1051 1051 * intermediate precision so that we can produce a full 64-bit result. 1052 1052 */ 1053 - GLOBAL_ENTRY(sched_clock) 1053 + GLOBAL_ENTRY(ia64_native_sched_clock) 1054 1054 addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 1055 1055 mov.m r9=ar.itc // fetch cycle-counter (35 cyc) 1056 1056 ;; ··· 1066 1066 ;; 1067 1067 shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT 1068 1068 br.ret.sptk.many rp 1069 - END(sched_clock) 1069 + END(ia64_native_sched_clock) 1070 + #ifndef CONFIG_PARAVIRT 1071 + //unsigned long long 1072 + //sched_clock(void) __attribute__((alias("ia64_native_sched_clock"))); 1073 + .global sched_clock 1074 + sched_clock = ia64_native_sched_clock 1075 + #endif 1070 1076 1071 1077 #ifdef CONFIG_VIRT_CPU_ACCOUNTING 1072 1078 GLOBAL_ENTRY(cycle_to_cputime)
+1 -1
arch/ia64/kernel/ivt.S
··· 804 804 /////////////////////////////////////////////////////////////////////// 805 805 st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag 806 806 #ifdef CONFIG_VIRT_CPU_ACCOUNTING 807 - mov.m r30=ar.itc // M get cycle for accounting 807 + MOV_FROM_ITC(p0, p14, r30, r18) // M get cycle for accounting 808 808 #else 809 809 mov b6=r30 // I0 setup syscall handler branch reg early 810 810 #endif
+3 -3
arch/ia64/kernel/mca.c
··· 1456 1456 1457 1457 ia64_mca_cmc_int_handler(cmc_irq, arg); 1458 1458 1459 - for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++); 1459 + cpuid = cpumask_next(cpuid+1, cpu_online_mask); 1460 1460 1461 - if (cpuid < NR_CPUS) { 1461 + if (cpuid < nr_cpu_ids) { 1462 1462 platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0); 1463 1463 } else { 1464 1464 /* If no log record, switch out of polling mode */ ··· 1525 1525 1526 1526 ia64_mca_cpe_int_handler(cpe_irq, arg); 1527 1527 1528 - for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++); 1528 + cpuid = cpumask_next(cpuid+1, cpu_online_mask); 1529 1529 1530 1530 if (cpuid < NR_CPUS) { 1531 1531 platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
+33 -2
arch/ia64/kernel/module.c
··· 446 446 mod->arch.opd = s; 447 447 else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0) 448 448 mod->arch.unwind = s; 449 + #ifdef CONFIG_PARAVIRT 450 + else if (strcmp(".paravirt_bundles", 451 + secstrings + s->sh_name) == 0) 452 + mod->arch.paravirt_bundles = s; 453 + else if (strcmp(".paravirt_insts", 454 + secstrings + s->sh_name) == 0) 455 + mod->arch.paravirt_insts = s; 456 + #endif 449 457 450 458 if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) { 451 459 printk(KERN_ERR "%s: sections missing\n", mod->name); ··· 533 525 goto found; 534 526 535 527 /* Not enough GOT entries? */ 536 - if (e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size)) 537 - BUG(); 528 + BUG_ON(e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size)); 538 529 539 530 e->val = value; 540 531 ++mod->arch.next_got_entry; ··· 928 921 DEBUGP("%s: init: entry=%p\n", __func__, mod->init); 929 922 if (mod->arch.unwind) 930 923 register_unwind_table(mod); 924 + #ifdef CONFIG_PARAVIRT 925 + if (mod->arch.paravirt_bundles) { 926 + struct paravirt_patch_site_bundle *start = 927 + (struct paravirt_patch_site_bundle *) 928 + mod->arch.paravirt_bundles->sh_addr; 929 + struct paravirt_patch_site_bundle *end = 930 + (struct paravirt_patch_site_bundle *) 931 + (mod->arch.paravirt_bundles->sh_addr + 932 + mod->arch.paravirt_bundles->sh_size); 933 + 934 + paravirt_patch_apply_bundle(start, end); 935 + } 936 + if (mod->arch.paravirt_insts) { 937 + struct paravirt_patch_site_inst *start = 938 + (struct paravirt_patch_site_inst *) 939 + mod->arch.paravirt_insts->sh_addr; 940 + struct paravirt_patch_site_inst *end = 941 + (struct paravirt_patch_site_inst *) 942 + (mod->arch.paravirt_insts->sh_addr + 943 + mod->arch.paravirt_insts->sh_size); 944 + 945 + paravirt_patch_apply_inst(start, end); 946 + } 947 + #endif 931 948 return 0; 932 949 } 933 950
+535 -4
arch/ia64/kernel/paravirt.c
··· 46 46 * initialization hooks. 47 47 */ 48 48 49 - struct pv_init_ops pv_init_ops; 49 + static void __init 50 + ia64_native_patch_branch(unsigned long tag, unsigned long type); 51 + 52 + struct pv_init_ops pv_init_ops = 53 + { 54 + #ifdef ASM_SUPPORTED 55 + .patch_bundle = ia64_native_patch_bundle, 56 + #endif 57 + .patch_branch = ia64_native_patch_branch, 58 + }; 50 59 51 60 /*************************************************************************** 52 61 * pv_cpu_ops 53 62 * intrinsics hooks. 54 63 */ 55 64 65 + #ifndef ASM_SUPPORTED 56 66 /* ia64_native_xxx are macros so that we have to make them real functions */ 57 67 58 68 #define DEFINE_VOID_FUNC1(name) \ ··· 70 60 ia64_native_ ## name ## _func(unsigned long arg) \ 71 61 { \ 72 62 ia64_native_ ## name(arg); \ 73 - } \ 63 + } 64 + 65 + #define DEFINE_VOID_FUNC1_VOID(name) \ 66 + static void \ 67 + ia64_native_ ## name ## _func(void *arg) \ 68 + { \ 69 + ia64_native_ ## name(arg); \ 70 + } 74 71 75 72 #define DEFINE_VOID_FUNC2(name) \ 76 73 static void \ ··· 85 68 unsigned long arg1) \ 86 69 { \ 87 70 ia64_native_ ## name(arg0, arg1); \ 88 - } \ 71 + } 89 72 90 73 #define DEFINE_FUNC0(name) \ 91 74 static unsigned long \ ··· 101 84 return ia64_native_ ## name(arg); \ 102 85 } \ 103 86 104 - DEFINE_VOID_FUNC1(fc); 87 + DEFINE_VOID_FUNC1_VOID(fc); 105 88 DEFINE_VOID_FUNC1(intrin_local_irq_restore); 106 89 107 90 DEFINE_VOID_FUNC2(ptcga); ··· 291 274 break; 292 275 } 293 276 } 277 + #else 278 + 279 + #define __DEFINE_FUNC(name, code) \ 280 + extern const char ia64_native_ ## name ## _direct_start[]; \ 281 + extern const char ia64_native_ ## name ## _direct_end[]; \ 282 + asm (".align 32\n" \ 283 + ".proc ia64_native_" #name "_func\n" \ 284 + "ia64_native_" #name "_func:\n" \ 285 + "ia64_native_" #name "_direct_start:\n" \ 286 + code \ 287 + "ia64_native_" #name "_direct_end:\n" \ 288 + "br.cond.sptk.many b6\n" \ 289 + ".endp ia64_native_" #name "_func\n") 290 + 291 + #define DEFINE_VOID_FUNC0(name, code) \ 
292 + extern void \ 293 + ia64_native_ ## name ## _func(void); \ 294 + __DEFINE_FUNC(name, code) 295 + 296 + #define DEFINE_VOID_FUNC1(name, code) \ 297 + extern void \ 298 + ia64_native_ ## name ## _func(unsigned long arg); \ 299 + __DEFINE_FUNC(name, code) 300 + 301 + #define DEFINE_VOID_FUNC1_VOID(name, code) \ 302 + extern void \ 303 + ia64_native_ ## name ## _func(void *arg); \ 304 + __DEFINE_FUNC(name, code) 305 + 306 + #define DEFINE_VOID_FUNC2(name, code) \ 307 + extern void \ 308 + ia64_native_ ## name ## _func(unsigned long arg0, \ 309 + unsigned long arg1); \ 310 + __DEFINE_FUNC(name, code) 311 + 312 + #define DEFINE_FUNC0(name, code) \ 313 + extern unsigned long \ 314 + ia64_native_ ## name ## _func(void); \ 315 + __DEFINE_FUNC(name, code) 316 + 317 + #define DEFINE_FUNC1(name, type, code) \ 318 + extern unsigned long \ 319 + ia64_native_ ## name ## _func(type arg); \ 320 + __DEFINE_FUNC(name, code) 321 + 322 + DEFINE_VOID_FUNC1_VOID(fc, 323 + "fc r8\n"); 324 + DEFINE_VOID_FUNC1(intrin_local_irq_restore, 325 + ";;\n" 326 + " cmp.ne p6, p7 = r8, r0\n" 327 + ";;\n" 328 + "(p6) ssm psr.i\n" 329 + "(p7) rsm psr.i\n" 330 + ";;\n" 331 + "(p6) srlz.d\n"); 332 + 333 + DEFINE_VOID_FUNC2(ptcga, 334 + "ptc.ga r8, r9\n"); 335 + DEFINE_VOID_FUNC2(set_rr, 336 + "mov rr[r8] = r9\n"); 337 + 338 + /* ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I */ 339 + DEFINE_FUNC0(get_psr_i, 340 + "mov r2 = " __stringify(1 << IA64_PSR_I_BIT) "\n" 341 + "mov r8 = psr\n" 342 + ";;\n" 343 + "and r8 = r2, r8\n"); 344 + 345 + DEFINE_FUNC1(thash, unsigned long, 346 + "thash r8 = r8\n"); 347 + DEFINE_FUNC1(get_cpuid, int, 348 + "mov r8 = cpuid[r8]\n"); 349 + DEFINE_FUNC1(get_pmd, int, 350 + "mov r8 = pmd[r8]\n"); 351 + DEFINE_FUNC1(get_rr, unsigned long, 352 + "mov r8 = rr[r8]\n"); 353 + 354 + DEFINE_VOID_FUNC0(ssm_i, 355 + "ssm psr.i\n"); 356 + DEFINE_VOID_FUNC0(rsm_i, 357 + "rsm psr.i\n"); 358 + 359 + extern void 360 + ia64_native_set_rr0_to_rr4_func(unsigned long val0, unsigned long 
val1, 361 + unsigned long val2, unsigned long val3, 362 + unsigned long val4); 363 + __DEFINE_FUNC(set_rr0_to_rr4, 364 + "mov rr[r0] = r8\n" 365 + "movl r2 = 0x2000000000000000\n" 366 + ";;\n" 367 + "mov rr[r2] = r9\n" 368 + "shl r3 = r2, 1\n" /* movl r3 = 0x4000000000000000 */ 369 + ";;\n" 370 + "add r2 = r2, r3\n" /* movl r2 = 0x6000000000000000 */ 371 + "mov rr[r3] = r10\n" 372 + ";;\n" 373 + "mov rr[r2] = r11\n" 374 + "shl r3 = r3, 1\n" /* movl r3 = 0x8000000000000000 */ 375 + ";;\n" 376 + "mov rr[r3] = r14\n"); 377 + 378 + extern unsigned long ia64_native_getreg_func(int regnum); 379 + asm(".global ia64_native_getreg_func\n"); 380 + #define __DEFINE_GET_REG(id, reg) \ 381 + "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ 382 + ";;\n" \ 383 + "cmp.eq p6, p0 = r2, r8\n" \ 384 + ";;\n" \ 385 + "(p6) mov r8 = " #reg "\n" \ 386 + "(p6) br.cond.sptk.many b6\n" \ 387 + ";;\n" 388 + #define __DEFINE_GET_AR(id, reg) __DEFINE_GET_REG(AR_ ## id, ar.reg) 389 + #define __DEFINE_GET_CR(id, reg) __DEFINE_GET_REG(CR_ ## id, cr.reg) 390 + 391 + __DEFINE_FUNC(getreg, 392 + __DEFINE_GET_REG(GP, gp) 393 + /*__DEFINE_GET_REG(IP, ip)*/ /* returned ip value shouldn't be constant */ 394 + __DEFINE_GET_REG(PSR, psr) 395 + __DEFINE_GET_REG(TP, tp) 396 + __DEFINE_GET_REG(SP, sp) 397 + 398 + __DEFINE_GET_REG(AR_KR0, ar0) 399 + __DEFINE_GET_REG(AR_KR1, ar1) 400 + __DEFINE_GET_REG(AR_KR2, ar2) 401 + __DEFINE_GET_REG(AR_KR3, ar3) 402 + __DEFINE_GET_REG(AR_KR4, ar4) 403 + __DEFINE_GET_REG(AR_KR5, ar5) 404 + __DEFINE_GET_REG(AR_KR6, ar6) 405 + __DEFINE_GET_REG(AR_KR7, ar7) 406 + __DEFINE_GET_AR(RSC, rsc) 407 + __DEFINE_GET_AR(BSP, bsp) 408 + __DEFINE_GET_AR(BSPSTORE, bspstore) 409 + __DEFINE_GET_AR(RNAT, rnat) 410 + __DEFINE_GET_AR(FCR, fcr) 411 + __DEFINE_GET_AR(EFLAG, eflag) 412 + __DEFINE_GET_AR(CSD, csd) 413 + __DEFINE_GET_AR(SSD, ssd) 414 + __DEFINE_GET_REG(AR_CFLAG, ar27) 415 + __DEFINE_GET_AR(FSR, fsr) 416 + __DEFINE_GET_AR(FIR, fir) 417 + __DEFINE_GET_AR(FDR, fdr) 418 + 
__DEFINE_GET_AR(CCV, ccv) 419 + __DEFINE_GET_AR(UNAT, unat) 420 + __DEFINE_GET_AR(FPSR, fpsr) 421 + __DEFINE_GET_AR(ITC, itc) 422 + __DEFINE_GET_AR(PFS, pfs) 423 + __DEFINE_GET_AR(LC, lc) 424 + __DEFINE_GET_AR(EC, ec) 425 + 426 + __DEFINE_GET_CR(DCR, dcr) 427 + __DEFINE_GET_CR(ITM, itm) 428 + __DEFINE_GET_CR(IVA, iva) 429 + __DEFINE_GET_CR(PTA, pta) 430 + __DEFINE_GET_CR(IPSR, ipsr) 431 + __DEFINE_GET_CR(ISR, isr) 432 + __DEFINE_GET_CR(IIP, iip) 433 + __DEFINE_GET_CR(IFA, ifa) 434 + __DEFINE_GET_CR(ITIR, itir) 435 + __DEFINE_GET_CR(IIPA, iipa) 436 + __DEFINE_GET_CR(IFS, ifs) 437 + __DEFINE_GET_CR(IIM, iim) 438 + __DEFINE_GET_CR(IHA, iha) 439 + __DEFINE_GET_CR(LID, lid) 440 + __DEFINE_GET_CR(IVR, ivr) 441 + __DEFINE_GET_CR(TPR, tpr) 442 + __DEFINE_GET_CR(EOI, eoi) 443 + __DEFINE_GET_CR(IRR0, irr0) 444 + __DEFINE_GET_CR(IRR1, irr1) 445 + __DEFINE_GET_CR(IRR2, irr2) 446 + __DEFINE_GET_CR(IRR3, irr3) 447 + __DEFINE_GET_CR(ITV, itv) 448 + __DEFINE_GET_CR(PMV, pmv) 449 + __DEFINE_GET_CR(CMCV, cmcv) 450 + __DEFINE_GET_CR(LRR0, lrr0) 451 + __DEFINE_GET_CR(LRR1, lrr1) 452 + 453 + "mov r8 = -1\n" /* unsupported case */ 454 + ); 455 + 456 + extern void ia64_native_setreg_func(int regnum, unsigned long val); 457 + asm(".global ia64_native_setreg_func\n"); 458 + #define __DEFINE_SET_REG(id, reg) \ 459 + "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ 460 + ";;\n" \ 461 + "cmp.eq p6, p0 = r2, r9\n" \ 462 + ";;\n" \ 463 + "(p6) mov " #reg " = r8\n" \ 464 + "(p6) br.cond.sptk.many b6\n" \ 465 + ";;\n" 466 + #define __DEFINE_SET_AR(id, reg) __DEFINE_SET_REG(AR_ ## id, ar.reg) 467 + #define __DEFINE_SET_CR(id, reg) __DEFINE_SET_REG(CR_ ## id, cr.reg) 468 + __DEFINE_FUNC(setreg, 469 + "mov r2 = " __stringify(_IA64_REG_PSR_L) "\n" 470 + ";;\n" 471 + "cmp.eq p6, p0 = r2, r9\n" 472 + ";;\n" 473 + "(p6) mov psr.l = r8\n" 474 + #ifdef HAVE_SERIALIZE_DIRECTIVE 475 + ".serialize.data\n" 476 + #endif 477 + "(p6) br.cond.sptk.many b6\n" 478 + __DEFINE_SET_REG(GP, gp) 479 + 
__DEFINE_SET_REG(SP, sp) 480 + 481 + __DEFINE_SET_REG(AR_KR0, ar0) 482 + __DEFINE_SET_REG(AR_KR1, ar1) 483 + __DEFINE_SET_REG(AR_KR2, ar2) 484 + __DEFINE_SET_REG(AR_KR3, ar3) 485 + __DEFINE_SET_REG(AR_KR4, ar4) 486 + __DEFINE_SET_REG(AR_KR5, ar5) 487 + __DEFINE_SET_REG(AR_KR6, ar6) 488 + __DEFINE_SET_REG(AR_KR7, ar7) 489 + __DEFINE_SET_AR(RSC, rsc) 490 + __DEFINE_SET_AR(BSP, bsp) 491 + __DEFINE_SET_AR(BSPSTORE, bspstore) 492 + __DEFINE_SET_AR(RNAT, rnat) 493 + __DEFINE_SET_AR(FCR, fcr) 494 + __DEFINE_SET_AR(EFLAG, eflag) 495 + __DEFINE_SET_AR(CSD, csd) 496 + __DEFINE_SET_AR(SSD, ssd) 497 + __DEFINE_SET_REG(AR_CFLAG, ar27) 498 + __DEFINE_SET_AR(FSR, fsr) 499 + __DEFINE_SET_AR(FIR, fir) 500 + __DEFINE_SET_AR(FDR, fdr) 501 + __DEFINE_SET_AR(CCV, ccv) 502 + __DEFINE_SET_AR(UNAT, unat) 503 + __DEFINE_SET_AR(FPSR, fpsr) 504 + __DEFINE_SET_AR(ITC, itc) 505 + __DEFINE_SET_AR(PFS, pfs) 506 + __DEFINE_SET_AR(LC, lc) 507 + __DEFINE_SET_AR(EC, ec) 508 + 509 + __DEFINE_SET_CR(DCR, dcr) 510 + __DEFINE_SET_CR(ITM, itm) 511 + __DEFINE_SET_CR(IVA, iva) 512 + __DEFINE_SET_CR(PTA, pta) 513 + __DEFINE_SET_CR(IPSR, ipsr) 514 + __DEFINE_SET_CR(ISR, isr) 515 + __DEFINE_SET_CR(IIP, iip) 516 + __DEFINE_SET_CR(IFA, ifa) 517 + __DEFINE_SET_CR(ITIR, itir) 518 + __DEFINE_SET_CR(IIPA, iipa) 519 + __DEFINE_SET_CR(IFS, ifs) 520 + __DEFINE_SET_CR(IIM, iim) 521 + __DEFINE_SET_CR(IHA, iha) 522 + __DEFINE_SET_CR(LID, lid) 523 + __DEFINE_SET_CR(IVR, ivr) 524 + __DEFINE_SET_CR(TPR, tpr) 525 + __DEFINE_SET_CR(EOI, eoi) 526 + __DEFINE_SET_CR(IRR0, irr0) 527 + __DEFINE_SET_CR(IRR1, irr1) 528 + __DEFINE_SET_CR(IRR2, irr2) 529 + __DEFINE_SET_CR(IRR3, irr3) 530 + __DEFINE_SET_CR(ITV, itv) 531 + __DEFINE_SET_CR(PMV, pmv) 532 + __DEFINE_SET_CR(CMCV, cmcv) 533 + __DEFINE_SET_CR(LRR0, lrr0) 534 + __DEFINE_SET_CR(LRR1, lrr1) 535 + ); 536 + #endif 294 537 295 538 struct pv_cpu_ops pv_cpu_ops = { 296 539 .fc = ia64_native_fc_func, ··· 643 366 644 367 struct pv_time_ops pv_time_ops = { 645 368 .do_steal_accounting = 
ia64_native_do_steal_accounting, 369 + .sched_clock = ia64_native_sched_clock, 646 370 }; 371 + 372 + /*************************************************************************** 373 + * binary pacthing 374 + * pv_init_ops.patch_bundle 375 + */ 376 + 377 + #ifdef ASM_SUPPORTED 378 + #define IA64_NATIVE_PATCH_DEFINE_GET_REG(name, reg) \ 379 + __DEFINE_FUNC(get_ ## name, \ 380 + ";;\n" \ 381 + "mov r8 = " #reg "\n" \ 382 + ";;\n") 383 + 384 + #define IA64_NATIVE_PATCH_DEFINE_SET_REG(name, reg) \ 385 + __DEFINE_FUNC(set_ ## name, \ 386 + ";;\n" \ 387 + "mov " #reg " = r8\n" \ 388 + ";;\n") 389 + 390 + #define IA64_NATIVE_PATCH_DEFINE_REG(name, reg) \ 391 + IA64_NATIVE_PATCH_DEFINE_GET_REG(name, reg); \ 392 + IA64_NATIVE_PATCH_DEFINE_SET_REG(name, reg) \ 393 + 394 + #define IA64_NATIVE_PATCH_DEFINE_AR(name, reg) \ 395 + IA64_NATIVE_PATCH_DEFINE_REG(ar_ ## name, ar.reg) 396 + 397 + #define IA64_NATIVE_PATCH_DEFINE_CR(name, reg) \ 398 + IA64_NATIVE_PATCH_DEFINE_REG(cr_ ## name, cr.reg) 399 + 400 + 401 + IA64_NATIVE_PATCH_DEFINE_GET_REG(psr, psr); 402 + IA64_NATIVE_PATCH_DEFINE_GET_REG(tp, tp); 403 + 404 + /* IA64_NATIVE_PATCH_DEFINE_SET_REG(psr_l, psr.l); */ 405 + __DEFINE_FUNC(set_psr_l, 406 + ";;\n" 407 + "mov psr.l = r8\n" 408 + #ifdef HAVE_SERIALIZE_DIRECTIVE 409 + ".serialize.data\n" 410 + #endif 411 + ";;\n"); 412 + 413 + IA64_NATIVE_PATCH_DEFINE_REG(gp, gp); 414 + IA64_NATIVE_PATCH_DEFINE_REG(sp, sp); 415 + 416 + IA64_NATIVE_PATCH_DEFINE_REG(kr0, ar0); 417 + IA64_NATIVE_PATCH_DEFINE_REG(kr1, ar1); 418 + IA64_NATIVE_PATCH_DEFINE_REG(kr2, ar2); 419 + IA64_NATIVE_PATCH_DEFINE_REG(kr3, ar3); 420 + IA64_NATIVE_PATCH_DEFINE_REG(kr4, ar4); 421 + IA64_NATIVE_PATCH_DEFINE_REG(kr5, ar5); 422 + IA64_NATIVE_PATCH_DEFINE_REG(kr6, ar6); 423 + IA64_NATIVE_PATCH_DEFINE_REG(kr7, ar7); 424 + 425 + IA64_NATIVE_PATCH_DEFINE_AR(rsc, rsc); 426 + IA64_NATIVE_PATCH_DEFINE_AR(bsp, bsp); 427 + IA64_NATIVE_PATCH_DEFINE_AR(bspstore, bspstore); 428 + IA64_NATIVE_PATCH_DEFINE_AR(rnat, rnat); 
429 + IA64_NATIVE_PATCH_DEFINE_AR(fcr, fcr); 430 + IA64_NATIVE_PATCH_DEFINE_AR(eflag, eflag); 431 + IA64_NATIVE_PATCH_DEFINE_AR(csd, csd); 432 + IA64_NATIVE_PATCH_DEFINE_AR(ssd, ssd); 433 + IA64_NATIVE_PATCH_DEFINE_REG(ar27, ar27); 434 + IA64_NATIVE_PATCH_DEFINE_AR(fsr, fsr); 435 + IA64_NATIVE_PATCH_DEFINE_AR(fir, fir); 436 + IA64_NATIVE_PATCH_DEFINE_AR(fdr, fdr); 437 + IA64_NATIVE_PATCH_DEFINE_AR(ccv, ccv); 438 + IA64_NATIVE_PATCH_DEFINE_AR(unat, unat); 439 + IA64_NATIVE_PATCH_DEFINE_AR(fpsr, fpsr); 440 + IA64_NATIVE_PATCH_DEFINE_AR(itc, itc); 441 + IA64_NATIVE_PATCH_DEFINE_AR(pfs, pfs); 442 + IA64_NATIVE_PATCH_DEFINE_AR(lc, lc); 443 + IA64_NATIVE_PATCH_DEFINE_AR(ec, ec); 444 + 445 + IA64_NATIVE_PATCH_DEFINE_CR(dcr, dcr); 446 + IA64_NATIVE_PATCH_DEFINE_CR(itm, itm); 447 + IA64_NATIVE_PATCH_DEFINE_CR(iva, iva); 448 + IA64_NATIVE_PATCH_DEFINE_CR(pta, pta); 449 + IA64_NATIVE_PATCH_DEFINE_CR(ipsr, ipsr); 450 + IA64_NATIVE_PATCH_DEFINE_CR(isr, isr); 451 + IA64_NATIVE_PATCH_DEFINE_CR(iip, iip); 452 + IA64_NATIVE_PATCH_DEFINE_CR(ifa, ifa); 453 + IA64_NATIVE_PATCH_DEFINE_CR(itir, itir); 454 + IA64_NATIVE_PATCH_DEFINE_CR(iipa, iipa); 455 + IA64_NATIVE_PATCH_DEFINE_CR(ifs, ifs); 456 + IA64_NATIVE_PATCH_DEFINE_CR(iim, iim); 457 + IA64_NATIVE_PATCH_DEFINE_CR(iha, iha); 458 + IA64_NATIVE_PATCH_DEFINE_CR(lid, lid); 459 + IA64_NATIVE_PATCH_DEFINE_CR(ivr, ivr); 460 + IA64_NATIVE_PATCH_DEFINE_CR(tpr, tpr); 461 + IA64_NATIVE_PATCH_DEFINE_CR(eoi, eoi); 462 + IA64_NATIVE_PATCH_DEFINE_CR(irr0, irr0); 463 + IA64_NATIVE_PATCH_DEFINE_CR(irr1, irr1); 464 + IA64_NATIVE_PATCH_DEFINE_CR(irr2, irr2); 465 + IA64_NATIVE_PATCH_DEFINE_CR(irr3, irr3); 466 + IA64_NATIVE_PATCH_DEFINE_CR(itv, itv); 467 + IA64_NATIVE_PATCH_DEFINE_CR(pmv, pmv); 468 + IA64_NATIVE_PATCH_DEFINE_CR(cmcv, cmcv); 469 + IA64_NATIVE_PATCH_DEFINE_CR(lrr0, lrr0); 470 + IA64_NATIVE_PATCH_DEFINE_CR(lrr1, lrr1); 471 + 472 + static const struct paravirt_patch_bundle_elem ia64_native_patch_bundle_elems[] 473 + __initdata_or_module = 
474 + { 475 + #define IA64_NATIVE_PATCH_BUNDLE_ELEM(name, type) \ 476 + { \ 477 + (void*)ia64_native_ ## name ## _direct_start, \ 478 + (void*)ia64_native_ ## name ## _direct_end, \ 479 + PARAVIRT_PATCH_TYPE_ ## type, \ 480 + } 481 + 482 + IA64_NATIVE_PATCH_BUNDLE_ELEM(fc, FC), 483 + IA64_NATIVE_PATCH_BUNDLE_ELEM(thash, THASH), 484 + IA64_NATIVE_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID), 485 + IA64_NATIVE_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD), 486 + IA64_NATIVE_PATCH_BUNDLE_ELEM(ptcga, PTCGA), 487 + IA64_NATIVE_PATCH_BUNDLE_ELEM(get_rr, GET_RR), 488 + IA64_NATIVE_PATCH_BUNDLE_ELEM(set_rr, SET_RR), 489 + IA64_NATIVE_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4), 490 + IA64_NATIVE_PATCH_BUNDLE_ELEM(ssm_i, SSM_I), 491 + IA64_NATIVE_PATCH_BUNDLE_ELEM(rsm_i, RSM_I), 492 + IA64_NATIVE_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I), 493 + IA64_NATIVE_PATCH_BUNDLE_ELEM(intrin_local_irq_restore, 494 + INTRIN_LOCAL_IRQ_RESTORE), 495 + 496 + #define IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(name, reg) \ 497 + { \ 498 + (void*)ia64_native_get_ ## name ## _direct_start, \ 499 + (void*)ia64_native_get_ ## name ## _direct_end, \ 500 + PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \ 501 + } 502 + 503 + #define IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ 504 + { \ 505 + (void*)ia64_native_set_ ## name ## _direct_start, \ 506 + (void*)ia64_native_set_ ## name ## _direct_end, \ 507 + PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \ 508 + } 509 + 510 + #define IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(name, reg) \ 511 + IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(name, reg), \ 512 + IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ 513 + 514 + #define IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(name, reg) \ 515 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(ar_ ## name, AR_ ## reg) 516 + 517 + #define IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(name, reg) \ 518 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(cr_ ## name, CR_ ## reg) 519 + 520 + IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(psr, PSR), 521 + IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(tp, TP), 522 
+ 523 + IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(psr_l, PSR_L), 524 + 525 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(gp, GP), 526 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(sp, SP), 527 + 528 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr0, AR_KR0), 529 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr1, AR_KR1), 530 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr2, AR_KR2), 531 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr3, AR_KR3), 532 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr4, AR_KR4), 533 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr5, AR_KR5), 534 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr6, AR_KR6), 535 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr7, AR_KR7), 536 + 537 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(rsc, RSC), 538 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(bsp, BSP), 539 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(bspstore, BSPSTORE), 540 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(rnat, RNAT), 541 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fcr, FCR), 542 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(eflag, EFLAG), 543 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(csd, CSD), 544 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ssd, SSD), 545 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(ar27, AR_CFLAG), 546 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fsr, FSR), 547 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fir, FIR), 548 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fdr, FDR), 549 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ccv, CCV), 550 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(unat, UNAT), 551 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fpsr, FPSR), 552 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(itc, ITC), 553 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(pfs, PFS), 554 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(lc, LC), 555 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ec, EC), 556 + 557 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(dcr, DCR), 558 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itm, ITM), 559 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iva, IVA), 560 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(pta, PTA), 561 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ipsr, IPSR), 562 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(isr, ISR), 563 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iip, IIP), 564 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ifa, IFA), 565 + 
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itir, ITIR), 566 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iipa, IIPA), 567 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ifs, IFS), 568 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iim, IIM), 569 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iha, IHA), 570 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lid, LID), 571 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ivr, IVR), 572 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(tpr, TPR), 573 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(eoi, EOI), 574 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr0, IRR0), 575 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr1, IRR1), 576 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr2, IRR2), 577 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr3, IRR3), 578 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itv, ITV), 579 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(pmv, PMV), 580 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(cmcv, CMCV), 581 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lrr0, LRR0), 582 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lrr1, LRR1), 583 + }; 584 + 585 + unsigned long __init_or_module 586 + ia64_native_patch_bundle(void *sbundle, void *ebundle, unsigned long type) 587 + { 588 + const unsigned long nelems = sizeof(ia64_native_patch_bundle_elems) / 589 + sizeof(ia64_native_patch_bundle_elems[0]); 590 + 591 + return __paravirt_patch_apply_bundle(sbundle, ebundle, type, 592 + ia64_native_patch_bundle_elems, 593 + nelems, NULL); 594 + } 595 + #endif /* ASM_SUPPOTED */ 596 + 597 + extern const char ia64_native_switch_to[]; 598 + extern const char ia64_native_leave_syscall[]; 599 + extern const char ia64_native_work_processed_syscall[]; 600 + extern const char ia64_native_leave_kernel[]; 601 + 602 + const struct paravirt_patch_branch_target ia64_native_branch_target[] 603 + __initconst = { 604 + #define PARAVIRT_BR_TARGET(name, type) \ 605 + { \ 606 + ia64_native_ ## name, \ 607 + PARAVIRT_PATCH_TYPE_BR_ ## type, \ 608 + } 609 + PARAVIRT_BR_TARGET(switch_to, SWITCH_TO), 610 + PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL), 611 + PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL), 612 + 
PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL), 613 + }; 614 + 615 + static void __init 616 + ia64_native_patch_branch(unsigned long tag, unsigned long type) 617 + { 618 + const unsigned long nelem = 619 + sizeof(ia64_native_branch_target) / 620 + sizeof(ia64_native_branch_target[0]); 621 + __paravirt_patch_apply_branch(tag, type, 622 + ia64_native_branch_target, nelem); 623 + }
+514
arch/ia64/kernel/paravirt_patch.c
··· 1 + /****************************************************************************** 2 + * linux/arch/ia64/xen/paravirt_patch.c 3 + * 4 + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> 5 + * VA Linux Systems Japan K.K. 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License as published by 9 + * the Free Software Foundation; either version 2 of the License, or 10 + * (at your option) any later version. 11 + * 12 + * This program is distributed in the hope that it will be useful, 13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 + * GNU General Public License for more details. 16 + * 17 + * You should have received a copy of the GNU General Public License 18 + * along with this program; if not, write to the Free Software 19 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 + * 21 + */ 22 + 23 + #include <linux/init.h> 24 + #include <asm/intrinsics.h> 25 + #include <asm/kprobes.h> 26 + #include <asm/paravirt.h> 27 + #include <asm/paravirt_patch.h> 28 + 29 + typedef union ia64_inst { 30 + struct { 31 + unsigned long long qp : 6; 32 + unsigned long long : 31; 33 + unsigned long long opcode : 4; 34 + unsigned long long reserved : 23; 35 + } generic; 36 + unsigned long long l; 37 + } ia64_inst_t; 38 + 39 + /* 40 + * flush_icache_range() can't be used here. 41 + * we are here before cpu_init() which initializes 42 + * ia64_i_cache_stride_shift. flush_icache_range() uses it. 
43 + */ 44 + void __init_or_module 45 + paravirt_flush_i_cache_range(const void *instr, unsigned long size) 46 + { 47 + extern void paravirt_fc_i(const void *addr); 48 + unsigned long i; 49 + 50 + for (i = 0; i < size; i += sizeof(bundle_t)) 51 + paravirt_fc_i(instr + i); 52 + } 53 + 54 + bundle_t* __init_or_module 55 + paravirt_get_bundle(unsigned long tag) 56 + { 57 + return (bundle_t *)(tag & ~3UL); 58 + } 59 + 60 + unsigned long __init_or_module 61 + paravirt_get_slot(unsigned long tag) 62 + { 63 + return tag & 3UL; 64 + } 65 + 66 + unsigned long __init_or_module 67 + paravirt_get_num_inst(unsigned long stag, unsigned long etag) 68 + { 69 + bundle_t *sbundle = paravirt_get_bundle(stag); 70 + unsigned long sslot = paravirt_get_slot(stag); 71 + bundle_t *ebundle = paravirt_get_bundle(etag); 72 + unsigned long eslot = paravirt_get_slot(etag); 73 + 74 + return (ebundle - sbundle) * 3 + eslot - sslot + 1; 75 + } 76 + 77 + unsigned long __init_or_module 78 + paravirt_get_next_tag(unsigned long tag) 79 + { 80 + unsigned long slot = paravirt_get_slot(tag); 81 + 82 + switch (slot) { 83 + case 0: 84 + case 1: 85 + return tag + 1; 86 + case 2: { 87 + bundle_t *bundle = paravirt_get_bundle(tag); 88 + return (unsigned long)(bundle + 1); 89 + } 90 + default: 91 + BUG(); 92 + } 93 + /* NOTREACHED */ 94 + } 95 + 96 + ia64_inst_t __init_or_module 97 + paravirt_read_slot0(const bundle_t *bundle) 98 + { 99 + ia64_inst_t inst; 100 + inst.l = bundle->quad0.slot0; 101 + return inst; 102 + } 103 + 104 + ia64_inst_t __init_or_module 105 + paravirt_read_slot1(const bundle_t *bundle) 106 + { 107 + ia64_inst_t inst; 108 + inst.l = bundle->quad0.slot1_p0 | 109 + ((unsigned long long)bundle->quad1.slot1_p1 << 18UL); 110 + return inst; 111 + } 112 + 113 + ia64_inst_t __init_or_module 114 + paravirt_read_slot2(const bundle_t *bundle) 115 + { 116 + ia64_inst_t inst; 117 + inst.l = bundle->quad1.slot2; 118 + return inst; 119 + } 120 + 121 + ia64_inst_t __init_or_module 122 + 
paravirt_read_inst(unsigned long tag) 123 + { 124 + bundle_t *bundle = paravirt_get_bundle(tag); 125 + unsigned long slot = paravirt_get_slot(tag); 126 + 127 + switch (slot) { 128 + case 0: 129 + return paravirt_read_slot0(bundle); 130 + case 1: 131 + return paravirt_read_slot1(bundle); 132 + case 2: 133 + return paravirt_read_slot2(bundle); 134 + default: 135 + BUG(); 136 + } 137 + /* NOTREACHED */ 138 + } 139 + 140 + void __init_or_module 141 + paravirt_write_slot0(bundle_t *bundle, ia64_inst_t inst) 142 + { 143 + bundle->quad0.slot0 = inst.l; 144 + } 145 + 146 + void __init_or_module 147 + paravirt_write_slot1(bundle_t *bundle, ia64_inst_t inst) 148 + { 149 + bundle->quad0.slot1_p0 = inst.l; 150 + bundle->quad1.slot1_p1 = inst.l >> 18UL; 151 + } 152 + 153 + void __init_or_module 154 + paravirt_write_slot2(bundle_t *bundle, ia64_inst_t inst) 155 + { 156 + bundle->quad1.slot2 = inst.l; 157 + } 158 + 159 + void __init_or_module 160 + paravirt_write_inst(unsigned long tag, ia64_inst_t inst) 161 + { 162 + bundle_t *bundle = paravirt_get_bundle(tag); 163 + unsigned long slot = paravirt_get_slot(tag); 164 + 165 + switch (slot) { 166 + case 0: 167 + paravirt_write_slot0(bundle, inst); 168 + break; 169 + case 1: 170 + paravirt_write_slot1(bundle, inst); 171 + break; 172 + case 2: 173 + paravirt_write_slot2(bundle, inst); 174 + break; 175 + default: 176 + BUG(); 177 + break; 178 + } 179 + paravirt_flush_i_cache_range(bundle, sizeof(*bundle)); 180 + } 181 + 182 + /* for debug */ 183 + void 184 + paravirt_print_bundle(const bundle_t *bundle) 185 + { 186 + const unsigned long *quad = (const unsigned long *)bundle; 187 + ia64_inst_t slot0 = paravirt_read_slot0(bundle); 188 + ia64_inst_t slot1 = paravirt_read_slot1(bundle); 189 + ia64_inst_t slot2 = paravirt_read_slot2(bundle); 190 + 191 + printk(KERN_DEBUG 192 + "bundle 0x%p 0x%016lx 0x%016lx\n", bundle, quad[0], quad[1]); 193 + printk(KERN_DEBUG 194 + "bundle template 0x%x\n", 195 + bundle->quad0.template); 196 + 
printk(KERN_DEBUG 197 + "slot0 0x%lx slot1_p0 0x%lx slot1_p1 0x%lx slot2 0x%lx\n", 198 + (unsigned long)bundle->quad0.slot0, 199 + (unsigned long)bundle->quad0.slot1_p0, 200 + (unsigned long)bundle->quad1.slot1_p1, 201 + (unsigned long)bundle->quad1.slot2); 202 + printk(KERN_DEBUG 203 + "slot0 0x%016llx slot1 0x%016llx slot2 0x%016llx\n", 204 + slot0.l, slot1.l, slot2.l); 205 + } 206 + 207 + static int noreplace_paravirt __init_or_module = 0; 208 + 209 + static int __init setup_noreplace_paravirt(char *str) 210 + { 211 + noreplace_paravirt = 1; 212 + return 1; 213 + } 214 + __setup("noreplace-paravirt", setup_noreplace_paravirt); 215 + 216 + #ifdef ASM_SUPPORTED 217 + static void __init_or_module 218 + fill_nop_bundle(void *sbundle, void *ebundle) 219 + { 220 + extern const char paravirt_nop_bundle[]; 221 + extern const unsigned long paravirt_nop_bundle_size; 222 + 223 + void *bundle = sbundle; 224 + 225 + BUG_ON((((unsigned long)sbundle) % sizeof(bundle_t)) != 0); 226 + BUG_ON((((unsigned long)ebundle) % sizeof(bundle_t)) != 0); 227 + 228 + while (bundle < ebundle) { 229 + memcpy(bundle, paravirt_nop_bundle, paravirt_nop_bundle_size); 230 + 231 + bundle += paravirt_nop_bundle_size; 232 + } 233 + } 234 + 235 + /* helper function */ 236 + unsigned long __init_or_module 237 + __paravirt_patch_apply_bundle(void *sbundle, void *ebundle, unsigned long type, 238 + const struct paravirt_patch_bundle_elem *elems, 239 + unsigned long nelems, 240 + const struct paravirt_patch_bundle_elem **found) 241 + { 242 + unsigned long used = 0; 243 + unsigned long i; 244 + 245 + BUG_ON((((unsigned long)sbundle) % sizeof(bundle_t)) != 0); 246 + BUG_ON((((unsigned long)ebundle) % sizeof(bundle_t)) != 0); 247 + 248 + found = NULL; 249 + for (i = 0; i < nelems; i++) { 250 + const struct paravirt_patch_bundle_elem *p = &elems[i]; 251 + if (p->type == type) { 252 + unsigned long need = p->ebundle - p->sbundle; 253 + unsigned long room = ebundle - sbundle; 254 + 255 + if (found != NULL) 256 + 
*found = p; 257 + 258 + if (room < need) { 259 + /* no room to replace. skip it */ 260 + printk(KERN_DEBUG 261 + "the space is too small to put " 262 + "bundles. type %ld need %ld room %ld\n", 263 + type, need, room); 264 + break; 265 + } 266 + 267 + used = need; 268 + memcpy(sbundle, p->sbundle, used); 269 + break; 270 + } 271 + } 272 + 273 + return used; 274 + } 275 + 276 + void __init_or_module 277 + paravirt_patch_apply_bundle(const struct paravirt_patch_site_bundle *start, 278 + const struct paravirt_patch_site_bundle *end) 279 + { 280 + const struct paravirt_patch_site_bundle *p; 281 + 282 + if (noreplace_paravirt) 283 + return; 284 + if (pv_init_ops.patch_bundle == NULL) 285 + return; 286 + 287 + for (p = start; p < end; p++) { 288 + unsigned long used; 289 + 290 + used = (*pv_init_ops.patch_bundle)(p->sbundle, p->ebundle, 291 + p->type); 292 + if (used == 0) 293 + continue; 294 + 295 + fill_nop_bundle(p->sbundle + used, p->ebundle); 296 + paravirt_flush_i_cache_range(p->sbundle, 297 + p->ebundle - p->sbundle); 298 + } 299 + ia64_sync_i(); 300 + ia64_srlz_i(); 301 + } 302 + 303 + /* 304 + * nop.i, nop.m, nop.f instruction are same format. 305 + * but nop.b has differennt format. 306 + * This doesn't support nop.b for now. 
307 + */ 308 + static void __init_or_module 309 + fill_nop_inst(unsigned long stag, unsigned long etag) 310 + { 311 + extern const bundle_t paravirt_nop_mfi_inst_bundle[]; 312 + unsigned long tag; 313 + const ia64_inst_t nop_inst = 314 + paravirt_read_slot0(paravirt_nop_mfi_inst_bundle); 315 + 316 + for (tag = stag; tag < etag; tag = paravirt_get_next_tag(tag)) 317 + paravirt_write_inst(tag, nop_inst); 318 + } 319 + 320 + void __init_or_module 321 + paravirt_patch_apply_inst(const struct paravirt_patch_site_inst *start, 322 + const struct paravirt_patch_site_inst *end) 323 + { 324 + const struct paravirt_patch_site_inst *p; 325 + 326 + if (noreplace_paravirt) 327 + return; 328 + if (pv_init_ops.patch_inst == NULL) 329 + return; 330 + 331 + for (p = start; p < end; p++) { 332 + unsigned long tag; 333 + bundle_t *sbundle; 334 + bundle_t *ebundle; 335 + 336 + tag = (*pv_init_ops.patch_inst)(p->stag, p->etag, p->type); 337 + if (tag == p->stag) 338 + continue; 339 + 340 + fill_nop_inst(tag, p->etag); 341 + sbundle = paravirt_get_bundle(p->stag); 342 + ebundle = paravirt_get_bundle(p->etag) + 1; 343 + paravirt_flush_i_cache_range(sbundle, (ebundle - sbundle) * 344 + sizeof(bundle_t)); 345 + } 346 + ia64_sync_i(); 347 + ia64_srlz_i(); 348 + } 349 + #endif /* ASM_SUPPOTED */ 350 + 351 + /* brl.cond.sptk.many <target64> X3 */ 352 + typedef union inst_x3_op { 353 + ia64_inst_t inst; 354 + struct { 355 + unsigned long qp: 6; 356 + unsigned long btyp: 3; 357 + unsigned long unused: 3; 358 + unsigned long p: 1; 359 + unsigned long imm20b: 20; 360 + unsigned long wh: 2; 361 + unsigned long d: 1; 362 + unsigned long i: 1; 363 + unsigned long opcode: 4; 364 + }; 365 + unsigned long l; 366 + } inst_x3_op_t; 367 + 368 + typedef union inst_x3_imm { 369 + ia64_inst_t inst; 370 + struct { 371 + unsigned long unused: 2; 372 + unsigned long imm39: 39; 373 + }; 374 + unsigned long l; 375 + } inst_x3_imm_t; 376 + 377 + void __init_or_module 378 + paravirt_patch_reloc_brl(unsigned long 
tag, const void *target) 379 + { 380 + unsigned long tag_op = paravirt_get_next_tag(tag); 381 + unsigned long tag_imm = tag; 382 + bundle_t *bundle = paravirt_get_bundle(tag); 383 + 384 + ia64_inst_t inst_op = paravirt_read_inst(tag_op); 385 + ia64_inst_t inst_imm = paravirt_read_inst(tag_imm); 386 + 387 + inst_x3_op_t inst_x3_op = { .l = inst_op.l }; 388 + inst_x3_imm_t inst_x3_imm = { .l = inst_imm.l }; 389 + 390 + unsigned long imm60 = 391 + ((unsigned long)target - (unsigned long)bundle) >> 4; 392 + 393 + BUG_ON(paravirt_get_slot(tag) != 1); /* MLX */ 394 + BUG_ON(((unsigned long)target & (sizeof(bundle_t) - 1)) != 0); 395 + 396 + /* imm60[59] 1bit */ 397 + inst_x3_op.i = (imm60 >> 59) & 1; 398 + /* imm60[19:0] 20bit */ 399 + inst_x3_op.imm20b = imm60 & ((1UL << 20) - 1); 400 + /* imm60[58:20] 39bit */ 401 + inst_x3_imm.imm39 = (imm60 >> 20) & ((1UL << 39) - 1); 402 + 403 + inst_op.l = inst_x3_op.l; 404 + inst_imm.l = inst_x3_imm.l; 405 + 406 + paravirt_write_inst(tag_op, inst_op); 407 + paravirt_write_inst(tag_imm, inst_imm); 408 + } 409 + 410 + /* br.cond.sptk.many <target25> B1 */ 411 + typedef union inst_b1 { 412 + ia64_inst_t inst; 413 + struct { 414 + unsigned long qp: 6; 415 + unsigned long btype: 3; 416 + unsigned long unused: 3; 417 + unsigned long p: 1; 418 + unsigned long imm20b: 20; 419 + unsigned long wh: 2; 420 + unsigned long d: 1; 421 + unsigned long s: 1; 422 + unsigned long opcode: 4; 423 + }; 424 + unsigned long l; 425 + } inst_b1_t; 426 + 427 + void __init 428 + paravirt_patch_reloc_br(unsigned long tag, const void *target) 429 + { 430 + bundle_t *bundle = paravirt_get_bundle(tag); 431 + ia64_inst_t inst = paravirt_read_inst(tag); 432 + unsigned long target25 = (unsigned long)target - (unsigned long)bundle; 433 + inst_b1_t inst_b1; 434 + 435 + BUG_ON(((unsigned long)target & (sizeof(bundle_t) - 1)) != 0); 436 + 437 + inst_b1.l = inst.l; 438 + if (target25 & (1UL << 63)) 439 + inst_b1.s = 1; 440 + else 441 + inst_b1.s = 0; 442 + 443 + 
inst_b1.imm20b = target25 >> 4; 444 + inst.l = inst_b1.l; 445 + 446 + paravirt_write_inst(tag, inst); 447 + } 448 + 449 + void __init 450 + __paravirt_patch_apply_branch( 451 + unsigned long tag, unsigned long type, 452 + const struct paravirt_patch_branch_target *entries, 453 + unsigned int nr_entries) 454 + { 455 + unsigned int i; 456 + for (i = 0; i < nr_entries; i++) { 457 + if (entries[i].type == type) { 458 + paravirt_patch_reloc_br(tag, entries[i].entry); 459 + break; 460 + } 461 + } 462 + } 463 + 464 + static void __init 465 + paravirt_patch_apply_branch(const struct paravirt_patch_site_branch *start, 466 + const struct paravirt_patch_site_branch *end) 467 + { 468 + const struct paravirt_patch_site_branch *p; 469 + 470 + if (noreplace_paravirt) 471 + return; 472 + if (pv_init_ops.patch_branch == NULL) 473 + return; 474 + 475 + for (p = start; p < end; p++) 476 + (*pv_init_ops.patch_branch)(p->tag, p->type); 477 + 478 + ia64_sync_i(); 479 + ia64_srlz_i(); 480 + } 481 + 482 + void __init 483 + paravirt_patch_apply(void) 484 + { 485 + extern const char __start_paravirt_bundles[]; 486 + extern const char __stop_paravirt_bundles[]; 487 + extern const char __start_paravirt_insts[]; 488 + extern const char __stop_paravirt_insts[]; 489 + extern const char __start_paravirt_branches[]; 490 + extern const char __stop_paravirt_branches[]; 491 + 492 + paravirt_patch_apply_bundle((const struct paravirt_patch_site_bundle *) 493 + __start_paravirt_bundles, 494 + (const struct paravirt_patch_site_bundle *) 495 + __stop_paravirt_bundles); 496 + paravirt_patch_apply_inst((const struct paravirt_patch_site_inst *) 497 + __start_paravirt_insts, 498 + (const struct paravirt_patch_site_inst *) 499 + __stop_paravirt_insts); 500 + paravirt_patch_apply_branch((const struct paravirt_patch_site_branch *) 501 + __start_paravirt_branches, 502 + (const struct paravirt_patch_site_branch *) 503 + __stop_paravirt_branches); 504 + } 505 + 506 + /* 507 + * Local variables: 508 + * mode: C 509 
+ * c-set-style: "linux" 510 + * c-basic-offset: 8 511 + * tab-width: 8 512 + * indent-tabs-mode: t 513 + * End: 514 + */
+79
arch/ia64/kernel/paravirt_patchlist.c
··· 1 + /****************************************************************************** 2 + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> 3 + * VA Linux Systems Japan K.K. 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License as published by 7 + * the Free Software Foundation; either version 2 of the License, or 8 + * (at your option) any later version. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program; if not, write to the Free Software 17 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 + * 19 + */ 20 + 21 + #include <linux/bug.h> 22 + #include <asm/paravirt.h> 23 + 24 + #define DECLARE(name) \ 25 + extern unsigned long \ 26 + __ia64_native_start_gate_##name##_patchlist[]; \ 27 + extern unsigned long \ 28 + __ia64_native_end_gate_##name##_patchlist[] 29 + 30 + DECLARE(fsyscall); 31 + DECLARE(brl_fsys_bubble_down); 32 + DECLARE(vtop); 33 + DECLARE(mckinley_e9); 34 + 35 + extern unsigned long __start_gate_section[]; 36 + 37 + #define ASSIGN(name) \ 38 + .start_##name##_patchlist = \ 39 + (unsigned long)__ia64_native_start_gate_##name##_patchlist, \ 40 + .end_##name##_patchlist = \ 41 + (unsigned long)__ia64_native_end_gate_##name##_patchlist 42 + 43 + struct pv_patchdata pv_patchdata __initdata = { 44 + ASSIGN(fsyscall), 45 + ASSIGN(brl_fsys_bubble_down), 46 + ASSIGN(vtop), 47 + ASSIGN(mckinley_e9), 48 + 49 + .gate_section = (void*)__start_gate_section, 50 + }; 51 + 52 + 53 + unsigned long __init 54 + paravirt_get_gate_patchlist(enum pv_gate_patchlist type) 55 + { 56 + 57 + #define CASE(NAME, name) \ 58 + case 
PV_GATE_START_##NAME: \ 59 + return pv_patchdata.start_##name##_patchlist; \ 60 + case PV_GATE_END_##NAME: \ 61 + return pv_patchdata.end_##name##_patchlist; \ 62 + 63 + switch (type) { 64 + CASE(FSYSCALL, fsyscall); 65 + CASE(BRL_FSYS_BUBBLE_DOWN, brl_fsys_bubble_down); 66 + CASE(VTOP, vtop); 67 + CASE(MCKINLEY_E9, mckinley_e9); 68 + default: 69 + BUG(); 70 + break; 71 + } 72 + return 0; 73 + } 74 + 75 + void * __init 76 + paravirt_get_gate_section(void) 77 + { 78 + return pv_patchdata.gate_section; 79 + }
+28
arch/ia64/kernel/paravirt_patchlist.h
··· 1 + /****************************************************************************** 2 + * linux/arch/ia64/xen/paravirt_patchlist.h 3 + * 4 + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> 5 + * VA Linux Systems Japan K.K. 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License as published by 9 + * the Free Software Foundation; either version 2 of the License, or 10 + * (at your option) any later version. 11 + * 12 + * This program is distributed in the hope that it will be useful, 13 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 + * GNU General Public License for more details. 16 + * 17 + * You should have received a copy of the GNU General Public License 18 + * along with this program; if not, write to the Free Software 19 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 + * 21 + */ 22 + 23 + #if defined(__IA64_GATE_PARAVIRTUALIZED_XEN) 24 + #include <asm/xen/patchlist.h> 25 + #else 26 + #include <asm/native/patchlist.h> 27 + #endif 28 +
+80 -19
arch/ia64/kernel/paravirtentry.S
··· 20 20 * 21 21 */ 22 22 23 + #include <linux/init.h> 23 24 #include <asm/asmmacro.h> 24 25 #include <asm/asm-offsets.h> 26 + #include <asm/paravirt_privop.h> 27 + #include <asm/paravirt_patch.h> 25 28 #include "entry.h" 26 29 27 30 #define DATA8(sym, init_value) \ ··· 35 32 data8 init_value ; \ 36 33 .popsection 37 34 38 - #define BRANCH(targ, reg, breg) \ 39 - movl reg=targ ; \ 40 - ;; \ 41 - ld8 reg=[reg] ; \ 42 - ;; \ 43 - mov breg=reg ; \ 35 + #define BRANCH(targ, reg, breg, type) \ 36 + PARAVIRT_PATCH_SITE_BR(PARAVIRT_PATCH_TYPE_BR_ ## type) ; \ 37 + ;; \ 38 + movl reg=targ ; \ 39 + ;; \ 40 + ld8 reg=[reg] ; \ 41 + ;; \ 42 + mov breg=reg ; \ 44 43 br.cond.sptk.many breg 45 44 46 - #define BRANCH_PROC(sym, reg, breg) \ 47 - DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \ 48 - GLOBAL_ENTRY(paravirt_ ## sym) ; \ 49 - BRANCH(paravirt_ ## sym ## _targ, reg, breg) ; \ 45 + #define BRANCH_PROC(sym, reg, breg, type) \ 46 + DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \ 47 + GLOBAL_ENTRY(paravirt_ ## sym) ; \ 48 + BRANCH(paravirt_ ## sym ## _targ, reg, breg, type) ; \ 50 49 END(paravirt_ ## sym) 51 50 52 - #define BRANCH_PROC_UNWINFO(sym, reg, breg) \ 53 - DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \ 54 - GLOBAL_ENTRY(paravirt_ ## sym) ; \ 55 - PT_REGS_UNWIND_INFO(0) ; \ 56 - BRANCH(paravirt_ ## sym ## _targ, reg, breg) ; \ 51 + #define BRANCH_PROC_UNWINFO(sym, reg, breg, type) \ 52 + DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \ 53 + GLOBAL_ENTRY(paravirt_ ## sym) ; \ 54 + PT_REGS_UNWIND_INFO(0) ; \ 55 + BRANCH(paravirt_ ## sym ## _targ, reg, breg, type) ; \ 57 56 END(paravirt_ ## sym) 58 57 59 58 60 - BRANCH_PROC(switch_to, r22, b7) 61 - BRANCH_PROC_UNWINFO(leave_syscall, r22, b7) 62 - BRANCH_PROC(work_processed_syscall, r2, b7) 63 - BRANCH_PROC_UNWINFO(leave_kernel, r22, b7) 59 + BRANCH_PROC(switch_to, r22, b7, SWITCH_TO) 60 + BRANCH_PROC_UNWINFO(leave_syscall, r22, b7, LEAVE_SYSCALL) 61 + 
BRANCH_PROC(work_processed_syscall, r2, b7, WORK_PROCESSED_SYSCALL) 62 + BRANCH_PROC_UNWINFO(leave_kernel, r22, b7, LEAVE_KERNEL) 63 + 64 + 65 + #ifdef CONFIG_MODULES 66 + #define __INIT_OR_MODULE .text 67 + #define __INITDATA_OR_MODULE .data 68 + #else 69 + #define __INIT_OR_MODULE __INIT 70 + #define __INITDATA_OR_MODULE __INITDATA 71 + #endif /* CONFIG_MODULES */ 72 + 73 + __INIT_OR_MODULE 74 + GLOBAL_ENTRY(paravirt_fc_i) 75 + fc.i r32 76 + br.ret.sptk.many rp 77 + END(paravirt_fc_i) 78 + __FINIT 79 + 80 + __INIT_OR_MODULE 81 + .align 32 82 + GLOBAL_ENTRY(paravirt_nop_b_inst_bundle) 83 + { 84 + nop.b 0 85 + nop.b 0 86 + nop.b 0 87 + } 88 + END(paravirt_nop_b_inst_bundle) 89 + __FINIT 90 + 91 + /* NOTE: nop.[mfi] has same format */ 92 + __INIT_OR_MODULE 93 + GLOBAL_ENTRY(paravirt_nop_mfi_inst_bundle) 94 + { 95 + nop.m 0 96 + nop.f 0 97 + nop.i 0 98 + } 99 + END(paravirt_nop_mfi_inst_bundle) 100 + __FINIT 101 + 102 + __INIT_OR_MODULE 103 + GLOBAL_ENTRY(paravirt_nop_bundle) 104 + paravirt_nop_bundle_start: 105 + { 106 + nop 0 107 + nop 0 108 + nop 0 109 + } 110 + paravirt_nop_bundle_end: 111 + END(paravirt_nop_bundle) 112 + __FINIT 113 + 114 + __INITDATA_OR_MODULE 115 + .align 8 116 + .global paravirt_nop_bundle_size 117 + paravirt_nop_bundle_size: 118 + data8 paravirt_nop_bundle_end - paravirt_nop_bundle_start
+30 -10
arch/ia64/kernel/patch.c
··· 7 7 #include <linux/init.h> 8 8 #include <linux/string.h> 9 9 10 + #include <asm/paravirt.h> 10 11 #include <asm/patch.h> 11 12 #include <asm/processor.h> 12 13 #include <asm/sections.h> ··· 170 169 ia64_srlz_i(); 171 170 } 172 171 172 + extern unsigned long ia64_native_fsyscall_table[NR_syscalls]; 173 + extern char ia64_native_fsys_bubble_down[]; 174 + struct pv_fsys_data pv_fsys_data __initdata = { 175 + .fsyscall_table = (unsigned long *)ia64_native_fsyscall_table, 176 + .fsys_bubble_down = (void *)ia64_native_fsys_bubble_down, 177 + }; 178 + 179 + unsigned long * __init 180 + paravirt_get_fsyscall_table(void) 181 + { 182 + return pv_fsys_data.fsyscall_table; 183 + } 184 + 185 + char * __init 186 + paravirt_get_fsys_bubble_down(void) 187 + { 188 + return pv_fsys_data.fsys_bubble_down; 189 + } 190 + 173 191 static void __init 174 192 patch_fsyscall_table (unsigned long start, unsigned long end) 175 193 { 176 - extern unsigned long fsyscall_table[NR_syscalls]; 194 + u64 fsyscall_table = (u64)paravirt_get_fsyscall_table(); 177 195 s32 *offp = (s32 *) start; 178 196 u64 ip; 179 197 180 198 while (offp < (s32 *) end) { 181 199 ip = (u64) ia64_imva((char *) offp + *offp); 182 - ia64_patch_imm64(ip, (u64) fsyscall_table); 200 + ia64_patch_imm64(ip, fsyscall_table); 183 201 ia64_fc((void *) ip); 184 202 ++offp; 185 203 } ··· 209 189 static void __init 210 190 patch_brl_fsys_bubble_down (unsigned long start, unsigned long end) 211 191 { 212 - extern char fsys_bubble_down[]; 192 + u64 fsys_bubble_down = (u64)paravirt_get_fsys_bubble_down(); 213 193 s32 *offp = (s32 *) start; 214 194 u64 ip; 215 195 ··· 227 207 void __init 228 208 ia64_patch_gate (void) 229 209 { 230 - # define START(name) ((unsigned long) __start_gate_##name##_patchlist) 231 - # define END(name) ((unsigned long)__end_gate_##name##_patchlist) 210 + # define START(name) paravirt_get_gate_patchlist(PV_GATE_START_##name) 211 + # define END(name) paravirt_get_gate_patchlist(PV_GATE_END_##name) 232 212 233 
- patch_fsyscall_table(START(fsyscall), END(fsyscall)); 234 - patch_brl_fsys_bubble_down(START(brl_fsys_bubble_down), END(brl_fsys_bubble_down)); 235 - ia64_patch_vtop(START(vtop), END(vtop)); 236 - ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9)); 213 + patch_fsyscall_table(START(FSYSCALL), END(FSYSCALL)); 214 + patch_brl_fsys_bubble_down(START(BRL_FSYS_BUBBLE_DOWN), END(BRL_FSYS_BUBBLE_DOWN)); 215 + ia64_patch_vtop(START(VTOP), END(VTOP)); 216 + ia64_patch_mckinley_e9(START(MCKINLEY_E9), END(MCKINLEY_E9)); 237 217 } 238 218 239 219 void ia64_patch_phys_stack_reg(unsigned long val) ··· 249 229 while (offp < end) { 250 230 ip = (u64) offp + *offp; 251 231 ia64_patch(ip, mask, imm); 252 - ia64_fc(ip); 232 + ia64_fc((void *)ip); 253 233 ++offp; 254 234 } 255 235 ia64_sync_i();
+2 -2
arch/ia64/kernel/perfmon.c
··· 5603 5603 * /proc/perfmon interface, for debug only 5604 5604 */ 5605 5605 5606 - #define PFM_PROC_SHOW_HEADER ((void *)NR_CPUS+1) 5606 + #define PFM_PROC_SHOW_HEADER ((void *)nr_cpu_ids+1) 5607 5607 5608 5608 static void * 5609 5609 pfm_proc_start(struct seq_file *m, loff_t *pos) ··· 5612 5612 return PFM_PROC_SHOW_HEADER; 5613 5613 } 5614 5614 5615 - while (*pos <= NR_CPUS) { 5615 + while (*pos <= nr_cpu_ids) { 5616 5616 if (cpu_online(*pos - 1)) { 5617 5617 return (void *)*pos; 5618 5618 }
+3 -3
arch/ia64/kernel/salinfo.c
··· 317 317 } 318 318 319 319 n = data->cpu_check; 320 - for (i = 0; i < NR_CPUS; i++) { 320 + for (i = 0; i < nr_cpu_ids; i++) { 321 321 if (cpu_isset(n, data->cpu_event)) { 322 322 if (!cpu_online(n)) { 323 323 cpu_clear(n, data->cpu_event); ··· 326 326 cpu = n; 327 327 break; 328 328 } 329 - if (++n == NR_CPUS) 329 + if (++n == nr_cpu_ids) 330 330 n = 0; 331 331 } 332 332 ··· 337 337 338 338 /* for next read, start checking at next CPU */ 339 339 data->cpu_check = cpu; 340 - if (++data->cpu_check == NR_CPUS) 340 + if (++data->cpu_check == nr_cpu_ids) 341 341 data->cpu_check = 0; 342 342 343 343 snprintf(cmd, sizeof(cmd), "read %d\n", cpu);
+5 -4
arch/ia64/kernel/setup.c
··· 52 52 #include <asm/meminit.h> 53 53 #include <asm/page.h> 54 54 #include <asm/paravirt.h> 55 + #include <asm/paravirt_patch.h> 55 56 #include <asm/patch.h> 56 57 #include <asm/pgtable.h> 57 58 #include <asm/processor.h> ··· 538 537 paravirt_arch_setup_early(); 539 538 540 539 ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist); 540 + paravirt_patch_apply(); 541 541 542 542 *cmdline_p = __va(ia64_boot_param->command_line); 543 543 strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); ··· 732 730 c_start (struct seq_file *m, loff_t *pos) 733 731 { 734 732 #ifdef CONFIG_SMP 735 - while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map)) 733 + while (*pos < nr_cpu_ids && !cpu_online(*pos)) 736 734 ++*pos; 737 735 #endif 738 - return *pos < NR_CPUS ? cpu_data(*pos) : NULL; 736 + return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL; 739 737 } 740 738 741 739 static void * ··· 1018 1016 | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC)); 1019 1017 atomic_inc(&init_mm.mm_count); 1020 1018 current->active_mm = &init_mm; 1021 - if (current->mm) 1022 - BUG(); 1019 + BUG_ON(current->mm); 1023 1020 1024 1021 ia64_mmu_init(ia64_imva(cpu_data)); 1025 1022 ia64_mca_cpu_init(ia64_imva(cpu_data));
+3 -3
arch/ia64/kernel/smp.c
··· 166 166 * Called with preemption disabled. 167 167 */ 168 168 static inline void 169 - send_IPI_mask(cpumask_t mask, int op) 169 + send_IPI_mask(const struct cpumask *mask, int op) 170 170 { 171 171 unsigned int cpu; 172 172 173 - for_each_cpu_mask(cpu, mask) { 173 + for_each_cpu(cpu, mask) { 174 174 send_IPI_single(cpu, op); 175 175 } 176 176 } ··· 316 316 send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE); 317 317 } 318 318 319 - void arch_send_call_function_ipi(cpumask_t mask) 319 + void arch_send_call_function_ipi_mask(const struct cpumask *mask) 320 320 { 321 321 send_IPI_mask(mask, IPI_CALL_FUNC); 322 322 }
+7 -10
arch/ia64/kernel/smpboot.c
··· 581 581 582 582 ia64_cpu_to_sapicid[0] = boot_cpu_id; 583 583 cpus_clear(cpu_present_map); 584 - cpu_set(0, cpu_present_map); 585 - cpu_set(0, cpu_possible_map); 584 + set_cpu_present(0, true); 585 + set_cpu_possible(0, true); 586 586 for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) { 587 587 sapicid = smp_boot_data.cpu_phys_id[i]; 588 588 if (sapicid == boot_cpu_id) 589 589 continue; 590 - cpu_set(cpu, cpu_present_map); 591 - cpu_set(cpu, cpu_possible_map); 590 + set_cpu_present(cpu, true); 591 + set_cpu_possible(cpu, true); 592 592 ia64_cpu_to_sapicid[cpu] = sapicid; 593 593 cpu++; 594 594 } ··· 626 626 */ 627 627 if (!max_cpus) { 628 628 printk(KERN_INFO "SMP mode deactivated.\n"); 629 - cpus_clear(cpu_online_map); 630 - cpus_clear(cpu_present_map); 631 - cpus_clear(cpu_possible_map); 632 - cpu_set(0, cpu_online_map); 633 - cpu_set(0, cpu_present_map); 634 - cpu_set(0, cpu_possible_map); 629 + init_cpu_online(cpumask_of(0)); 630 + init_cpu_present(cpumask_of(0)); 631 + init_cpu_possible(cpumask_of(0)); 635 632 return; 636 633 } 637 634 }
+9
arch/ia64/kernel/time.c
··· 51 51 #endif 52 52 53 53 #ifdef CONFIG_PARAVIRT 54 + /* We need to define a real function for sched_clock, to override the 55 + weak default version */ 56 + unsigned long long sched_clock(void) 57 + { 58 + return paravirt_sched_clock(); 59 + } 60 + #endif 61 + 62 + #ifdef CONFIG_PARAVIRT 54 63 static void 55 64 paravirt_clocksource_resume(void) 56 65 {
+30
arch/ia64/kernel/vmlinux.lds.S
··· 169 169 __end___mckinley_e9_bundles = .; 170 170 } 171 171 172 + #if defined(CONFIG_PARAVIRT) 173 + . = ALIGN(16); 174 + .paravirt_bundles : AT(ADDR(.paravirt_bundles) - LOAD_OFFSET) 175 + { 176 + __start_paravirt_bundles = .; 177 + *(.paravirt_bundles) 178 + __stop_paravirt_bundles = .; 179 + } 180 + . = ALIGN(16); 181 + .paravirt_insts : AT(ADDR(.paravirt_insts) - LOAD_OFFSET) 182 + { 183 + __start_paravirt_insts = .; 184 + *(.paravirt_insts) 185 + __stop_paravirt_insts = .; 186 + } 187 + . = ALIGN(16); 188 + .paravirt_branches : AT(ADDR(.paravirt_branches) - LOAD_OFFSET) 189 + { 190 + __start_paravirt_branches = .; 191 + *(.paravirt_branches) 192 + __stop_paravirt_branches = .; 193 + } 194 + #endif 195 + 172 196 #if defined(CONFIG_IA64_GENERIC) 173 197 /* Machine Vector */ 174 198 . = ALIGN(16); ··· 225 201 __start_gate_section = .; 226 202 *(.data.gate) 227 203 __stop_gate_section = .; 204 + #ifdef CONFIG_XEN 205 + . = ALIGN(PAGE_SIZE); 206 + __xen_start_gate_section = .; 207 + *(.data.gate.xen) 208 + __xen_stop_gate_section = .; 209 + #endif 228 210 } 229 211 . = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose 230 212 * kernel data
+1 -1
arch/ia64/kvm/kvm-ia64.c
··· 70 70 int l; 71 71 72 72 for (l = 0; l < (len + 32); l += 32) 73 - ia64_fc(start + l); 73 + ia64_fc((void *)(start + l)); 74 74 75 75 ia64_sync_i(); 76 76 ia64_srlz_i();
+1 -1
arch/ia64/kvm/vcpu.c
··· 386 386 else 387 387 *rnat_addr = (*rnat_addr) & (~nat_mask); 388 388 389 - ia64_setreg(_IA64_REG_AR_BSPSTORE, bspstore); 389 + ia64_setreg(_IA64_REG_AR_BSPSTORE, (unsigned long)bspstore); 390 390 ia64_setreg(_IA64_REG_AR_RNAT, rnat); 391 391 } 392 392 local_irq_restore(psr);
+2
arch/ia64/kvm/vtlb.c
··· 210 210 phy_pte &= ~PAGE_FLAGS_RV_MASK; 211 211 psr = ia64_clear_ic(); 212 212 ia64_itc(type, va, phy_pte, itir_ps(itir)); 213 + paravirt_dv_serialize_data(); 213 214 ia64_set_psr(psr); 214 215 } 215 216 ··· 457 456 phy_pte &= ~PAGE_FLAGS_RV_MASK; 458 457 psr = ia64_clear_ic(); 459 458 ia64_itc(type, ifa, phy_pte, ps); 459 + paravirt_dv_serialize_data(); 460 460 ia64_set_psr(psr); 461 461 } 462 462 if (!(pte&VTLB_PTE_IO))
+7 -5
arch/ia64/mm/init.c
··· 35 35 #include <asm/uaccess.h> 36 36 #include <asm/unistd.h> 37 37 #include <asm/mca.h> 38 + #include <asm/paravirt.h> 38 39 39 40 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); 40 41 ··· 260 259 static void __init 261 260 setup_gate (void) 262 261 { 262 + void *gate_section; 263 263 struct page *page; 264 264 265 265 /* ··· 268 266 * headers etc. and once execute-only page to enable 269 267 * privilege-promotion via "epc": 270 268 */ 271 - page = virt_to_page(ia64_imva(__start_gate_section)); 269 + gate_section = paravirt_get_gate_section(); 270 + page = virt_to_page(ia64_imva(gate_section)); 272 271 put_kernel_page(page, GATE_ADDR, PAGE_READONLY); 273 272 #ifdef HAVE_BUGGY_SEGREL 274 - page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE)); 273 + page = virt_to_page(ia64_imva(gate_section + PAGE_SIZE)); 275 274 put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE); 276 275 #else 277 276 put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE); ··· 636 633 #endif 637 634 638 635 #ifdef CONFIG_FLATMEM 639 - if (!mem_map) 640 - BUG(); 636 + BUG_ON(!mem_map); 641 637 max_mapnr = max_low_pfn; 642 638 #endif 643 639 ··· 669 667 * code can tell them apart. 670 668 */ 671 669 for (i = 0; i < NR_syscalls; ++i) { 672 - extern unsigned long fsyscall_table[NR_syscalls]; 673 670 extern unsigned long sys_call_table[NR_syscalls]; 671 + unsigned long *fsyscall_table = paravirt_get_fsyscall_table(); 674 672 675 673 if (!fsyscall_table[i] || nolwsys) 676 674 fsyscall_table[i] = sys_call_table[i] | 1;
+1 -1
arch/ia64/mm/tlb.c
··· 309 309 310 310 preempt_disable(); 311 311 #ifdef CONFIG_SMP 312 - if (mm != current->active_mm || cpus_weight(mm->cpu_vm_mask) != 1) { 312 + if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) { 313 313 platform_global_tlb_purge(mm, start, end, nbits); 314 314 preempt_enable(); 315 315 return;
+1
arch/ia64/scripts/pvcheck.sed
··· 17 17 s/mov.*=.*cr\.ivr/.warning \"cr.ivr should not used directly\"/g 18 18 s/mov.*=[^\.]*psr/.warning \"psr should not used directly\"/g # avoid ar.fpsr 19 19 s/mov.*=.*ar\.eflags/.warning \"ar.eflags should not used directly\"/g 20 + s/mov.*=.*ar\.itc.*/.warning \"ar.itc should not used directly\"/g 20 21 s/mov.*cr\.ifa.*=.*/.warning \"cr.ifa should not used directly\"/g 21 22 s/mov.*cr\.itir.*=.*/.warning \"cr.itir should not used directly\"/g 22 23 s/mov.*cr\.iha.*=.*/.warning \"cr.iha should not used directly\"/g
+5 -10
arch/ia64/sn/kernel/io_common.c
··· 135 135 } 136 136 137 137 war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL); 138 - if (!war_list) 139 - BUG(); 138 + BUG_ON(!war_list); 140 139 141 140 SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST, 142 141 nasid, widget, __pa(war_list), 0, 0, 0 ,0); ··· 179 180 sizeof(struct sn_flush_device_kernel *); 180 181 hubdev->hdi_flush_nasid_list.widget_p = 181 182 kzalloc(size, GFP_KERNEL); 182 - if (!hubdev->hdi_flush_nasid_list.widget_p) 183 - BUG(); 183 + BUG_ON(!hubdev->hdi_flush_nasid_list.widget_p); 184 184 185 185 for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) { 186 186 size = DEV_PER_WIDGET * 187 187 sizeof(struct sn_flush_device_kernel); 188 188 sn_flush_device_kernel = kzalloc(size, GFP_KERNEL); 189 - if (!sn_flush_device_kernel) 190 - BUG(); 189 + BUG_ON(!sn_flush_device_kernel); 191 190 192 191 dev_entry = sn_flush_device_kernel; 193 192 for (device = 0; device < DEV_PER_WIDGET; 194 193 device++, dev_entry++) { 195 194 size = sizeof(struct sn_flush_device_common); 196 195 dev_entry->common = kzalloc(size, GFP_KERNEL); 197 - if (!dev_entry->common) 198 - BUG(); 196 + BUG_ON(!dev_entry->common); 199 197 if (sn_prom_feature_available(PRF_DEVICE_FLUSH_LIST)) 200 198 status = sal_get_device_dmaflush_list( 201 199 hubdev->hdi_nasid, widget, device, ··· 322 326 */ 323 327 controller->platform_data = kzalloc(sizeof(struct sn_platform_data), 324 328 GFP_KERNEL); 325 - if (controller->platform_data == NULL) 326 - BUG(); 329 + BUG_ON(controller->platform_data == NULL); 327 330 sn_platform_data = 328 331 (struct sn_platform_data *) controller->platform_data; 329 332 sn_platform_data->provider_soft = provider_soft;
+4 -8
arch/ia64/sn/kernel/io_init.c
··· 128 128 { 129 129 controller->window = kcalloc(2, sizeof(struct pci_window), 130 130 GFP_KERNEL); 131 - if (controller->window == NULL) 132 - BUG(); 131 + BUG_ON(controller->window == NULL); 133 132 controller->window[0].offset = legacy_io; 134 133 controller->window[0].resource.name = "legacy_io"; 135 134 controller->window[0].resource.flags = IORESOURCE_IO; ··· 167 168 idx = controller->windows; 168 169 new_count = controller->windows + count; 169 170 new_window = kcalloc(new_count, sizeof(struct pci_window), GFP_KERNEL); 170 - if (new_window == NULL) 171 - BUG(); 171 + BUG_ON(new_window == NULL); 172 172 if (controller->window) { 173 173 memcpy(new_window, controller->window, 174 174 sizeof(struct pci_window) * controller->windows); ··· 220 222 (u64) __pa(pcidev_info), 221 223 (u64) __pa(sn_irq_info)); 222 224 223 - if (status) 224 - BUG(); /* Cannot get platform pci device information */ 225 + BUG_ON(status); /* Cannot get platform pci device information */ 225 226 226 227 227 228 /* Copy over PIO Mapped Addresses */ ··· 304 307 prom_bussoft_ptr = __va(prom_bussoft_ptr); 305 308 306 309 controller = kzalloc(sizeof(*controller), GFP_KERNEL); 307 - if (!controller) 308 - BUG(); 310 + BUG_ON(!controller); 309 311 controller->segment = segment; 310 312 311 313 /*
+2 -3
arch/ia64/sn/kernel/setup.c
··· 732 732 kl_config_hdr_t *klgraph_header; 733 733 nasid = cnodeid_to_nasid(node); 734 734 klgraph_header = ia64_sn_get_klconfig_addr(nasid); 735 - if (klgraph_header == NULL) 736 - BUG(); 735 + BUG_ON(klgraph_header == NULL); 737 736 brd = NODE_OFFSET_TO_LBOARD(nasid, klgraph_header->ch_board_info); 738 737 while (brd) { 739 738 if (board_needs_cnode(brd->brd_type) && physical_node_map[brd->brd_nasid] < 0) { ··· 749 750 { 750 751 long cpu; 751 752 752 - for (cpu = 0; cpu < NR_CPUS; cpu++) 753 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) 753 754 if (cpuid_to_nasid(cpu) == nasid && 754 755 cpuid_to_slice(cpu) == slice) 755 756 return cpu;
+6 -6
arch/ia64/sn/kernel/sn2/sn2_smp.c
··· 133 133 unsigned long itc; 134 134 135 135 itc = ia64_get_itc(); 136 - smp_flush_tlb_cpumask(mm->cpu_vm_mask); 136 + smp_flush_tlb_cpumask(*mm_cpumask(mm)); 137 137 itc = ia64_get_itc() - itc; 138 138 __get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc; 139 139 __get_cpu_var(ptcstats).shub_ipi_flushes++; ··· 182 182 nodes_clear(nodes_flushed); 183 183 i = 0; 184 184 185 - for_each_cpu_mask(cpu, mm->cpu_vm_mask) { 185 + for_each_cpu(cpu, mm_cpumask(mm)) { 186 186 cnode = cpu_to_node(cpu); 187 187 node_set(cnode, nodes_flushed); 188 188 lcpu = cpu; ··· 461 461 462 462 static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset) 463 463 { 464 - if (*offset < NR_CPUS) 464 + if (*offset < nr_cpu_ids) 465 465 return offset; 466 466 return NULL; 467 467 } ··· 469 469 static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset) 470 470 { 471 471 (*offset)++; 472 - if (*offset < NR_CPUS) 472 + if (*offset < nr_cpu_ids) 473 473 return offset; 474 474 return NULL; 475 475 } ··· 491 491 seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt); 492 492 } 493 493 494 - if (cpu < NR_CPUS && cpu_online(cpu)) { 494 + if (cpu < nr_cpu_ids && cpu_online(cpu)) { 495 495 stat = &per_cpu(ptcstats, cpu); 496 496 seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l, 497 497 stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed, ··· 554 554 555 555 proc_sn2_ptc = proc_create(PTC_BASENAME, 0444, 556 556 NULL, &proc_sn2_ptc_operations); 557 - if (!&proc_sn2_ptc_operations) { 557 + if (!proc_sn2_ptc) { 558 558 printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME); 559 559 return -EINVAL; 560 560 }
+3 -5
arch/ia64/sn/kernel/sn2/sn_hwperf.c
··· 275 275 276 276 /* get it's interconnect topology */ 277 277 sz = op->ports * sizeof(struct sn_hwperf_port_info); 278 - if (sz > sizeof(ptdata)) 279 - BUG(); 278 + BUG_ON(sz > sizeof(ptdata)); 280 279 e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, 281 280 SN_HWPERF_ENUM_PORTS, nodeobj->id, sz, 282 281 (u64)&ptdata, 0, 0, NULL); ··· 309 310 if (router && (!found_cpu || !found_mem)) { 310 311 /* search for a node connected to the same router */ 311 312 sz = router->ports * sizeof(struct sn_hwperf_port_info); 312 - if (sz > sizeof(ptdata)) 313 - BUG(); 313 + BUG_ON(sz > sizeof(ptdata)); 314 314 e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, 315 315 SN_HWPERF_ENUM_PORTS, router->id, sz, 316 316 (u64)&ptdata, 0, 0, NULL); ··· 610 612 op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK; 611 613 612 614 if (cpu != SN_HWPERF_ARG_ANY_CPU) { 613 - if (cpu >= NR_CPUS || !cpu_online(cpu)) { 615 + if (cpu >= nr_cpu_ids || !cpu_online(cpu)) { 614 616 r = -EINVAL; 615 617 goto out; 616 618 }
+1 -3
arch/ia64/sn/pci/pcibr/pcibr_dma.c
··· 256 256 257 257 hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo; 258 258 259 - if (!hubinfo) { 260 - BUG(); 261 - } 259 + BUG_ON(!hubinfo); 262 260 263 261 flush_nasid_list = &hubinfo->hdi_flush_nasid_list; 264 262 if (flush_nasid_list->widget_p == NULL)
+17 -2
arch/ia64/xen/Makefile
··· 3 3 # 4 4 5 5 obj-y := hypercall.o xenivt.o xensetup.o xen_pv_ops.o irq_xen.o \ 6 - hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o 6 + hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o \ 7 + gate-data.o 7 8 8 9 obj-$(CONFIG_IA64_GENERIC) += machvec.o 10 + 11 + # The gate DSO image is built using a special linker script. 12 + include $(srctree)/arch/ia64/kernel/Makefile.gate 13 + 14 + # tell compiled for xen 15 + CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_XEN 16 + AFLAGS_gate.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN -D__IA64_GATE_PARAVIRTUALIZED_XEN 17 + 18 + # use same file of native. 19 + $(obj)/gate.o: $(src)/../kernel/gate.S FORCE 20 + $(call if_changed_dep,as_o_S) 21 + $(obj)/gate.lds: $(src)/../kernel/gate.lds.S FORCE 22 + $(call if_changed_dep,cpp_lds_S) 23 + 9 24 10 25 AFLAGS_xenivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN 11 26 12 27 # xen multi compile 13 - ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S 28 + ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S fsys.S 14 29 ASM_PARAVIRT_OBJS = $(addprefix xen-,$(ASM_PARAVIRT_MULTI_COMPILE_SRCS:.S=.o)) 15 30 obj-y += $(ASM_PARAVIRT_OBJS) 16 31 define paravirtualized_xen
+3
arch/ia64/xen/gate-data.S
··· 1 + .section .data.gate.xen, "aw" 2 + 3 + .incbin "arch/ia64/xen/gate.so"
+2
arch/ia64/xen/hypercall.S
··· 9 9 #include <asm/intrinsics.h> 10 10 #include <asm/xen/privop.h> 11 11 12 + #ifdef __INTEL_COMPILER 12 13 /* 13 14 * Hypercalls without parameter. 14 15 */ ··· 73 72 br.ret.sptk.many rp 74 73 ;; 75 74 END(xen_set_rr0_to_rr4) 75 + #endif 76 76 77 77 GLOBAL_ENTRY(xen_send_ipi) 78 78 mov r14=r32
+48
arch/ia64/xen/time.c
··· 175 175 } while (unlikely(ret != lcycle)); 176 176 } 177 177 178 + /* based on xen_sched_clock() in arch/x86/xen/time.c. */ 179 + /* 180 + * This relies on HAVE_UNSTABLE_SCHED_CLOCK. If it can't be defined, 181 + * something similar logic should be implemented here. 182 + */ 183 + /* 184 + * Xen sched_clock implementation. Returns the number of unstolen 185 + * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED 186 + * states. 187 + */ 188 + static unsigned long long xen_sched_clock(void) 189 + { 190 + struct vcpu_runstate_info runstate; 191 + 192 + unsigned long long now; 193 + unsigned long long offset; 194 + unsigned long long ret; 195 + 196 + /* 197 + * Ideally sched_clock should be called on a per-cpu basis 198 + * anyway, so preempt should already be disabled, but that's 199 + * not current practice at the moment. 200 + */ 201 + preempt_disable(); 202 + 203 + /* 204 + * both ia64_native_sched_clock() and xen's runstate are 205 + * based on mAR.ITC. So difference of them makes sense. 206 + */ 207 + now = ia64_native_sched_clock(); 208 + 209 + get_runstate_snapshot(&runstate); 210 + 211 + WARN_ON(runstate.state != RUNSTATE_running); 212 + 213 + offset = 0; 214 + if (now > runstate.state_entry_time) 215 + offset = now - runstate.state_entry_time; 216 + ret = runstate.time[RUNSTATE_blocked] + 217 + runstate.time[RUNSTATE_running] + 218 + offset; 219 + 220 + preempt_enable(); 221 + 222 + return ret; 223 + } 224 + 178 225 struct pv_time_ops xen_time_ops __initdata = { 179 226 .init_missing_ticks_accounting = xen_init_missing_ticks_accounting, 180 227 .do_steal_accounting = xen_do_steal_accounting, 181 228 .clocksource_resume = xen_itc_jitter_data_reset, 229 + .sched_clock = xen_sched_clock, 182 230 }; 183 231 184 232 /* Called after suspend, to resume time. */
+797 -3
arch/ia64/xen/xen_pv_ops.c
··· 24 24 #include <linux/irq.h> 25 25 #include <linux/kernel.h> 26 26 #include <linux/pm.h> 27 + #include <linux/unistd.h> 27 28 28 29 #include <asm/xen/hypervisor.h> 29 30 #include <asm/xen/xencomm.h> ··· 154 153 xen_setup_vcpu_info_placement(); 155 154 } 156 155 156 + #ifdef ASM_SUPPORTED 157 + static unsigned long __init_or_module 158 + xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type); 159 + #endif 160 + static void __init 161 + xen_patch_branch(unsigned long tag, unsigned long type); 162 + 157 163 static const struct pv_init_ops xen_init_ops __initconst = { 158 164 .banner = xen_banner, 159 165 ··· 171 163 .arch_setup_nomca = xen_arch_setup_nomca, 172 164 173 165 .post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu, 166 + #ifdef ASM_SUPPORTED 167 + .patch_bundle = xen_patch_bundle, 168 + #endif 169 + .patch_branch = xen_patch_branch, 170 + }; 171 + 172 + /*************************************************************************** 173 + * pv_fsys_data 174 + * addresses for fsys 175 + */ 176 + 177 + extern unsigned long xen_fsyscall_table[NR_syscalls]; 178 + extern char xen_fsys_bubble_down[]; 179 + struct pv_fsys_data xen_fsys_data __initdata = { 180 + .fsyscall_table = (unsigned long *)xen_fsyscall_table, 181 + .fsys_bubble_down = (void *)xen_fsys_bubble_down, 182 + }; 183 + 184 + /*************************************************************************** 185 + * pv_patchdata 186 + * patchdata addresses 187 + */ 188 + 189 + #define DECLARE(name) \ 190 + extern unsigned long __xen_start_gate_##name##_patchlist[]; \ 191 + extern unsigned long __xen_end_gate_##name##_patchlist[] 192 + 193 + DECLARE(fsyscall); 194 + DECLARE(brl_fsys_bubble_down); 195 + DECLARE(vtop); 196 + DECLARE(mckinley_e9); 197 + 198 + extern unsigned long __xen_start_gate_section[]; 199 + 200 + #define ASSIGN(name) \ 201 + .start_##name##_patchlist = \ 202 + (unsigned long)__xen_start_gate_##name##_patchlist, \ 203 + .end_##name##_patchlist = \ 204 + (unsigned 
long)__xen_end_gate_##name##_patchlist 205 + 206 + static struct pv_patchdata xen_patchdata __initdata = { 207 + ASSIGN(fsyscall), 208 + ASSIGN(brl_fsys_bubble_down), 209 + ASSIGN(vtop), 210 + ASSIGN(mckinley_e9), 211 + 212 + .gate_section = (void*)__xen_start_gate_section, 174 213 }; 175 214 176 215 /*************************************************************************** 177 216 * pv_cpu_ops 178 217 * intrinsics hooks. 179 218 */ 219 + 220 + #ifndef ASM_SUPPORTED 221 + static void 222 + xen_set_itm_with_offset(unsigned long val) 223 + { 224 + /* ia64_cpu_local_tick() calls this with interrupt enabled. */ 225 + /* WARN_ON(!irqs_disabled()); */ 226 + xen_set_itm(val - XEN_MAPPEDREGS->itc_offset); 227 + } 228 + 229 + static unsigned long 230 + xen_get_itm_with_offset(void) 231 + { 232 + /* unused at this moment */ 233 + printk(KERN_DEBUG "%s is called.\n", __func__); 234 + 235 + WARN_ON(!irqs_disabled()); 236 + return ia64_native_getreg(_IA64_REG_CR_ITM) + 237 + XEN_MAPPEDREGS->itc_offset; 238 + } 239 + 240 + /* ia64_set_itc() is only called by 241 + * cpu_init() with ia64_set_itc(0) and ia64_sync_itc(). 242 + * So XEN_MAPPEDRESG->itc_offset cal be considered as almost constant. 
243 + */ 244 + static void 245 + xen_set_itc(unsigned long val) 246 + { 247 + unsigned long mitc; 248 + 249 + WARN_ON(!irqs_disabled()); 250 + mitc = ia64_native_getreg(_IA64_REG_AR_ITC); 251 + XEN_MAPPEDREGS->itc_offset = val - mitc; 252 + XEN_MAPPEDREGS->itc_last = val; 253 + } 254 + 255 + static unsigned long 256 + xen_get_itc(void) 257 + { 258 + unsigned long res; 259 + unsigned long itc_offset; 260 + unsigned long itc_last; 261 + unsigned long ret_itc_last; 262 + 263 + itc_offset = XEN_MAPPEDREGS->itc_offset; 264 + do { 265 + itc_last = XEN_MAPPEDREGS->itc_last; 266 + res = ia64_native_getreg(_IA64_REG_AR_ITC); 267 + res += itc_offset; 268 + if (itc_last >= res) 269 + res = itc_last + 1; 270 + ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last, 271 + itc_last, res); 272 + } while (unlikely(ret_itc_last != itc_last)); 273 + return res; 274 + 275 + #if 0 276 + /* ia64_itc_udelay() calls ia64_get_itc() with interrupt enabled. 277 + Should it be paravirtualized instead? */ 278 + WARN_ON(!irqs_disabled()); 279 + itc_offset = XEN_MAPPEDREGS->itc_offset; 280 + itc_last = XEN_MAPPEDREGS->itc_last; 281 + res = ia64_native_getreg(_IA64_REG_AR_ITC); 282 + res += itc_offset; 283 + if (itc_last >= res) 284 + res = itc_last + 1; 285 + XEN_MAPPEDREGS->itc_last = res; 286 + return res; 287 + #endif 288 + } 180 289 181 290 static void xen_setreg(int regnum, unsigned long val) 182 291 { ··· 306 181 xen_set_eflag(val); 307 182 break; 308 183 #endif 184 + case _IA64_REG_AR_ITC: 185 + xen_set_itc(val); 186 + break; 309 187 case _IA64_REG_CR_TPR: 310 188 xen_set_tpr(val); 311 189 break; 312 190 case _IA64_REG_CR_ITM: 313 - xen_set_itm(val); 191 + xen_set_itm_with_offset(val); 314 192 break; 315 193 case _IA64_REG_CR_EOI: 316 194 xen_eoi(val); ··· 337 209 res = xen_get_eflag(); 338 210 break; 339 211 #endif 212 + case _IA64_REG_AR_ITC: 213 + res = xen_get_itc(); 214 + break; 215 + case _IA64_REG_CR_ITM: 216 + res = xen_get_itm_with_offset(); 217 + break; 340 218 case _IA64_REG_CR_IVR: 
341 219 res = xen_get_ivr(); 342 220 break; ··· 393 259 else 394 260 xen_rsm_i(); 395 261 } 262 + #else 263 + #define __DEFINE_FUNC(name, code) \ 264 + extern const char xen_ ## name ## _direct_start[]; \ 265 + extern const char xen_ ## name ## _direct_end[]; \ 266 + asm (".align 32\n" \ 267 + ".proc xen_" #name "\n" \ 268 + "xen_" #name ":\n" \ 269 + "xen_" #name "_direct_start:\n" \ 270 + code \ 271 + "xen_" #name "_direct_end:\n" \ 272 + "br.cond.sptk.many b6\n" \ 273 + ".endp xen_" #name "\n") 396 274 397 - static const struct pv_cpu_ops xen_cpu_ops __initdata = { 275 + #define DEFINE_VOID_FUNC0(name, code) \ 276 + extern void \ 277 + xen_ ## name (void); \ 278 + __DEFINE_FUNC(name, code) 279 + 280 + #define DEFINE_VOID_FUNC1(name, code) \ 281 + extern void \ 282 + xen_ ## name (unsigned long arg); \ 283 + __DEFINE_FUNC(name, code) 284 + 285 + #define DEFINE_VOID_FUNC1_VOID(name, code) \ 286 + extern void \ 287 + xen_ ## name (void *arg); \ 288 + __DEFINE_FUNC(name, code) 289 + 290 + #define DEFINE_VOID_FUNC2(name, code) \ 291 + extern void \ 292 + xen_ ## name (unsigned long arg0, \ 293 + unsigned long arg1); \ 294 + __DEFINE_FUNC(name, code) 295 + 296 + #define DEFINE_FUNC0(name, code) \ 297 + extern unsigned long \ 298 + xen_ ## name (void); \ 299 + __DEFINE_FUNC(name, code) 300 + 301 + #define DEFINE_FUNC1(name, type, code) \ 302 + extern unsigned long \ 303 + xen_ ## name (type arg); \ 304 + __DEFINE_FUNC(name, code) 305 + 306 + #define XEN_PSR_I_ADDR_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS) 307 + 308 + /* 309 + * static void xen_set_itm_with_offset(unsigned long val) 310 + * xen_set_itm(val - XEN_MAPPEDREGS->itc_offset); 311 + */ 312 + /* 2 bundles */ 313 + DEFINE_VOID_FUNC1(set_itm_with_offset, 314 + "mov r2 = " __stringify(XSI_BASE) " + " 315 + __stringify(XSI_ITC_OFFSET_OFS) "\n" 316 + ";;\n" 317 + "ld8 r3 = [r2]\n" 318 + ";;\n" 319 + "sub r8 = r8, r3\n" 320 + "break " __stringify(HYPERPRIVOP_SET_ITM) "\n"); 321 + 322 + /* 323 + * static unsigned long 
xen_get_itm_with_offset(void) 324 + * return ia64_native_getreg(_IA64_REG_CR_ITM) + XEN_MAPPEDREGS->itc_offset; 325 + */ 326 + /* 2 bundles */ 327 + DEFINE_FUNC0(get_itm_with_offset, 328 + "mov r2 = " __stringify(XSI_BASE) " + " 329 + __stringify(XSI_ITC_OFFSET_OFS) "\n" 330 + ";;\n" 331 + "ld8 r3 = [r2]\n" 332 + "mov r8 = cr.itm\n" 333 + ";;\n" 334 + "add r8 = r8, r2\n"); 335 + 336 + /* 337 + * static void xen_set_itc(unsigned long val) 338 + * unsigned long mitc; 339 + * 340 + * WARN_ON(!irqs_disabled()); 341 + * mitc = ia64_native_getreg(_IA64_REG_AR_ITC); 342 + * XEN_MAPPEDREGS->itc_offset = val - mitc; 343 + * XEN_MAPPEDREGS->itc_last = val; 344 + */ 345 + /* 2 bundles */ 346 + DEFINE_VOID_FUNC1(set_itc, 347 + "mov r2 = " __stringify(XSI_BASE) " + " 348 + __stringify(XSI_ITC_LAST_OFS) "\n" 349 + "mov r3 = ar.itc\n" 350 + ";;\n" 351 + "sub r3 = r8, r3\n" 352 + "st8 [r2] = r8, " 353 + __stringify(XSI_ITC_LAST_OFS) " - " 354 + __stringify(XSI_ITC_OFFSET_OFS) "\n" 355 + ";;\n" 356 + "st8 [r2] = r3\n"); 357 + 358 + /* 359 + * static unsigned long xen_get_itc(void) 360 + * unsigned long res; 361 + * unsigned long itc_offset; 362 + * unsigned long itc_last; 363 + * unsigned long ret_itc_last; 364 + * 365 + * itc_offset = XEN_MAPPEDREGS->itc_offset; 366 + * do { 367 + * itc_last = XEN_MAPPEDREGS->itc_last; 368 + * res = ia64_native_getreg(_IA64_REG_AR_ITC); 369 + * res += itc_offset; 370 + * if (itc_last >= res) 371 + * res = itc_last + 1; 372 + * ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last, 373 + * itc_last, res); 374 + * } while (unlikely(ret_itc_last != itc_last)); 375 + * return res; 376 + */ 377 + /* 5 bundles */ 378 + DEFINE_FUNC0(get_itc, 379 + "mov r2 = " __stringify(XSI_BASE) " + " 380 + __stringify(XSI_ITC_OFFSET_OFS) "\n" 381 + ";;\n" 382 + "ld8 r9 = [r2], " __stringify(XSI_ITC_LAST_OFS) " - " 383 + __stringify(XSI_ITC_OFFSET_OFS) "\n" 384 + /* r9 = itc_offset */ 385 + /* r2 = XSI_ITC_OFFSET */ 386 + "888:\n" 387 + "mov r8 = ar.itc\n" /* res = ar.itc 
*/ 388 + ";;\n" 389 + "ld8 r3 = [r2]\n" /* r3 = itc_last */ 390 + "add r8 = r8, r9\n" /* res = ar.itc + itc_offset */ 391 + ";;\n" 392 + "cmp.gtu p6, p0 = r3, r8\n" 393 + ";;\n" 394 + "(p6) add r8 = 1, r3\n" /* if (itc_last > res) itc_last + 1 */ 395 + ";;\n" 396 + "mov ar.ccv = r8\n" 397 + ";;\n" 398 + "cmpxchg8.acq r10 = [r2], r8, ar.ccv\n" 399 + ";;\n" 400 + "cmp.ne p6, p0 = r10, r3\n" 401 + "(p6) hint @pause\n" 402 + "(p6) br.cond.spnt 888b\n"); 403 + 404 + DEFINE_VOID_FUNC1_VOID(fc, 405 + "break " __stringify(HYPERPRIVOP_FC) "\n"); 406 + 407 + /* 408 + * psr_i_addr_addr = XEN_PSR_I_ADDR_ADDR 409 + * masked_addr = *psr_i_addr_addr 410 + * pending_intr_addr = masked_addr - 1 411 + * if (val & IA64_PSR_I) { 412 + * masked = *masked_addr 413 + * *masked_addr = 0:xen_set_virtual_psr_i(1) 414 + * compiler barrier 415 + * if (masked) { 416 + * uint8_t pending = *pending_intr_addr; 417 + * if (pending) 418 + * XEN_HYPER_SSM_I 419 + * } 420 + * } else { 421 + * *masked_addr = 1:xen_set_virtual_psr_i(0) 422 + * } 423 + */ 424 + /* 6 bundles */ 425 + DEFINE_VOID_FUNC1(intrin_local_irq_restore, 426 + /* r8 = input value: 0 or IA64_PSR_I 427 + * p6 = (flags & IA64_PSR_I) 428 + * = if clause 429 + * p7 = !(flags & IA64_PSR_I) 430 + * = else clause 431 + */ 432 + "cmp.ne p6, p7 = r8, r0\n" 433 + "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" 434 + ";;\n" 435 + /* r9 = XEN_PSR_I_ADDR */ 436 + "ld8 r9 = [r9]\n" 437 + ";;\n" 438 + 439 + /* r10 = masked previous value */ 440 + "(p6) ld1.acq r10 = [r9]\n" 441 + ";;\n" 442 + 443 + /* p8 = !masked interrupt masked previously? 
*/ 444 + "(p6) cmp.ne.unc p8, p0 = r10, r0\n" 445 + 446 + /* p7 = else clause */ 447 + "(p7) mov r11 = 1\n" 448 + ";;\n" 449 + /* masked = 1 */ 450 + "(p7) st1.rel [r9] = r11\n" 451 + 452 + /* p6 = if clause */ 453 + /* masked = 0 454 + * r9 = masked_addr - 1 455 + * = pending_intr_addr 456 + */ 457 + "(p8) st1.rel [r9] = r0, -1\n" 458 + ";;\n" 459 + /* r8 = pending_intr */ 460 + "(p8) ld1.acq r11 = [r9]\n" 461 + ";;\n" 462 + /* p9 = interrupt pending? */ 463 + "(p8) cmp.ne.unc p9, p10 = r11, r0\n" 464 + ";;\n" 465 + "(p10) mf\n" 466 + /* issue hypercall to trigger interrupt */ 467 + "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n"); 468 + 469 + DEFINE_VOID_FUNC2(ptcga, 470 + "break " __stringify(HYPERPRIVOP_PTC_GA) "\n"); 471 + DEFINE_VOID_FUNC2(set_rr, 472 + "break " __stringify(HYPERPRIVOP_SET_RR) "\n"); 473 + 474 + /* 475 + * tmp = XEN_MAPPEDREGS->interrupt_mask_addr = XEN_PSR_I_ADDR_ADDR; 476 + * tmp = *tmp 477 + * tmp = *tmp; 478 + * psr_i = tmp? 0: IA64_PSR_I; 479 + */ 480 + /* 4 bundles */ 481 + DEFINE_FUNC0(get_psr_i, 482 + "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" 483 + ";;\n" 484 + "ld8 r9 = [r9]\n" /* r9 = XEN_PSR_I_ADDR */ 485 + "mov r8 = 0\n" /* psr_i = 0 */ 486 + ";;\n" 487 + "ld1.acq r9 = [r9]\n" /* r9 = XEN_PSR_I */ 488 + ";;\n" 489 + "cmp.eq.unc p6, p0 = r9, r0\n" /* p6 = (XEN_PSR_I != 0) */ 490 + ";;\n" 491 + "(p6) mov r8 = " __stringify(1 << IA64_PSR_I_BIT) "\n"); 492 + 493 + DEFINE_FUNC1(thash, unsigned long, 494 + "break " __stringify(HYPERPRIVOP_THASH) "\n"); 495 + DEFINE_FUNC1(get_cpuid, int, 496 + "break " __stringify(HYPERPRIVOP_GET_CPUID) "\n"); 497 + DEFINE_FUNC1(get_pmd, int, 498 + "break " __stringify(HYPERPRIVOP_GET_PMD) "\n"); 499 + DEFINE_FUNC1(get_rr, unsigned long, 500 + "break " __stringify(HYPERPRIVOP_GET_RR) "\n"); 501 + 502 + /* 503 + * void xen_privop_ssm_i(void) 504 + * 505 + * int masked = !xen_get_virtual_psr_i(); 506 + * // masked = *(*XEN_MAPPEDREGS->interrupt_mask_addr) 507 + * xen_set_virtual_psr_i(1) 508 + * 
// *(*XEN_MAPPEDREGS->interrupt_mask_addr) = 0 509 + * // compiler barrier 510 + * if (masked) { 511 + * uint8_t* pend_int_addr = 512 + * (uint8_t*)(*XEN_MAPPEDREGS->interrupt_mask_addr) - 1; 513 + * uint8_t pending = *pend_int_addr; 514 + * if (pending) 515 + * XEN_HYPER_SSM_I 516 + * } 517 + */ 518 + /* 4 bundles */ 519 + DEFINE_VOID_FUNC0(ssm_i, 520 + "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" 521 + ";;\n" 522 + "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I_ADDR */ 523 + ";;\n" 524 + "ld1.acq r9 = [r8]\n" /* r9 = XEN_PSR_I */ 525 + ";;\n" 526 + "st1.rel [r8] = r0, -1\n" /* psr_i = 0. enable interrupt 527 + * r8 = XEN_PSR_I_ADDR - 1 528 + * = pend_int_addr 529 + */ 530 + "cmp.eq.unc p0, p6 = r9, r0\n"/* p6 = !XEN_PSR_I 531 + * previously interrupt 532 + * masked? 533 + */ 534 + ";;\n" 535 + "(p6) ld1.acq r8 = [r8]\n" /* r8 = xen_pend_int */ 536 + ";;\n" 537 + "(p6) cmp.eq.unc p6, p7 = r8, r0\n" /*interrupt pending?*/ 538 + ";;\n" 539 + /* issue hypercall to get interrupt */ 540 + "(p7) break " __stringify(HYPERPRIVOP_SSM_I) "\n" 541 + ";;\n"); 542 + 543 + /* 544 + * psr_i_addr_addr = XEN_MAPPEDREGS->interrupt_mask_addr 545 + * = XEN_PSR_I_ADDR_ADDR; 546 + * psr_i_addr = *psr_i_addr_addr; 547 + * *psr_i_addr = 1; 548 + */ 549 + /* 2 bundles */ 550 + DEFINE_VOID_FUNC0(rsm_i, 551 + "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n" 552 + /* r8 = XEN_PSR_I_ADDR */ 553 + "mov r9 = 1\n" 554 + ";;\n" 555 + "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I */ 556 + ";;\n" 557 + "st1.rel [r8] = r9\n"); /* XEN_PSR_I = 1 */ 558 + 559 + extern void 560 + xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1, 561 + unsigned long val2, unsigned long val3, 562 + unsigned long val4); 563 + __DEFINE_FUNC(set_rr0_to_rr4, 564 + "break " __stringify(HYPERPRIVOP_SET_RR0_TO_RR4) "\n"); 565 + 566 + 567 + extern unsigned long xen_getreg(int regnum); 568 + #define __DEFINE_GET_REG(id, privop) \ 569 + "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ 570 + ";;\n" \ 571 + "cmp.eq p6, p0 = r2, r8\n" \ 572 
+ ";;\n" \ 573 + "(p6) break " __stringify(HYPERPRIVOP_GET_ ## privop) "\n" \ 574 + "(p6) br.cond.sptk.many b6\n" \ 575 + ";;\n" 576 + 577 + __DEFINE_FUNC(getreg, 578 + __DEFINE_GET_REG(PSR, PSR) 579 + #ifdef CONFIG_IA32_SUPPORT 580 + __DEFINE_GET_REG(AR_EFLAG, EFLAG) 581 + #endif 582 + 583 + /* get_itc */ 584 + "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n" 585 + ";;\n" 586 + "cmp.eq p6, p0 = r2, r8\n" 587 + ";;\n" 588 + "(p6) br.cond.spnt xen_get_itc\n" 589 + ";;\n" 590 + 591 + /* get itm */ 592 + "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n" 593 + ";;\n" 594 + "cmp.eq p6, p0 = r2, r8\n" 595 + ";;\n" 596 + "(p6) br.cond.spnt xen_get_itm_with_offset\n" 597 + ";;\n" 598 + 599 + __DEFINE_GET_REG(CR_IVR, IVR) 600 + __DEFINE_GET_REG(CR_TPR, TPR) 601 + 602 + /* fall back */ 603 + "movl r2 = ia64_native_getreg_func\n" 604 + ";;\n" 605 + "mov b7 = r2\n" 606 + ";;\n" 607 + "br.cond.sptk.many b7\n"); 608 + 609 + extern void xen_setreg(int regnum, unsigned long val); 610 + #define __DEFINE_SET_REG(id, privop) \ 611 + "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ 612 + ";;\n" \ 613 + "cmp.eq p6, p0 = r2, r9\n" \ 614 + ";;\n" \ 615 + "(p6) break " __stringify(HYPERPRIVOP_ ## privop) "\n" \ 616 + "(p6) br.cond.sptk.many b6\n" \ 617 + ";;\n" 618 + 619 + __DEFINE_FUNC(setreg, 620 + /* kr0 .. 
kr 7*/ 621 + /* 622 + * if (_IA64_REG_AR_KR0 <= regnum && 623 + * regnum <= _IA64_REG_AR_KR7) { 624 + * register __index asm ("r8") = regnum - _IA64_REG_AR_KR0 625 + * register __val asm ("r9") = val 626 + * "break HYPERPRIVOP_SET_KR" 627 + * } 628 + */ 629 + "mov r17 = r9\n" 630 + "mov r2 = " __stringify(_IA64_REG_AR_KR0) "\n" 631 + ";;\n" 632 + "cmp.ge p6, p0 = r9, r2\n" 633 + "sub r17 = r17, r2\n" 634 + ";;\n" 635 + "(p6) cmp.ge.unc p7, p0 = " 636 + __stringify(_IA64_REG_AR_KR7) " - " __stringify(_IA64_REG_AR_KR0) 637 + ", r17\n" 638 + ";;\n" 639 + "(p7) mov r9 = r8\n" 640 + ";;\n" 641 + "(p7) mov r8 = r17\n" 642 + "(p7) break " __stringify(HYPERPRIVOP_SET_KR) "\n" 643 + 644 + /* set itm */ 645 + "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n" 646 + ";;\n" 647 + "cmp.eq p6, p0 = r2, r8\n" 648 + ";;\n" 649 + "(p6) br.cond.spnt xen_set_itm_with_offset\n" 650 + 651 + /* set itc */ 652 + "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n" 653 + ";;\n" 654 + "cmp.eq p6, p0 = r2, r8\n" 655 + ";;\n" 656 + "(p6) br.cond.spnt xen_set_itc\n" 657 + 658 + #ifdef CONFIG_IA32_SUPPORT 659 + __DEFINE_SET_REG(AR_EFLAG, SET_EFLAG) 660 + #endif 661 + __DEFINE_SET_REG(CR_TPR, SET_TPR) 662 + __DEFINE_SET_REG(CR_EOI, EOI) 663 + 664 + /* fall back */ 665 + "movl r2 = ia64_native_setreg_func\n" 666 + ";;\n" 667 + "mov b7 = r2\n" 668 + ";;\n" 669 + "br.cond.sptk.many b7\n"); 670 + #endif 671 + 672 + static const struct pv_cpu_ops xen_cpu_ops __initconst = { 398 673 .fc = xen_fc, 399 674 .thash = xen_thash, 400 675 .get_cpuid = xen_get_cpuid, ··· 880 337 HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op); 881 338 } 882 339 883 - static const struct pv_iosapic_ops xen_iosapic_ops __initconst = { 340 + static struct pv_iosapic_ops xen_iosapic_ops __initdata = { 884 341 .pcat_compat_init = xen_pcat_compat_init, 885 342 .__get_irq_chip = xen_iosapic_get_irq_chip, 886 343 ··· 898 355 xen_info_init(); 899 356 pv_info = xen_info; 900 357 pv_init_ops = xen_init_ops; 358 + pv_fsys_data = 
xen_fsys_data; 359 + pv_patchdata = xen_patchdata; 901 360 pv_cpu_ops = xen_cpu_ops; 902 361 pv_iosapic_ops = xen_iosapic_ops; 903 362 pv_irq_ops = xen_irq_ops; 904 363 pv_time_ops = xen_time_ops; 905 364 906 365 paravirt_cpu_asm_init(&xen_cpu_asm_switch); 366 + } 367 + 368 + #ifdef ASM_SUPPORTED 369 + /*************************************************************************** 370 + * binary pacthing 371 + * pv_init_ops.patch_bundle 372 + */ 373 + 374 + #define DEFINE_FUNC_GETREG(name, privop) \ 375 + DEFINE_FUNC0(get_ ## name, \ 376 + "break "__stringify(HYPERPRIVOP_GET_ ## privop) "\n") 377 + 378 + DEFINE_FUNC_GETREG(psr, PSR); 379 + DEFINE_FUNC_GETREG(eflag, EFLAG); 380 + DEFINE_FUNC_GETREG(ivr, IVR); 381 + DEFINE_FUNC_GETREG(tpr, TPR); 382 + 383 + #define DEFINE_FUNC_SET_KR(n) \ 384 + DEFINE_VOID_FUNC0(set_kr ## n, \ 385 + ";;\n" \ 386 + "mov r9 = r8\n" \ 387 + "mov r8 = " #n "\n" \ 388 + "break " __stringify(HYPERPRIVOP_SET_KR) "\n") 389 + 390 + DEFINE_FUNC_SET_KR(0); 391 + DEFINE_FUNC_SET_KR(1); 392 + DEFINE_FUNC_SET_KR(2); 393 + DEFINE_FUNC_SET_KR(3); 394 + DEFINE_FUNC_SET_KR(4); 395 + DEFINE_FUNC_SET_KR(5); 396 + DEFINE_FUNC_SET_KR(6); 397 + DEFINE_FUNC_SET_KR(7); 398 + 399 + #define __DEFINE_FUNC_SETREG(name, privop) \ 400 + DEFINE_VOID_FUNC0(name, \ 401 + "break "__stringify(HYPERPRIVOP_ ## privop) "\n") 402 + 403 + #define DEFINE_FUNC_SETREG(name, privop) \ 404 + __DEFINE_FUNC_SETREG(set_ ## name, SET_ ## privop) 405 + 406 + DEFINE_FUNC_SETREG(eflag, EFLAG); 407 + DEFINE_FUNC_SETREG(tpr, TPR); 408 + __DEFINE_FUNC_SETREG(eoi, EOI); 409 + 410 + extern const char xen_check_events[]; 411 + extern const char __xen_intrin_local_irq_restore_direct_start[]; 412 + extern const char __xen_intrin_local_irq_restore_direct_end[]; 413 + extern const unsigned long __xen_intrin_local_irq_restore_direct_reloc; 414 + 415 + asm ( 416 + ".align 32\n" 417 + ".proc xen_check_events\n" 418 + "xen_check_events:\n" 419 + /* masked = 0 420 + * r9 = masked_addr - 1 421 + * = 
pending_intr_addr 422 + */ 423 + "st1.rel [r9] = r0, -1\n" 424 + ";;\n" 425 + /* r8 = pending_intr */ 426 + "ld1.acq r11 = [r9]\n" 427 + ";;\n" 428 + /* p9 = interrupt pending? */ 429 + "cmp.ne p9, p10 = r11, r0\n" 430 + ";;\n" 431 + "(p10) mf\n" 432 + /* issue hypercall to trigger interrupt */ 433 + "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n" 434 + "br.cond.sptk.many b6\n" 435 + ".endp xen_check_events\n" 436 + "\n" 437 + ".align 32\n" 438 + ".proc __xen_intrin_local_irq_restore_direct\n" 439 + "__xen_intrin_local_irq_restore_direct:\n" 440 + "__xen_intrin_local_irq_restore_direct_start:\n" 441 + "1:\n" 442 + "{\n" 443 + "cmp.ne p6, p7 = r8, r0\n" 444 + "mov r17 = ip\n" /* get ip to calc return address */ 445 + "mov r9 = "__stringify(XEN_PSR_I_ADDR_ADDR) "\n" 446 + ";;\n" 447 + "}\n" 448 + "{\n" 449 + /* r9 = XEN_PSR_I_ADDR */ 450 + "ld8 r9 = [r9]\n" 451 + ";;\n" 452 + /* r10 = masked previous value */ 453 + "(p6) ld1.acq r10 = [r9]\n" 454 + "adds r17 = 1f - 1b, r17\n" /* calculate return address */ 455 + ";;\n" 456 + "}\n" 457 + "{\n" 458 + /* p8 = !masked interrupt masked previously? */ 459 + "(p6) cmp.ne.unc p8, p0 = r10, r0\n" 460 + "\n" 461 + /* p7 = else clause */ 462 + "(p7) mov r11 = 1\n" 463 + ";;\n" 464 + "(p8) mov b6 = r17\n" /* set return address */ 465 + "}\n" 466 + "{\n" 467 + /* masked = 1 */ 468 + "(p7) st1.rel [r9] = r11\n" 469 + "\n" 470 + "[99:]\n" 471 + "(p8) brl.cond.dptk.few xen_check_events\n" 472 + "}\n" 473 + /* pv calling stub is 5 bundles. 
fill nop to adjust return address */ 474 + "{\n" 475 + "nop 0\n" 476 + "nop 0\n" 477 + "nop 0\n" 478 + "}\n" 479 + "1:\n" 480 + "__xen_intrin_local_irq_restore_direct_end:\n" 481 + ".endp __xen_intrin_local_irq_restore_direct\n" 482 + "\n" 483 + ".align 8\n" 484 + "__xen_intrin_local_irq_restore_direct_reloc:\n" 485 + "data8 99b\n" 486 + ); 487 + 488 + static struct paravirt_patch_bundle_elem xen_patch_bundle_elems[] 489 + __initdata_or_module = 490 + { 491 + #define XEN_PATCH_BUNDLE_ELEM(name, type) \ 492 + { \ 493 + (void*)xen_ ## name ## _direct_start, \ 494 + (void*)xen_ ## name ## _direct_end, \ 495 + PARAVIRT_PATCH_TYPE_ ## type, \ 496 + } 497 + 498 + XEN_PATCH_BUNDLE_ELEM(fc, FC), 499 + XEN_PATCH_BUNDLE_ELEM(thash, THASH), 500 + XEN_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID), 501 + XEN_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD), 502 + XEN_PATCH_BUNDLE_ELEM(ptcga, PTCGA), 503 + XEN_PATCH_BUNDLE_ELEM(get_rr, GET_RR), 504 + XEN_PATCH_BUNDLE_ELEM(set_rr, SET_RR), 505 + XEN_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4), 506 + XEN_PATCH_BUNDLE_ELEM(ssm_i, SSM_I), 507 + XEN_PATCH_BUNDLE_ELEM(rsm_i, RSM_I), 508 + XEN_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I), 509 + { 510 + (void*)__xen_intrin_local_irq_restore_direct_start, 511 + (void*)__xen_intrin_local_irq_restore_direct_end, 512 + PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE, 513 + }, 514 + 515 + #define XEN_PATCH_BUNDLE_ELEM_GETREG(name, reg) \ 516 + { \ 517 + xen_get_ ## name ## _direct_start, \ 518 + xen_get_ ## name ## _direct_end, \ 519 + PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \ 520 + } 521 + 522 + XEN_PATCH_BUNDLE_ELEM_GETREG(psr, PSR), 523 + XEN_PATCH_BUNDLE_ELEM_GETREG(eflag, AR_EFLAG), 524 + 525 + XEN_PATCH_BUNDLE_ELEM_GETREG(ivr, CR_IVR), 526 + XEN_PATCH_BUNDLE_ELEM_GETREG(tpr, CR_TPR), 527 + 528 + XEN_PATCH_BUNDLE_ELEM_GETREG(itc, AR_ITC), 529 + XEN_PATCH_BUNDLE_ELEM_GETREG(itm_with_offset, CR_ITM), 530 + 531 + 532 + #define __XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ 533 + { \ 534 + xen_ ## name ## 
_direct_start, \ 535 + xen_ ## name ## _direct_end, \ 536 + PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \ 537 + } 538 + 539 + #define XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ 540 + __XEN_PATCH_BUNDLE_ELEM_SETREG(set_ ## name, reg) 541 + 542 + XEN_PATCH_BUNDLE_ELEM_SETREG(kr0, AR_KR0), 543 + XEN_PATCH_BUNDLE_ELEM_SETREG(kr1, AR_KR1), 544 + XEN_PATCH_BUNDLE_ELEM_SETREG(kr2, AR_KR2), 545 + XEN_PATCH_BUNDLE_ELEM_SETREG(kr3, AR_KR3), 546 + XEN_PATCH_BUNDLE_ELEM_SETREG(kr4, AR_KR4), 547 + XEN_PATCH_BUNDLE_ELEM_SETREG(kr5, AR_KR5), 548 + XEN_PATCH_BUNDLE_ELEM_SETREG(kr6, AR_KR6), 549 + XEN_PATCH_BUNDLE_ELEM_SETREG(kr7, AR_KR7), 550 + 551 + XEN_PATCH_BUNDLE_ELEM_SETREG(eflag, AR_EFLAG), 552 + XEN_PATCH_BUNDLE_ELEM_SETREG(tpr, CR_TPR), 553 + __XEN_PATCH_BUNDLE_ELEM_SETREG(eoi, CR_EOI), 554 + 555 + XEN_PATCH_BUNDLE_ELEM_SETREG(itc, AR_ITC), 556 + XEN_PATCH_BUNDLE_ELEM_SETREG(itm_with_offset, CR_ITM), 557 + }; 558 + 559 + static unsigned long __init_or_module 560 + xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type) 561 + { 562 + const unsigned long nelems = sizeof(xen_patch_bundle_elems) / 563 + sizeof(xen_patch_bundle_elems[0]); 564 + unsigned long used; 565 + const struct paravirt_patch_bundle_elem *found; 566 + 567 + used = __paravirt_patch_apply_bundle(sbundle, ebundle, type, 568 + xen_patch_bundle_elems, nelems, 569 + &found); 570 + 571 + if (found == NULL) 572 + /* fallback */ 573 + return ia64_native_patch_bundle(sbundle, ebundle, type); 574 + if (used == 0) 575 + return used; 576 + 577 + /* relocation */ 578 + switch (type) { 579 + case PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE: { 580 + unsigned long reloc = 581 + __xen_intrin_local_irq_restore_direct_reloc; 582 + unsigned long reloc_offset = reloc - (unsigned long) 583 + __xen_intrin_local_irq_restore_direct_start; 584 + unsigned long tag = (unsigned long)sbundle + reloc_offset; 585 + paravirt_patch_reloc_brl(tag, xen_check_events); 586 + break; 587 + } 588 + default: 589 + /* nothing */ 590 + 
break; 591 + } 592 + return used; 593 + } 594 + #endif /* ASM_SUPPOTED */ 595 + 596 + const struct paravirt_patch_branch_target xen_branch_target[] 597 + __initconst = { 598 + #define PARAVIRT_BR_TARGET(name, type) \ 599 + { \ 600 + &xen_ ## name, \ 601 + PARAVIRT_PATCH_TYPE_BR_ ## type, \ 602 + } 603 + PARAVIRT_BR_TARGET(switch_to, SWITCH_TO), 604 + PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL), 605 + PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL), 606 + PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL), 607 + }; 608 + 609 + static void __init 610 + xen_patch_branch(unsigned long tag, unsigned long type) 611 + { 612 + const unsigned long nelem = 613 + sizeof(xen_branch_target) / sizeof(xen_branch_target[0]); 614 + __paravirt_patch_apply_branch(tag, type, xen_branch_target, nelem); 907 615 }