Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ia64/pv_ops: implement binary patching optimization for native.

implement binary patching optimization for pv_cpu_ops.
With this optimization, indirect calls to pv_cpu_ops methods can be
converted into inline execution or direct calls.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
Signed-off-by: Tony Luck <tony.luck@intel.com>

Authored by Isaku Yamahata and committed by Tony Luck
03f511dd bf7ab02f

+898 -25
+5 -1
arch/ia64/include/asm/intrinsics.h
··· 201 201 202 202 #ifndef __ASSEMBLY__ 203 203 #if defined(CONFIG_PARAVIRT) && defined(__KERNEL__) 204 - #define IA64_INTRINSIC_API(name) pv_cpu_ops.name 204 + #ifdef ASM_SUPPORTED 205 + # define IA64_INTRINSIC_API(name) paravirt_ ## name 206 + #else 207 + # define IA64_INTRINSIC_API(name) pv_cpu_ops.name 208 + #endif 205 209 #define IA64_INTRINSIC_MACRO(name) paravirt_ ## name 206 210 #else 207 211 #define IA64_INTRINSIC_API(name) ia64_native_ ## name
+8
arch/ia64/include/asm/paravirt.h
··· 118 118 int (*arch_setup_nomca)(void); 119 119 120 120 void (*post_smp_prepare_boot_cpu)(void); 121 + 122 + #ifdef ASM_SUPPORTED 123 + unsigned long (*patch_bundle)(void *sbundle, void *ebundle, 124 + unsigned long type); 125 + unsigned long (*patch_inst)(unsigned long stag, unsigned long etag, 126 + unsigned long type); 127 + #endif 128 + void (*patch_branch)(unsigned long tag, unsigned long type); 121 129 }; 122 130 123 131 extern struct pv_init_ops pv_init_ops;
+338 -3
arch/ia64/include/asm/paravirt_privop.h
··· 60 60 /* Instructions paravirtualized for performance */ 61 61 /************************************************/ 62 62 63 + #ifndef ASM_SUPPORTED 64 + #define paravirt_ssm_i() pv_cpu_ops.ssm_i() 65 + #define paravirt_rsm_i() pv_cpu_ops.rsm_i() 66 + #define __paravirt_getreg() pv_cpu_ops.getreg() 67 + #endif 68 + 63 69 /* mask for ia64_native_ssm/rsm() must be constant.("i" constraing). 64 70 * static inline function doesn't satisfy it. */ 65 71 #define paravirt_ssm(mask) \ 66 72 do { \ 67 73 if ((mask) == IA64_PSR_I) \ 68 - pv_cpu_ops.ssm_i(); \ 74 + paravirt_ssm_i(); \ 69 75 else \ 70 76 ia64_native_ssm(mask); \ 71 77 } while (0) ··· 79 73 #define paravirt_rsm(mask) \ 80 74 do { \ 81 75 if ((mask) == IA64_PSR_I) \ 82 - pv_cpu_ops.rsm_i(); \ 76 + paravirt_rsm_i(); \ 83 77 else \ 84 78 ia64_native_rsm(mask); \ 85 79 } while (0) ··· 92 86 if ((reg) == _IA64_REG_IP) \ 93 87 res = ia64_native_getreg(_IA64_REG_IP); \ 94 88 else \ 95 - res = pv_cpu_ops.getreg(reg); \ 89 + res = __paravirt_getreg(reg); \ 96 90 res; \ 97 91 }) 98 92 ··· 126 120 #define ia64_work_processed_syscall \ 127 121 IA64_PARAVIRT_ASM_FUNC(work_processed_syscall) 128 122 #define ia64_leave_kernel IA64_PARAVIRT_ASM_FUNC(leave_kernel) 123 + 124 + 125 + #if defined(CONFIG_PARAVIRT) 126 + /****************************************************************************** 127 + * binary patching infrastructure 128 + */ 129 + #define PARAVIRT_PATCH_TYPE_FC 1 130 + #define PARAVIRT_PATCH_TYPE_THASH 2 131 + #define PARAVIRT_PATCH_TYPE_GET_CPUID 3 132 + #define PARAVIRT_PATCH_TYPE_GET_PMD 4 133 + #define PARAVIRT_PATCH_TYPE_PTCGA 5 134 + #define PARAVIRT_PATCH_TYPE_GET_RR 6 135 + #define PARAVIRT_PATCH_TYPE_SET_RR 7 136 + #define PARAVIRT_PATCH_TYPE_SET_RR0_TO_RR4 8 137 + #define PARAVIRT_PATCH_TYPE_SSM_I 9 138 + #define PARAVIRT_PATCH_TYPE_RSM_I 10 139 + #define PARAVIRT_PATCH_TYPE_GET_PSR_I 11 140 + #define PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE 12 141 + 142 + /* PARAVIRT_PATY_TYPE_[GS]ETREG + 
_IA64_REG_xxx */ 143 + #define PARAVIRT_PATCH_TYPE_GETREG 0x10000000 144 + #define PARAVIRT_PATCH_TYPE_SETREG 0x20000000 145 + 146 + /* 147 + * struct task_struct* (*ia64_switch_to)(void* next_task); 148 + * void *ia64_leave_syscall; 149 + * void *ia64_work_processed_syscall 150 + * void *ia64_leave_kernel; 151 + */ 152 + 153 + #define PARAVIRT_PATCH_TYPE_BR_START 0x30000000 154 + #define PARAVIRT_PATCH_TYPE_BR_SWITCH_TO \ 155 + (PARAVIRT_PATCH_TYPE_BR_START + 0) 156 + #define PARAVIRT_PATCH_TYPE_BR_LEAVE_SYSCALL \ 157 + (PARAVIRT_PATCH_TYPE_BR_START + 1) 158 + #define PARAVIRT_PATCH_TYPE_BR_WORK_PROCESSED_SYSCALL \ 159 + (PARAVIRT_PATCH_TYPE_BR_START + 2) 160 + #define PARAVIRT_PATCH_TYPE_BR_LEAVE_KERNEL \ 161 + (PARAVIRT_PATCH_TYPE_BR_START + 3) 162 + 163 + #ifdef ASM_SUPPORTED 164 + #include <asm/paravirt_patch.h> 165 + 166 + /* 167 + * pv_cpu_ops calling stub. 168 + * normal function call convension can't be written by gcc 169 + * inline assembly. 170 + * 171 + * from the caller's point of view, 172 + * the following registers will be clobbered. 173 + * r2, r3 174 + * r8-r15 175 + * r16, r17 176 + * b6, b7 177 + * p6-p15 178 + * ar.ccv 179 + * 180 + * from the callee's point of view , 181 + * the following registers can be used. 182 + * r2, r3: scratch 183 + * r8: scratch, input argument0 and return value 184 + * r0-r15: scratch, input argument1-5 185 + * b6: return pointer 186 + * b7: scratch 187 + * p6-p15: scratch 188 + * ar.ccv: scratch 189 + * 190 + * other registers must not be changed. especially 191 + * b0: rp: preserved. gcc ignores b0 in clobbered register. 
192 + * r16: saved gp 193 + */ 194 + /* 5 bundles */ 195 + #define __PARAVIRT_BR \ 196 + ";;\n" \ 197 + "{ .mlx\n" \ 198 + "nop 0\n" \ 199 + "movl r2 = %[op_addr]\n"/* get function pointer address */ \ 200 + ";;\n" \ 201 + "}\n" \ 202 + "1:\n" \ 203 + "{ .mii\n" \ 204 + "ld8 r2 = [r2]\n" /* load function descriptor address */ \ 205 + "mov r17 = ip\n" /* get ip to calc return address */ \ 206 + "mov r16 = gp\n" /* save gp */ \ 207 + ";;\n" \ 208 + "}\n" \ 209 + "{ .mii\n" \ 210 + "ld8 r3 = [r2], 8\n" /* load entry address */ \ 211 + "adds r17 = 1f - 1b, r17\n" /* calculate return address */ \ 212 + ";;\n" \ 213 + "mov b7 = r3\n" /* set entry address */ \ 214 + "}\n" \ 215 + "{ .mib\n" \ 216 + "ld8 gp = [r2]\n" /* load gp value */ \ 217 + "mov b6 = r17\n" /* set return address */ \ 218 + "br.cond.sptk.few b7\n" /* intrinsics are very short isns */ \ 219 + "}\n" \ 220 + "1:\n" \ 221 + "{ .mii\n" \ 222 + "mov gp = r16\n" /* restore gp value */ \ 223 + "nop 0\n" \ 224 + "nop 0\n" \ 225 + ";;\n" \ 226 + "}\n" 227 + 228 + #define PARAVIRT_OP(op) \ 229 + [op_addr] "i"(&pv_cpu_ops.op) 230 + 231 + #define PARAVIRT_TYPE(type) \ 232 + PARAVIRT_PATCH_TYPE_ ## type 233 + 234 + #define PARAVIRT_REG_CLOBBERS0 \ 235 + "r2", "r3", /*"r8",*/ "r9", "r10", "r11", "r14", \ 236 + "r15", "r16", "r17" 237 + 238 + #define PARAVIRT_REG_CLOBBERS1 \ 239 + "r2","r3", /*"r8",*/ "r9", "r10", "r11", "r14", \ 240 + "r15", "r16", "r17" 241 + 242 + #define PARAVIRT_REG_CLOBBERS2 \ 243 + "r2", "r3", /*"r8", "r9",*/ "r10", "r11", "r14", \ 244 + "r15", "r16", "r17" 245 + 246 + #define PARAVIRT_REG_CLOBBERS5 \ 247 + "r2", "r3", /*"r8", "r9", "r10", "r11", "r14",*/ \ 248 + "r15", "r16", "r17" 249 + 250 + #define PARAVIRT_BR_CLOBBERS \ 251 + "b6", "b7" 252 + 253 + #define PARAVIRT_PR_CLOBBERS \ 254 + "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15" 255 + 256 + #define PARAVIRT_AR_CLOBBERS \ 257 + "ar.ccv" 258 + 259 + #define PARAVIRT_CLOBBERS0 \ 260 + PARAVIRT_REG_CLOBBERS0, \ 261 + 
PARAVIRT_BR_CLOBBERS, \ 262 + PARAVIRT_PR_CLOBBERS, \ 263 + PARAVIRT_AR_CLOBBERS, \ 264 + "memory" 265 + 266 + #define PARAVIRT_CLOBBERS1 \ 267 + PARAVIRT_REG_CLOBBERS1, \ 268 + PARAVIRT_BR_CLOBBERS, \ 269 + PARAVIRT_PR_CLOBBERS, \ 270 + PARAVIRT_AR_CLOBBERS, \ 271 + "memory" 272 + 273 + #define PARAVIRT_CLOBBERS2 \ 274 + PARAVIRT_REG_CLOBBERS2, \ 275 + PARAVIRT_BR_CLOBBERS, \ 276 + PARAVIRT_PR_CLOBBERS, \ 277 + PARAVIRT_AR_CLOBBERS, \ 278 + "memory" 279 + 280 + #define PARAVIRT_CLOBBERS5 \ 281 + PARAVIRT_REG_CLOBBERS5, \ 282 + PARAVIRT_BR_CLOBBERS, \ 283 + PARAVIRT_PR_CLOBBERS, \ 284 + PARAVIRT_AR_CLOBBERS, \ 285 + "memory" 286 + 287 + #define PARAVIRT_BR0(op, type) \ 288 + register unsigned long ia64_clobber asm ("r8"); \ 289 + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ 290 + PARAVIRT_TYPE(type)) \ 291 + : "=r"(ia64_clobber) \ 292 + : PARAVIRT_OP(op) \ 293 + : PARAVIRT_CLOBBERS0) 294 + 295 + #define PARAVIRT_BR0_RET(op, type) \ 296 + register unsigned long ia64_intri_res asm ("r8"); \ 297 + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ 298 + PARAVIRT_TYPE(type)) \ 299 + : "=r"(ia64_intri_res) \ 300 + : PARAVIRT_OP(op) \ 301 + : PARAVIRT_CLOBBERS0) 302 + 303 + #define PARAVIRT_BR1(op, type, arg1) \ 304 + register unsigned long __##arg1 asm ("r8") = arg1; \ 305 + register unsigned long ia64_clobber asm ("r8"); \ 306 + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ 307 + PARAVIRT_TYPE(type)) \ 308 + : "=r"(ia64_clobber) \ 309 + : PARAVIRT_OP(op), "0"(__##arg1) \ 310 + : PARAVIRT_CLOBBERS1) 311 + 312 + #define PARAVIRT_BR1_RET(op, type, arg1) \ 313 + register unsigned long ia64_intri_res asm ("r8"); \ 314 + register unsigned long __##arg1 asm ("r8") = arg1; \ 315 + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ 316 + PARAVIRT_TYPE(type)) \ 317 + : "=r"(ia64_intri_res) \ 318 + : PARAVIRT_OP(op), "0"(__##arg1) \ 319 + : PARAVIRT_CLOBBERS1) 320 + 321 + #define PARAVIRT_BR2(op, type, arg1, arg2) \ 322 + register unsigned long __##arg1 asm ("r8") = 
arg1; \ 323 + register unsigned long __##arg2 asm ("r9") = arg2; \ 324 + register unsigned long ia64_clobber1 asm ("r8"); \ 325 + register unsigned long ia64_clobber2 asm ("r9"); \ 326 + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ 327 + PARAVIRT_TYPE(type)) \ 328 + : "=r"(ia64_clobber1), "=r"(ia64_clobber2) \ 329 + : PARAVIRT_OP(op), "0"(__##arg1), "1"(__##arg2) \ 330 + : PARAVIRT_CLOBBERS2) 331 + 332 + 333 + #define PARAVIRT_DEFINE_CPU_OP0(op, type) \ 334 + static inline void \ 335 + paravirt_ ## op (void) \ 336 + { \ 337 + PARAVIRT_BR0(op, type); \ 338 + } 339 + 340 + #define PARAVIRT_DEFINE_CPU_OP0_RET(op, type) \ 341 + static inline unsigned long \ 342 + paravirt_ ## op (void) \ 343 + { \ 344 + PARAVIRT_BR0_RET(op, type); \ 345 + return ia64_intri_res; \ 346 + } 347 + 348 + #define PARAVIRT_DEFINE_CPU_OP1(op, type) \ 349 + static inline void \ 350 + paravirt_ ## op (unsigned long arg1) \ 351 + { \ 352 + PARAVIRT_BR1(op, type, arg1); \ 353 + } 354 + 355 + #define PARAVIRT_DEFINE_CPU_OP1_RET(op, type) \ 356 + static inline unsigned long \ 357 + paravirt_ ## op (unsigned long arg1) \ 358 + { \ 359 + PARAVIRT_BR1_RET(op, type, arg1); \ 360 + return ia64_intri_res; \ 361 + } 362 + 363 + #define PARAVIRT_DEFINE_CPU_OP2(op, type) \ 364 + static inline void \ 365 + paravirt_ ## op (unsigned long arg1, \ 366 + unsigned long arg2) \ 367 + { \ 368 + PARAVIRT_BR2(op, type, arg1, arg2); \ 369 + } 370 + 371 + 372 + PARAVIRT_DEFINE_CPU_OP1(fc, FC); 373 + PARAVIRT_DEFINE_CPU_OP1_RET(thash, THASH) 374 + PARAVIRT_DEFINE_CPU_OP1_RET(get_cpuid, GET_CPUID) 375 + PARAVIRT_DEFINE_CPU_OP1_RET(get_pmd, GET_PMD) 376 + PARAVIRT_DEFINE_CPU_OP2(ptcga, PTCGA) 377 + PARAVIRT_DEFINE_CPU_OP1_RET(get_rr, GET_RR) 378 + PARAVIRT_DEFINE_CPU_OP2(set_rr, SET_RR) 379 + PARAVIRT_DEFINE_CPU_OP0(ssm_i, SSM_I) 380 + PARAVIRT_DEFINE_CPU_OP0(rsm_i, RSM_I) 381 + PARAVIRT_DEFINE_CPU_OP0_RET(get_psr_i, GET_PSR_I) 382 + PARAVIRT_DEFINE_CPU_OP1(intrin_local_irq_restore, INTRIN_LOCAL_IRQ_RESTORE) 383 + 
384 + static inline void 385 + paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1, 386 + unsigned long val2, unsigned long val3, 387 + unsigned long val4) 388 + { 389 + register unsigned long __val0 asm ("r8") = val0; 390 + register unsigned long __val1 asm ("r9") = val1; 391 + register unsigned long __val2 asm ("r10") = val2; 392 + register unsigned long __val3 asm ("r11") = val3; 393 + register unsigned long __val4 asm ("r14") = val4; 394 + 395 + register unsigned long ia64_clobber0 asm ("r8"); 396 + register unsigned long ia64_clobber1 asm ("r9"); 397 + register unsigned long ia64_clobber2 asm ("r10"); 398 + register unsigned long ia64_clobber3 asm ("r11"); 399 + register unsigned long ia64_clobber4 asm ("r14"); 400 + 401 + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, 402 + PARAVIRT_TYPE(SET_RR0_TO_RR4)) 403 + : "=r"(ia64_clobber0), 404 + "=r"(ia64_clobber1), 405 + "=r"(ia64_clobber2), 406 + "=r"(ia64_clobber3), 407 + "=r"(ia64_clobber4) 408 + : PARAVIRT_OP(set_rr0_to_rr4), 409 + "0"(__val0), "1"(__val1), "2"(__val2), 410 + "3"(__val3), "4"(__val4) 411 + : PARAVIRT_CLOBBERS5); 412 + } 413 + 414 + /* unsigned long paravirt_getreg(int reg) */ 415 + #define __paravirt_getreg(reg) \ 416 + ({ \ 417 + register unsigned long ia64_intri_res asm ("r8"); \ 418 + register unsigned long __reg asm ("r8") = (reg); \ 419 + \ 420 + BUILD_BUG_ON(!__builtin_constant_p(reg)); \ 421 + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ 422 + PARAVIRT_TYPE(GETREG) \ 423 + + (reg)) \ 424 + : "=r"(ia64_intri_res) \ 425 + : PARAVIRT_OP(getreg), "0"(__reg) \ 426 + : PARAVIRT_CLOBBERS1); \ 427 + \ 428 + ia64_intri_res; \ 429 + }) 430 + 431 + /* void paravirt_setreg(int reg, unsigned long val) */ 432 + #define paravirt_setreg(reg, val) \ 433 + do { \ 434 + register unsigned long __val asm ("r8") = val; \ 435 + register unsigned long __reg asm ("r9") = reg; \ 436 + register unsigned long ia64_clobber1 asm ("r8"); \ 437 + register unsigned long ia64_clobber2 asm ("r9"); \ 438 
+ \ 439 + BUILD_BUG_ON(!__builtin_constant_p(reg)); \ 440 + asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \ 441 + PARAVIRT_TYPE(SETREG) \ 442 + + (reg)) \ 443 + : "=r"(ia64_clobber1), \ 444 + "=r"(ia64_clobber2) \ 445 + : PARAVIRT_OP(setreg), \ 446 + "1"(__reg), "0"(__val) \ 447 + : PARAVIRT_CLOBBERS2); \ 448 + } while (0) 449 + 450 + #endif /* ASM_SUPPORTED */ 451 + #endif /* CONFIG_PARAVIRT && ASM_SUPPOTED */ 129 452 130 453 #endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */
+2 -1
arch/ia64/kernel/Makefile
··· 36 36 mca_recovery-y += mca_drv.o mca_drv_asm.o 37 37 obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o 38 38 39 - obj-$(CONFIG_PARAVIRT) += paravirt.o paravirtentry.o 39 + obj-$(CONFIG_PARAVIRT) += paravirt.o paravirtentry.o \ 40 + paravirt_patch.o 40 41 41 42 obj-$(CONFIG_IA64_ESI) += esi.o 42 43 ifneq ($(CONFIG_IA64_ESI),)
+519 -1
arch/ia64/kernel/paravirt.c
··· 46 46 * initialization hooks. 47 47 */ 48 48 49 - struct pv_init_ops pv_init_ops; 49 + static void __init 50 + ia64_native_patch_branch(unsigned long tag, unsigned long type); 51 + 52 + struct pv_init_ops pv_init_ops = 53 + { 54 + #ifdef ASM_SUPPORTED 55 + .patch_bundle = ia64_native_patch_bundle, 56 + #endif 57 + .patch_branch = ia64_native_patch_branch, 58 + }; 50 59 51 60 /*************************************************************************** 52 61 * pv_cpu_ops 53 62 * intrinsics hooks. 54 63 */ 55 64 65 + #ifndef ASM_SUPPORTED 56 66 /* ia64_native_xxx are macros so that we have to make them real functions */ 57 67 58 68 #define DEFINE_VOID_FUNC1(name) \ ··· 284 274 break; 285 275 } 286 276 } 277 + #else 278 + 279 + #define __DEFINE_FUNC(name, code) \ 280 + extern const char ia64_native_ ## name ## _direct_start[]; \ 281 + extern const char ia64_native_ ## name ## _direct_end[]; \ 282 + asm (".align 32\n" \ 283 + ".proc ia64_native_" #name "_func\n" \ 284 + "ia64_native_" #name "_func:\n" \ 285 + "ia64_native_" #name "_direct_start:\n" \ 286 + code \ 287 + "ia64_native_" #name "_direct_end:\n" \ 288 + "br.cond.sptk.many b6\n" \ 289 + ".endp ia64_native_" #name "_func\n") 290 + 291 + #define DEFINE_VOID_FUNC0(name, code) \ 292 + extern void \ 293 + ia64_native_ ## name ## _func(void); \ 294 + __DEFINE_FUNC(name, code) 295 + 296 + #define DEFINE_VOID_FUNC1(name, code) \ 297 + extern void \ 298 + ia64_native_ ## name ## _func(unsigned long arg); \ 299 + __DEFINE_FUNC(name, code) 300 + 301 + #define DEFINE_VOID_FUNC2(name, code) \ 302 + extern void \ 303 + ia64_native_ ## name ## _func(unsigned long arg0, \ 304 + unsigned long arg1); \ 305 + __DEFINE_FUNC(name, code) 306 + 307 + #define DEFINE_FUNC0(name, code) \ 308 + extern unsigned long \ 309 + ia64_native_ ## name ## _func(void); \ 310 + __DEFINE_FUNC(name, code) 311 + 312 + #define DEFINE_FUNC1(name, type, code) \ 313 + extern unsigned long \ 314 + ia64_native_ ## name ## _func(type arg); \ 315 + 
__DEFINE_FUNC(name, code) 316 + 317 + DEFINE_VOID_FUNC1(fc, 318 + "fc r8\n"); 319 + DEFINE_VOID_FUNC1(intrin_local_irq_restore, 320 + ";;\n" 321 + " cmp.ne p6, p7 = r8, r0\n" 322 + ";;\n" 323 + "(p6) ssm psr.i\n" 324 + "(p7) rsm psr.i\n" 325 + ";;\n" 326 + "(p6) srlz.d\n"); 327 + 328 + DEFINE_VOID_FUNC2(ptcga, 329 + "ptc.ga r8, r9\n"); 330 + DEFINE_VOID_FUNC2(set_rr, 331 + "mov rr[r8] = r9\n"); 332 + 333 + /* ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I */ 334 + DEFINE_FUNC0(get_psr_i, 335 + "mov r2 = " __stringify(1 << IA64_PSR_I_BIT) "\n" 336 + "mov r8 = psr\n" 337 + ";;\n" 338 + "and r8 = r2, r8\n"); 339 + 340 + DEFINE_FUNC1(thash, unsigned long, 341 + "thash r8 = r8\n"); 342 + DEFINE_FUNC1(get_cpuid, int, 343 + "mov r8 = cpuid[r8]\n"); 344 + DEFINE_FUNC1(get_pmd, int, 345 + "mov r8 = pmd[r8]\n"); 346 + DEFINE_FUNC1(get_rr, unsigned long, 347 + "mov r8 = rr[r8]\n"); 348 + 349 + DEFINE_VOID_FUNC0(ssm_i, 350 + "ssm psr.i\n"); 351 + DEFINE_VOID_FUNC0(rsm_i, 352 + "rsm psr.i\n"); 353 + 354 + extern void 355 + ia64_native_set_rr0_to_rr4_func(unsigned long val0, unsigned long val1, 356 + unsigned long val2, unsigned long val3, 357 + unsigned long val4); 358 + __DEFINE_FUNC(set_rr0_to_rr4, 359 + "mov rr[r0] = r8\n" 360 + "movl r2 = 0x2000000000000000\n" 361 + ";;\n" 362 + "mov rr[r2] = r9\n" 363 + "shl r3 = r2, 1\n" /* movl r3 = 0x4000000000000000 */ 364 + ";;\n" 365 + "add r2 = r2, r3\n" /* movl r2 = 0x6000000000000000 */ 366 + "mov rr[r3] = r10\n" 367 + ";;\n" 368 + "mov rr[r2] = r11\n" 369 + "shl r3 = r3, 1\n" /* movl r3 = 0x8000000000000000 */ 370 + ";;\n" 371 + "mov rr[r3] = r14\n"); 372 + 373 + extern unsigned long ia64_native_getreg_func(int regnum); 374 + asm(".global ia64_native_getreg_func\n"); 375 + #define __DEFINE_GET_REG(id, reg) \ 376 + "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ 377 + ";;\n" \ 378 + "cmp.eq p6, p0 = r2, r8\n" \ 379 + ";;\n" \ 380 + "(p6) mov r8 = " #reg "\n" \ 381 + "(p6) br.cond.sptk.many b6\n" \ 382 + ";;\n" 383 + #define 
__DEFINE_GET_AR(id, reg) __DEFINE_GET_REG(AR_ ## id, ar.reg) 384 + #define __DEFINE_GET_CR(id, reg) __DEFINE_GET_REG(CR_ ## id, cr.reg) 385 + 386 + __DEFINE_FUNC(getreg, 387 + __DEFINE_GET_REG(GP, gp) 388 + /*__DEFINE_GET_REG(IP, ip)*/ /* returned ip value shouldn't be constant */ 389 + __DEFINE_GET_REG(PSR, psr) 390 + __DEFINE_GET_REG(TP, tp) 391 + __DEFINE_GET_REG(SP, sp) 392 + 393 + __DEFINE_GET_REG(AR_KR0, ar0) 394 + __DEFINE_GET_REG(AR_KR1, ar1) 395 + __DEFINE_GET_REG(AR_KR2, ar2) 396 + __DEFINE_GET_REG(AR_KR3, ar3) 397 + __DEFINE_GET_REG(AR_KR4, ar4) 398 + __DEFINE_GET_REG(AR_KR5, ar5) 399 + __DEFINE_GET_REG(AR_KR6, ar6) 400 + __DEFINE_GET_REG(AR_KR7, ar7) 401 + __DEFINE_GET_AR(RSC, rsc) 402 + __DEFINE_GET_AR(BSP, bsp) 403 + __DEFINE_GET_AR(BSPSTORE, bspstore) 404 + __DEFINE_GET_AR(RNAT, rnat) 405 + __DEFINE_GET_AR(FCR, fcr) 406 + __DEFINE_GET_AR(EFLAG, eflag) 407 + __DEFINE_GET_AR(CSD, csd) 408 + __DEFINE_GET_AR(SSD, ssd) 409 + __DEFINE_GET_REG(AR_CFLAG, ar27) 410 + __DEFINE_GET_AR(FSR, fsr) 411 + __DEFINE_GET_AR(FIR, fir) 412 + __DEFINE_GET_AR(FDR, fdr) 413 + __DEFINE_GET_AR(CCV, ccv) 414 + __DEFINE_GET_AR(UNAT, unat) 415 + __DEFINE_GET_AR(FPSR, fpsr) 416 + __DEFINE_GET_AR(ITC, itc) 417 + __DEFINE_GET_AR(PFS, pfs) 418 + __DEFINE_GET_AR(LC, lc) 419 + __DEFINE_GET_AR(EC, ec) 420 + 421 + __DEFINE_GET_CR(DCR, dcr) 422 + __DEFINE_GET_CR(ITM, itm) 423 + __DEFINE_GET_CR(IVA, iva) 424 + __DEFINE_GET_CR(PTA, pta) 425 + __DEFINE_GET_CR(IPSR, ipsr) 426 + __DEFINE_GET_CR(ISR, isr) 427 + __DEFINE_GET_CR(IIP, iip) 428 + __DEFINE_GET_CR(IFA, ifa) 429 + __DEFINE_GET_CR(ITIR, itir) 430 + __DEFINE_GET_CR(IIPA, iipa) 431 + __DEFINE_GET_CR(IFS, ifs) 432 + __DEFINE_GET_CR(IIM, iim) 433 + __DEFINE_GET_CR(IHA, iha) 434 + __DEFINE_GET_CR(LID, lid) 435 + __DEFINE_GET_CR(IVR, ivr) 436 + __DEFINE_GET_CR(TPR, tpr) 437 + __DEFINE_GET_CR(EOI, eoi) 438 + __DEFINE_GET_CR(IRR0, irr0) 439 + __DEFINE_GET_CR(IRR1, irr1) 440 + __DEFINE_GET_CR(IRR2, irr2) 441 + __DEFINE_GET_CR(IRR3, irr3) 442 + 
__DEFINE_GET_CR(ITV, itv) 443 + __DEFINE_GET_CR(PMV, pmv) 444 + __DEFINE_GET_CR(CMCV, cmcv) 445 + __DEFINE_GET_CR(LRR0, lrr0) 446 + __DEFINE_GET_CR(LRR1, lrr1) 447 + 448 + "mov r8 = -1\n" /* unsupported case */ 449 + ); 450 + 451 + extern void ia64_native_setreg_func(int regnum, unsigned long val); 452 + asm(".global ia64_native_setreg_func\n"); 453 + #define __DEFINE_SET_REG(id, reg) \ 454 + "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \ 455 + ";;\n" \ 456 + "cmp.eq p6, p0 = r2, r9\n" \ 457 + ";;\n" \ 458 + "(p6) mov " #reg " = r8\n" \ 459 + "(p6) br.cond.sptk.many b6\n" \ 460 + ";;\n" 461 + #define __DEFINE_SET_AR(id, reg) __DEFINE_SET_REG(AR_ ## id, ar.reg) 462 + #define __DEFINE_SET_CR(id, reg) __DEFINE_SET_REG(CR_ ## id, cr.reg) 463 + __DEFINE_FUNC(setreg, 464 + "mov r2 = " __stringify(_IA64_REG_PSR_L) "\n" 465 + ";;\n" 466 + "cmp.eq p6, p0 = r2, r9\n" 467 + ";;\n" 468 + "(p6) mov psr.l = r8\n" 469 + #ifdef HAVE_SERIALIZE_DIRECTIVE 470 + ".serialize.data\n" 471 + #endif 472 + "(p6) br.cond.sptk.many b6\n" 473 + __DEFINE_SET_REG(GP, gp) 474 + __DEFINE_SET_REG(SP, sp) 475 + 476 + __DEFINE_SET_REG(AR_KR0, ar0) 477 + __DEFINE_SET_REG(AR_KR1, ar1) 478 + __DEFINE_SET_REG(AR_KR2, ar2) 479 + __DEFINE_SET_REG(AR_KR3, ar3) 480 + __DEFINE_SET_REG(AR_KR4, ar4) 481 + __DEFINE_SET_REG(AR_KR5, ar5) 482 + __DEFINE_SET_REG(AR_KR6, ar6) 483 + __DEFINE_SET_REG(AR_KR7, ar7) 484 + __DEFINE_SET_AR(RSC, rsc) 485 + __DEFINE_SET_AR(BSP, bsp) 486 + __DEFINE_SET_AR(BSPSTORE, bspstore) 487 + __DEFINE_SET_AR(RNAT, rnat) 488 + __DEFINE_SET_AR(FCR, fcr) 489 + __DEFINE_SET_AR(EFLAG, eflag) 490 + __DEFINE_SET_AR(CSD, csd) 491 + __DEFINE_SET_AR(SSD, ssd) 492 + __DEFINE_SET_REG(AR_CFLAG, ar27) 493 + __DEFINE_SET_AR(FSR, fsr) 494 + __DEFINE_SET_AR(FIR, fir) 495 + __DEFINE_SET_AR(FDR, fdr) 496 + __DEFINE_SET_AR(CCV, ccv) 497 + __DEFINE_SET_AR(UNAT, unat) 498 + __DEFINE_SET_AR(FPSR, fpsr) 499 + __DEFINE_SET_AR(ITC, itc) 500 + __DEFINE_SET_AR(PFS, pfs) 501 + __DEFINE_SET_AR(LC, lc) 502 + 
__DEFINE_SET_AR(EC, ec) 503 + 504 + __DEFINE_SET_CR(DCR, dcr) 505 + __DEFINE_SET_CR(ITM, itm) 506 + __DEFINE_SET_CR(IVA, iva) 507 + __DEFINE_SET_CR(PTA, pta) 508 + __DEFINE_SET_CR(IPSR, ipsr) 509 + __DEFINE_SET_CR(ISR, isr) 510 + __DEFINE_SET_CR(IIP, iip) 511 + __DEFINE_SET_CR(IFA, ifa) 512 + __DEFINE_SET_CR(ITIR, itir) 513 + __DEFINE_SET_CR(IIPA, iipa) 514 + __DEFINE_SET_CR(IFS, ifs) 515 + __DEFINE_SET_CR(IIM, iim) 516 + __DEFINE_SET_CR(IHA, iha) 517 + __DEFINE_SET_CR(LID, lid) 518 + __DEFINE_SET_CR(IVR, ivr) 519 + __DEFINE_SET_CR(TPR, tpr) 520 + __DEFINE_SET_CR(EOI, eoi) 521 + __DEFINE_SET_CR(IRR0, irr0) 522 + __DEFINE_SET_CR(IRR1, irr1) 523 + __DEFINE_SET_CR(IRR2, irr2) 524 + __DEFINE_SET_CR(IRR3, irr3) 525 + __DEFINE_SET_CR(ITV, itv) 526 + __DEFINE_SET_CR(PMV, pmv) 527 + __DEFINE_SET_CR(CMCV, cmcv) 528 + __DEFINE_SET_CR(LRR0, lrr0) 529 + __DEFINE_SET_CR(LRR1, lrr1) 530 + ); 531 + #endif 287 532 288 533 struct pv_cpu_ops pv_cpu_ops = { 289 534 .fc = ia64_native_fc_func, ··· 633 368 .do_steal_accounting = ia64_native_do_steal_accounting, 634 369 .sched_clock = ia64_native_sched_clock, 635 370 }; 371 + 372 + /*************************************************************************** 373 + * binary pacthing 374 + * pv_init_ops.patch_bundle 375 + */ 376 + 377 + #ifdef ASM_SUPPORTED 378 + #define IA64_NATIVE_PATCH_DEFINE_GET_REG(name, reg) \ 379 + __DEFINE_FUNC(get_ ## name, \ 380 + ";;\n" \ 381 + "mov r8 = " #reg "\n" \ 382 + ";;\n") 383 + 384 + #define IA64_NATIVE_PATCH_DEFINE_SET_REG(name, reg) \ 385 + __DEFINE_FUNC(set_ ## name, \ 386 + ";;\n" \ 387 + "mov " #reg " = r8\n" \ 388 + ";;\n") 389 + 390 + #define IA64_NATIVE_PATCH_DEFINE_REG(name, reg) \ 391 + IA64_NATIVE_PATCH_DEFINE_GET_REG(name, reg); \ 392 + IA64_NATIVE_PATCH_DEFINE_SET_REG(name, reg) \ 393 + 394 + #define IA64_NATIVE_PATCH_DEFINE_AR(name, reg) \ 395 + IA64_NATIVE_PATCH_DEFINE_REG(ar_ ## name, ar.reg) 396 + 397 + #define IA64_NATIVE_PATCH_DEFINE_CR(name, reg) \ 398 + 
IA64_NATIVE_PATCH_DEFINE_REG(cr_ ## name, cr.reg) 399 + 400 + 401 + IA64_NATIVE_PATCH_DEFINE_GET_REG(psr, psr); 402 + IA64_NATIVE_PATCH_DEFINE_GET_REG(tp, tp); 403 + 404 + /* IA64_NATIVE_PATCH_DEFINE_SET_REG(psr_l, psr.l); */ 405 + __DEFINE_FUNC(set_psr_l, 406 + ";;\n" 407 + "mov psr.l = r8\n" 408 + #ifdef HAVE_SERIALIZE_DIRECTIVE 409 + ".serialize.data\n" 410 + #endif 411 + ";;\n"); 412 + 413 + IA64_NATIVE_PATCH_DEFINE_REG(gp, gp); 414 + IA64_NATIVE_PATCH_DEFINE_REG(sp, sp); 415 + 416 + IA64_NATIVE_PATCH_DEFINE_REG(kr0, ar0); 417 + IA64_NATIVE_PATCH_DEFINE_REG(kr1, ar1); 418 + IA64_NATIVE_PATCH_DEFINE_REG(kr2, ar2); 419 + IA64_NATIVE_PATCH_DEFINE_REG(kr3, ar3); 420 + IA64_NATIVE_PATCH_DEFINE_REG(kr4, ar4); 421 + IA64_NATIVE_PATCH_DEFINE_REG(kr5, ar5); 422 + IA64_NATIVE_PATCH_DEFINE_REG(kr6, ar6); 423 + IA64_NATIVE_PATCH_DEFINE_REG(kr7, ar7); 424 + 425 + IA64_NATIVE_PATCH_DEFINE_AR(rsc, rsc); 426 + IA64_NATIVE_PATCH_DEFINE_AR(bsp, bsp); 427 + IA64_NATIVE_PATCH_DEFINE_AR(bspstore, bspstore); 428 + IA64_NATIVE_PATCH_DEFINE_AR(rnat, rnat); 429 + IA64_NATIVE_PATCH_DEFINE_AR(fcr, fcr); 430 + IA64_NATIVE_PATCH_DEFINE_AR(eflag, eflag); 431 + IA64_NATIVE_PATCH_DEFINE_AR(csd, csd); 432 + IA64_NATIVE_PATCH_DEFINE_AR(ssd, ssd); 433 + IA64_NATIVE_PATCH_DEFINE_REG(ar27, ar27); 434 + IA64_NATIVE_PATCH_DEFINE_AR(fsr, fsr); 435 + IA64_NATIVE_PATCH_DEFINE_AR(fir, fir); 436 + IA64_NATIVE_PATCH_DEFINE_AR(fdr, fdr); 437 + IA64_NATIVE_PATCH_DEFINE_AR(ccv, ccv); 438 + IA64_NATIVE_PATCH_DEFINE_AR(unat, unat); 439 + IA64_NATIVE_PATCH_DEFINE_AR(fpsr, fpsr); 440 + IA64_NATIVE_PATCH_DEFINE_AR(itc, itc); 441 + IA64_NATIVE_PATCH_DEFINE_AR(pfs, pfs); 442 + IA64_NATIVE_PATCH_DEFINE_AR(lc, lc); 443 + IA64_NATIVE_PATCH_DEFINE_AR(ec, ec); 444 + 445 + IA64_NATIVE_PATCH_DEFINE_CR(dcr, dcr); 446 + IA64_NATIVE_PATCH_DEFINE_CR(itm, itm); 447 + IA64_NATIVE_PATCH_DEFINE_CR(iva, iva); 448 + IA64_NATIVE_PATCH_DEFINE_CR(pta, pta); 449 + IA64_NATIVE_PATCH_DEFINE_CR(ipsr, ipsr); 450 + 
IA64_NATIVE_PATCH_DEFINE_CR(isr, isr); 451 + IA64_NATIVE_PATCH_DEFINE_CR(iip, iip); 452 + IA64_NATIVE_PATCH_DEFINE_CR(ifa, ifa); 453 + IA64_NATIVE_PATCH_DEFINE_CR(itir, itir); 454 + IA64_NATIVE_PATCH_DEFINE_CR(iipa, iipa); 455 + IA64_NATIVE_PATCH_DEFINE_CR(ifs, ifs); 456 + IA64_NATIVE_PATCH_DEFINE_CR(iim, iim); 457 + IA64_NATIVE_PATCH_DEFINE_CR(iha, iha); 458 + IA64_NATIVE_PATCH_DEFINE_CR(lid, lid); 459 + IA64_NATIVE_PATCH_DEFINE_CR(ivr, ivr); 460 + IA64_NATIVE_PATCH_DEFINE_CR(tpr, tpr); 461 + IA64_NATIVE_PATCH_DEFINE_CR(eoi, eoi); 462 + IA64_NATIVE_PATCH_DEFINE_CR(irr0, irr0); 463 + IA64_NATIVE_PATCH_DEFINE_CR(irr1, irr1); 464 + IA64_NATIVE_PATCH_DEFINE_CR(irr2, irr2); 465 + IA64_NATIVE_PATCH_DEFINE_CR(irr3, irr3); 466 + IA64_NATIVE_PATCH_DEFINE_CR(itv, itv); 467 + IA64_NATIVE_PATCH_DEFINE_CR(pmv, pmv); 468 + IA64_NATIVE_PATCH_DEFINE_CR(cmcv, cmcv); 469 + IA64_NATIVE_PATCH_DEFINE_CR(lrr0, lrr0); 470 + IA64_NATIVE_PATCH_DEFINE_CR(lrr1, lrr1); 471 + 472 + static const struct paravirt_patch_bundle_elem ia64_native_patch_bundle_elems[] 473 + __initdata_or_module = 474 + { 475 + #define IA64_NATIVE_PATCH_BUNDLE_ELEM(name, type) \ 476 + { \ 477 + (void*)ia64_native_ ## name ## _direct_start, \ 478 + (void*)ia64_native_ ## name ## _direct_end, \ 479 + PARAVIRT_PATCH_TYPE_ ## type, \ 480 + } 481 + 482 + IA64_NATIVE_PATCH_BUNDLE_ELEM(fc, FC), 483 + IA64_NATIVE_PATCH_BUNDLE_ELEM(thash, THASH), 484 + IA64_NATIVE_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID), 485 + IA64_NATIVE_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD), 486 + IA64_NATIVE_PATCH_BUNDLE_ELEM(ptcga, PTCGA), 487 + IA64_NATIVE_PATCH_BUNDLE_ELEM(get_rr, GET_RR), 488 + IA64_NATIVE_PATCH_BUNDLE_ELEM(set_rr, SET_RR), 489 + IA64_NATIVE_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4), 490 + IA64_NATIVE_PATCH_BUNDLE_ELEM(ssm_i, SSM_I), 491 + IA64_NATIVE_PATCH_BUNDLE_ELEM(rsm_i, RSM_I), 492 + IA64_NATIVE_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I), 493 + IA64_NATIVE_PATCH_BUNDLE_ELEM(intrin_local_irq_restore, 494 + 
INTRIN_LOCAL_IRQ_RESTORE), 495 + 496 + #define IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(name, reg) \ 497 + { \ 498 + (void*)ia64_native_get_ ## name ## _direct_start, \ 499 + (void*)ia64_native_get_ ## name ## _direct_end, \ 500 + PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \ 501 + } 502 + 503 + #define IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ 504 + { \ 505 + (void*)ia64_native_set_ ## name ## _direct_start, \ 506 + (void*)ia64_native_set_ ## name ## _direct_end, \ 507 + PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \ 508 + } 509 + 510 + #define IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(name, reg) \ 511 + IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(name, reg), \ 512 + IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(name, reg) \ 513 + 514 + #define IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(name, reg) \ 515 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(ar_ ## name, AR_ ## reg) 516 + 517 + #define IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(name, reg) \ 518 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(cr_ ## name, CR_ ## reg) 519 + 520 + IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(psr, PSR), 521 + IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(tp, TP), 522 + 523 + IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(psr_l, PSR_L), 524 + 525 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(gp, GP), 526 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(sp, SP), 527 + 528 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr0, AR_KR0), 529 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr1, AR_KR1), 530 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr2, AR_KR2), 531 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr3, AR_KR3), 532 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr4, AR_KR4), 533 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr5, AR_KR5), 534 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr6, AR_KR6), 535 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr7, AR_KR7), 536 + 537 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(rsc, RSC), 538 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(bsp, BSP), 539 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(bspstore, BSPSTORE), 540 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(rnat, RNAT), 541 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fcr, FCR), 542 + 
IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(eflag, EFLAG), 543 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(csd, CSD), 544 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ssd, SSD), 545 + IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(ar27, AR_CFLAG), 546 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fsr, FSR), 547 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fir, FIR), 548 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fdr, FDR), 549 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ccv, CCV), 550 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(unat, UNAT), 551 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fpsr, FPSR), 552 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(itc, ITC), 553 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(pfs, PFS), 554 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(lc, LC), 555 + IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ec, EC), 556 + 557 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(dcr, DCR), 558 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itm, ITM), 559 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iva, IVA), 560 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(pta, PTA), 561 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ipsr, IPSR), 562 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(isr, ISR), 563 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iip, IIP), 564 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ifa, IFA), 565 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itir, ITIR), 566 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iipa, IIPA), 567 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ifs, IFS), 568 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iim, IIM), 569 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iha, IHA), 570 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lid, LID), 571 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ivr, IVR), 572 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(tpr, TPR), 573 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(eoi, EOI), 574 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr0, IRR0), 575 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr1, IRR1), 576 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr2, IRR2), 577 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr3, IRR3), 578 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itv, ITV), 579 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(pmv, PMV), 580 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(cmcv, CMCV), 581 + IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lrr0, LRR0), 582 + 
IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lrr1, LRR1), 583 + }; 584 + 585 + unsigned long __init_or_module 586 + ia64_native_patch_bundle(void *sbundle, void *ebundle, unsigned long type) 587 + { 588 + const unsigned long nelems = sizeof(ia64_native_patch_bundle_elems) / 589 + sizeof(ia64_native_patch_bundle_elems[0]); 590 + 591 + return __paravirt_patch_apply_bundle(sbundle, ebundle, type, 592 + ia64_native_patch_bundle_elems, 593 + nelems, NULL); 594 + } 595 + #endif /* ASM_SUPPOTED */ 596 + 597 + extern const char ia64_native_switch_to[]; 598 + extern const char ia64_native_leave_syscall[]; 599 + extern const char ia64_native_work_processed_syscall[]; 600 + extern const char ia64_native_leave_kernel[]; 601 + 602 + const struct paravirt_patch_branch_target ia64_native_branch_target[] 603 + __initconst = { 604 + #define PARAVIRT_BR_TARGET(name, type) \ 605 + { \ 606 + ia64_native_ ## name, \ 607 + PARAVIRT_PATCH_TYPE_BR_ ## type, \ 608 + } 609 + PARAVIRT_BR_TARGET(switch_to, SWITCH_TO), 610 + PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL), 611 + PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL), 612 + PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL), 613 + }; 614 + 615 + static void __init 616 + ia64_native_patch_branch(unsigned long tag, unsigned long type) 617 + { 618 + const unsigned long nelem = 619 + sizeof(ia64_native_branch_target) / 620 + sizeof(ia64_native_branch_target[0]); 621 + __paravirt_patch_apply_branch(tag, type, 622 + ia64_native_branch_target, nelem); 623 + }
+24 -19
arch/ia64/kernel/paravirtentry.S
··· 20 20 * 21 21 */ 22 22 23 + #include <linux/init.h> 23 24 #include <asm/asmmacro.h> 24 25 #include <asm/asm-offsets.h> 26 + #include <asm/paravirt_privop.h> 27 + #include <asm/paravirt_patch.h> 25 28 #include "entry.h" 26 29 27 30 #define DATA8(sym, init_value) \ ··· 35 32 data8 init_value ; \ 36 33 .popsection 37 34 38 - #define BRANCH(targ, reg, breg) \ 39 - movl reg=targ ; \ 40 - ;; \ 41 - ld8 reg=[reg] ; \ 42 - ;; \ 43 - mov breg=reg ; \ 35 + #define BRANCH(targ, reg, breg, type) \ 36 + PARAVIRT_PATCH_SITE_BR(PARAVIRT_PATCH_TYPE_BR_ ## type) ; \ 37 + ;; \ 38 + movl reg=targ ; \ 39 + ;; \ 40 + ld8 reg=[reg] ; \ 41 + ;; \ 42 + mov breg=reg ; \ 44 43 br.cond.sptk.many breg 45 44 46 - #define BRANCH_PROC(sym, reg, breg) \ 47 - DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \ 48 - GLOBAL_ENTRY(paravirt_ ## sym) ; \ 49 - BRANCH(paravirt_ ## sym ## _targ, reg, breg) ; \ 45 + #define BRANCH_PROC(sym, reg, breg, type) \ 46 + DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \ 47 + GLOBAL_ENTRY(paravirt_ ## sym) ; \ 48 + BRANCH(paravirt_ ## sym ## _targ, reg, breg, type) ; \ 50 49 END(paravirt_ ## sym) 51 50 52 - #define BRANCH_PROC_UNWINFO(sym, reg, breg) \ 53 - DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \ 54 - GLOBAL_ENTRY(paravirt_ ## sym) ; \ 55 - PT_REGS_UNWIND_INFO(0) ; \ 56 - BRANCH(paravirt_ ## sym ## _targ, reg, breg) ; \ 51 + #define BRANCH_PROC_UNWINFO(sym, reg, breg, type) \ 52 + DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \ 53 + GLOBAL_ENTRY(paravirt_ ## sym) ; \ 54 + PT_REGS_UNWIND_INFO(0) ; \ 55 + BRANCH(paravirt_ ## sym ## _targ, reg, breg, type) ; \ 57 56 END(paravirt_ ## sym) 58 57 59 58 60 - BRANCH_PROC(switch_to, r22, b7) 61 - BRANCH_PROC_UNWINFO(leave_syscall, r22, b7) 62 - BRANCH_PROC(work_processed_syscall, r2, b7) 63 - BRANCH_PROC_UNWINFO(leave_kernel, r22, b7) 59 + BRANCH_PROC(switch_to, r22, b7, SWITCH_TO) 60 + BRANCH_PROC_UNWINFO(leave_syscall, r22, b7, LEAVE_SYSCALL) 61 + 
BRANCH_PROC(work_processed_syscall, r2, b7, WORK_PROCESSED_SYSCALL) 62 + BRANCH_PROC_UNWINFO(leave_kernel, r22, b7, LEAVE_KERNEL) 64 63 65 64 66 65 #ifdef CONFIG_MODULES
+2
arch/ia64/kernel/setup.c
··· 52 52 #include <asm/meminit.h> 53 53 #include <asm/page.h> 54 54 #include <asm/paravirt.h> 55 + #include <asm/paravirt_patch.h> 55 56 #include <asm/patch.h> 56 57 #include <asm/pgtable.h> 57 58 #include <asm/processor.h> ··· 538 537 paravirt_arch_setup_early(); 539 538 540 539 ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist); 540 + paravirt_patch_apply(); 541 541 542 542 *cmdline_p = __va(ia64_boot_param->command_line); 543 543 strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);