Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: Book3S: Add MMIO emulation for FP and VSX instructions

This patch provides MMIO load/store emulation for FP and VSX
instructions operating on double-precision scalars and on vectors of
unsigned/signed char, unsigned/signed short, unsigned/signed int, and
double.

The instructions that this adds emulation for are:

- ldx, ldux, lwax,
- lfs, lfsx, lfsu, lfsux, lfd, lfdx, lfdu, lfdux,
- stfs, stfsx, stfsu, stfsux, stfd, stfdx, stfdu, stfdux, stfiwx,
- lxsdx, lxsspx, lxsiwax, lxsiwzx, lxvd2x, lxvw4x, lxvdsx,
- stxsdx, stxsspx, stxsiwx, stxvd2x, stxvw4x

[paulus@ozlabs.org - some cleanups, fixes and rework, make it
compile for Book E, fix build when PR KVM is built in]

Signed-off-by: Bin Lu <lblulb@linux.vnet.ibm.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>

authored by

Bin Lu and committed by
Paul Mackerras
6f63e81b 307d9279

+731 -7
+5
arch/powerpc/include/asm/disassemble.h
··· 87 87 return (inst >> 11) & 0x7fff; 88 88 } 89 89 90 + static inline unsigned int get_tx_or_sx(u32 inst) 91 + { 92 + return (inst) & 0x1; 93 + } 94 + 90 95 #define IS_XFORM(inst) (get_op(inst) == 31) 91 96 #define IS_DSFORM(inst) (get_op(inst) >= 56) 92 97
+23
arch/powerpc/include/asm/kvm_host.h
··· 438 438 unsigned int index; 439 439 }; 440 440 441 + #define KVMPPC_VSX_COPY_NONE 0 442 + #define KVMPPC_VSX_COPY_WORD 1 443 + #define KVMPPC_VSX_COPY_DWORD 2 444 + #define KVMPPC_VSX_COPY_DWORD_LOAD_DUMP 3 445 + 441 446 struct openpic; 442 447 443 448 struct kvm_vcpu_arch { ··· 646 641 u8 io_gpr; /* GPR used as IO source/target */ 647 642 u8 mmio_host_swabbed; 648 643 u8 mmio_sign_extend; 644 + /* conversion between single and double precision */ 645 + u8 mmio_sp64_extend; 646 + /* 647 + * Number of simulations for vsx. 648 + * If we use 2*8bytes to simulate 1*16bytes, 649 + * then the number should be 2 and 650 + * mmio_vsx_copy_type=KVMPPC_VSX_COPY_DWORD. 651 + * If we use 4*4bytes to simulate 1*16bytes, 652 + * the number should be 4 and 653 + * mmio_vsx_copy_type=KVMPPC_VSX_COPY_WORD. 654 + */ 655 + u8 mmio_vsx_copy_nums; 656 + u8 mmio_vsx_offset; 657 + u8 mmio_vsx_copy_type; 658 + u8 mmio_vsx_tx_sx_enabled; 649 659 u8 osi_needed; 650 660 u8 osi_enabled; 651 661 u8 papr_enabled; ··· 749 729 }; 750 730 751 731 #define VCPU_FPR(vcpu, i) (vcpu)->arch.fp.fpr[i][TS_FPROFFSET] 732 + #define VCPU_VSX_FPR(vcpu, i, j) ((vcpu)->arch.fp.fpr[i][j]) 733 + #define VCPU_VSX_VR(vcpu, i) ((vcpu)->arch.vr.vr[i]) 752 734 753 735 /* Values for vcpu->arch.state */ 754 736 #define KVMPPC_VCPU_NOTREADY 0 ··· 764 742 #define KVM_MMIO_REG_FPR 0x0020 765 743 #define KVM_MMIO_REG_QPR 0x0040 766 744 #define KVM_MMIO_REG_FQPR 0x0060 745 + #define KVM_MMIO_REG_VSX 0x0080 767 746 768 747 #define __KVM_HAVE_ARCH_WQP 769 748 #define __KVM_HAVE_CREATE_DEVICE
+7
arch/powerpc/include/asm/kvm_ppc.h
··· 78 78 extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, 79 79 unsigned int rt, unsigned int bytes, 80 80 int is_default_endian); 81 + extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 82 + unsigned int rt, unsigned int bytes, 83 + int is_default_endian, int mmio_sign_extend); 81 84 extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 82 85 u64 val, unsigned int bytes, 83 86 int is_default_endian); 87 + extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 88 + int rs, unsigned int bytes, 89 + int is_default_endian); 84 90 85 91 extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, 86 92 enum instruction_type type, u32 *inst); ··· 249 243 u64 dval; 250 244 vector128 vval; 251 245 u64 vsxval[2]; 246 + u32 vsx32val[4]; 252 247 struct { 253 248 u64 addr; 254 249 u64 length;
+50
arch/powerpc/include/asm/ppc-opcode.h
··· 86 86 #define OP_TRAP_64 2 87 87 88 88 #define OP_31_XOP_TRAP 4 89 + #define OP_31_XOP_LDX 21 89 90 #define OP_31_XOP_LWZX 23 91 + #define OP_31_XOP_LDUX 53 90 92 #define OP_31_XOP_DCBST 54 91 93 #define OP_31_XOP_LWZUX 55 92 94 #define OP_31_XOP_TRAP_64 68 ··· 101 99 #define OP_31_XOP_LHZX 279 102 100 #define OP_31_XOP_LHZUX 311 103 101 #define OP_31_XOP_MFSPR 339 102 + #define OP_31_XOP_LWAX 341 104 103 #define OP_31_XOP_LHAX 343 105 104 #define OP_31_XOP_LHAUX 375 106 105 #define OP_31_XOP_STHX 407 ··· 111 108 #define OP_31_XOP_LWBRX 534 112 109 #define OP_31_XOP_TLBSYNC 566 113 110 #define OP_31_XOP_STWBRX 662 111 + #define OP_31_XOP_STFSX 663 112 + #define OP_31_XOP_STFSUX 695 113 + #define OP_31_XOP_STFDX 727 114 + #define OP_31_XOP_STFDUX 759 114 115 #define OP_31_XOP_LHBRX 790 115 116 #define OP_31_XOP_STHBRX 918 117 + #define OP_31_XOP_STFIWX 983 118 + 119 + /* VSX Scalar Load Instructions */ 120 + #define OP_31_XOP_LXSDX 588 121 + #define OP_31_XOP_LXSSPX 524 122 + #define OP_31_XOP_LXSIWAX 76 123 + #define OP_31_XOP_LXSIWZX 12 124 + 125 + /* VSX Scalar Store Instructions */ 126 + #define OP_31_XOP_STXSDX 716 127 + #define OP_31_XOP_STXSSPX 652 128 + #define OP_31_XOP_STXSIWX 140 129 + 130 + /* VSX Vector Load Instructions */ 131 + #define OP_31_XOP_LXVD2X 844 132 + #define OP_31_XOP_LXVW4X 780 133 + 134 + /* VSX Vector Load and Splat Instruction */ 135 + #define OP_31_XOP_LXVDSX 332 136 + 137 + /* VSX Vector Store Instructions */ 138 + #define OP_31_XOP_STXVD2X 972 139 + #define OP_31_XOP_STXVW4X 908 140 + 141 + #define OP_31_XOP_LFSX 535 142 + #define OP_31_XOP_LFSUX 567 143 + #define OP_31_XOP_LFDX 599 144 + #define OP_31_XOP_LFDUX 631 116 145 117 146 #define OP_LWZ 32 147 + #define OP_STFS 52 148 + #define OP_STFSU 53 149 + #define OP_STFD 54 150 + #define OP_STFDU 55 118 151 #define OP_LD 58 119 152 #define OP_LWZU 33 120 153 #define OP_LBZ 34 ··· 166 127 #define OP_LHAU 43 167 128 #define OP_STH 44 168 129 #define OP_STHU 45 130 + #define OP_LMW 
46 131 + #define OP_STMW 47 132 + #define OP_LFS 48 133 + #define OP_LFSU 49 134 + #define OP_LFD 50 135 + #define OP_LFDU 51 136 + #define OP_STFS 52 137 + #define OP_STFSU 53 138 + #define OP_STFD 54 139 + #define OP_STFDU 55 140 + #define OP_LQ 56 169 141 170 142 /* sorted alphabetically */ 171 143 #define PPC_INST_BHRBE 0x7c00025c
+334 -1
arch/powerpc/kvm/emulate_loadstore.c
··· 34 34 #include "timing.h" 35 35 #include "trace.h" 36 36 37 + #ifdef CONFIG_PPC_FPU 38 + static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu) 39 + { 40 + if (!(kvmppc_get_msr(vcpu) & MSR_FP)) { 41 + kvmppc_core_queue_fpunavail(vcpu); 42 + return true; 43 + } 44 + 45 + return false; 46 + } 47 + #endif /* CONFIG_PPC_FPU */ 48 + 49 + #ifdef CONFIG_VSX 50 + static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu) 51 + { 52 + if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) { 53 + kvmppc_core_queue_vsx_unavail(vcpu); 54 + return true; 55 + } 56 + 57 + return false; 58 + } 59 + #endif /* CONFIG_VSX */ 60 + 37 61 /* XXX to do: 38 62 * lhax 39 63 * lhaux ··· 89 65 ra = get_ra(inst); 90 66 rs = get_rs(inst); 91 67 rt = get_rt(inst); 68 + 69 + /* 70 + * if mmio_vsx_tx_sx_enabled == 0, copy data between 71 + * VSR[0..31] and memory 72 + * if mmio_vsx_tx_sx_enabled == 1, copy data between 73 + * VSR[32..63] and memory 74 + */ 75 + vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst); 76 + vcpu->arch.mmio_vsx_copy_nums = 0; 77 + vcpu->arch.mmio_vsx_offset = 0; 78 + vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE; 79 + vcpu->arch.mmio_sp64_extend = 0; 80 + vcpu->arch.mmio_sign_extend = 0; 92 81 93 82 switch (get_op(inst)) { 94 83 case 31: ··· 194 157 2, 0); 195 158 break; 196 159 160 + case OP_31_XOP_LDX: 161 + emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); 162 + break; 163 + 164 + case OP_31_XOP_LDUX: 165 + emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); 166 + kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); 167 + break; 168 + 169 + case OP_31_XOP_LWAX: 170 + emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1); 171 + break; 172 + 173 + #ifdef CONFIG_PPC_FPU 174 + case OP_31_XOP_LFSX: 175 + if (kvmppc_check_fp_disabled(vcpu)) 176 + return EMULATE_DONE; 177 + vcpu->arch.mmio_sp64_extend = 1; 178 + emulated = kvmppc_handle_load(run, vcpu, 179 + KVM_MMIO_REG_FPR|rt, 4, 1); 180 + break; 181 + 182 + case OP_31_XOP_LFSUX: 183 + if 
(kvmppc_check_fp_disabled(vcpu)) 184 + return EMULATE_DONE; 185 + vcpu->arch.mmio_sp64_extend = 1; 186 + emulated = kvmppc_handle_load(run, vcpu, 187 + KVM_MMIO_REG_FPR|rt, 4, 1); 188 + kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); 189 + break; 190 + 191 + case OP_31_XOP_LFDX: 192 + if (kvmppc_check_fp_disabled(vcpu)) 193 + return EMULATE_DONE; 194 + emulated = kvmppc_handle_load(run, vcpu, 195 + KVM_MMIO_REG_FPR|rt, 8, 1); 196 + break; 197 + 198 + case OP_31_XOP_LFDUX: 199 + if (kvmppc_check_fp_disabled(vcpu)) 200 + return EMULATE_DONE; 201 + emulated = kvmppc_handle_load(run, vcpu, 202 + KVM_MMIO_REG_FPR|rt, 8, 1); 203 + kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); 204 + break; 205 + 206 + case OP_31_XOP_STFSX: 207 + if (kvmppc_check_fp_disabled(vcpu)) 208 + return EMULATE_DONE; 209 + vcpu->arch.mmio_sp64_extend = 1; 210 + emulated = kvmppc_handle_store(run, vcpu, 211 + VCPU_FPR(vcpu, rs), 4, 1); 212 + break; 213 + 214 + case OP_31_XOP_STFSUX: 215 + if (kvmppc_check_fp_disabled(vcpu)) 216 + return EMULATE_DONE; 217 + vcpu->arch.mmio_sp64_extend = 1; 218 + emulated = kvmppc_handle_store(run, vcpu, 219 + VCPU_FPR(vcpu, rs), 4, 1); 220 + kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); 221 + break; 222 + 223 + case OP_31_XOP_STFDX: 224 + if (kvmppc_check_fp_disabled(vcpu)) 225 + return EMULATE_DONE; 226 + emulated = kvmppc_handle_store(run, vcpu, 227 + VCPU_FPR(vcpu, rs), 228 + 8, 1); 229 + break; 230 + 231 + case OP_31_XOP_STFDUX: 232 + if (kvmppc_check_fp_disabled(vcpu)) 233 + return EMULATE_DONE; 234 + emulated = kvmppc_handle_store(run, vcpu, 235 + VCPU_FPR(vcpu, rs), 236 + 8, 1); 237 + kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); 238 + break; 239 + 240 + case OP_31_XOP_STFIWX: 241 + if (kvmppc_check_fp_disabled(vcpu)) 242 + return EMULATE_DONE; 243 + emulated = kvmppc_handle_store(run, vcpu, 244 + VCPU_FPR(vcpu, rs), 245 + 4, 1); 246 + break; 247 + #endif 248 + 249 + #ifdef CONFIG_VSX 250 + case OP_31_XOP_LXSDX: 251 + if 
(kvmppc_check_vsx_disabled(vcpu)) 252 + return EMULATE_DONE; 253 + vcpu->arch.mmio_vsx_copy_nums = 1; 254 + vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; 255 + emulated = kvmppc_handle_vsx_load(run, vcpu, 256 + KVM_MMIO_REG_VSX|rt, 8, 1, 0); 257 + break; 258 + 259 + case OP_31_XOP_LXSSPX: 260 + if (kvmppc_check_vsx_disabled(vcpu)) 261 + return EMULATE_DONE; 262 + vcpu->arch.mmio_vsx_copy_nums = 1; 263 + vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; 264 + vcpu->arch.mmio_sp64_extend = 1; 265 + emulated = kvmppc_handle_vsx_load(run, vcpu, 266 + KVM_MMIO_REG_VSX|rt, 4, 1, 0); 267 + break; 268 + 269 + case OP_31_XOP_LXSIWAX: 270 + if (kvmppc_check_vsx_disabled(vcpu)) 271 + return EMULATE_DONE; 272 + vcpu->arch.mmio_vsx_copy_nums = 1; 273 + vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; 274 + emulated = kvmppc_handle_vsx_load(run, vcpu, 275 + KVM_MMIO_REG_VSX|rt, 4, 1, 1); 276 + break; 277 + 278 + case OP_31_XOP_LXSIWZX: 279 + if (kvmppc_check_vsx_disabled(vcpu)) 280 + return EMULATE_DONE; 281 + vcpu->arch.mmio_vsx_copy_nums = 1; 282 + vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; 283 + emulated = kvmppc_handle_vsx_load(run, vcpu, 284 + KVM_MMIO_REG_VSX|rt, 4, 1, 0); 285 + break; 286 + 287 + case OP_31_XOP_LXVD2X: 288 + /* 289 + * In this case, the official load/store process is like this: 290 + * Step1, exit from vm by page fault isr, then kvm save vsr. 291 + * Please see guest_exit_cont->store_fp_state->SAVE_32VSRS 292 + * as reference. 293 + * 294 + * Step2, copy data between memory and VCPU 295 + * Notice: for LXVD2X/STXVD2X/LXVW4X/STXVW4X, we use 296 + * 2copies*8bytes or 4copies*4bytes 297 + * to simulate one copy of 16bytes. 298 + * Also there is an endian issue here, we should notice the 299 + * layout of memory. 300 + * Please see MARCO of LXVD2X_ROT/STXVD2X_ROT as more reference. 301 + * If host is little-endian, kvm will call XXSWAPD for 302 + * LXVD2X_ROT/STXVD2X_ROT. 
303 + * So, if host is little-endian, 304 + * the postion of memeory should be swapped. 305 + * 306 + * Step3, return to guest, kvm reset register. 307 + * Please see kvmppc_hv_entry->load_fp_state->REST_32VSRS 308 + * as reference. 309 + */ 310 + if (kvmppc_check_vsx_disabled(vcpu)) 311 + return EMULATE_DONE; 312 + vcpu->arch.mmio_vsx_copy_nums = 2; 313 + vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; 314 + emulated = kvmppc_handle_vsx_load(run, vcpu, 315 + KVM_MMIO_REG_VSX|rt, 8, 1, 0); 316 + break; 317 + 318 + case OP_31_XOP_LXVW4X: 319 + if (kvmppc_check_vsx_disabled(vcpu)) 320 + return EMULATE_DONE; 321 + vcpu->arch.mmio_vsx_copy_nums = 4; 322 + vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD; 323 + emulated = kvmppc_handle_vsx_load(run, vcpu, 324 + KVM_MMIO_REG_VSX|rt, 4, 1, 0); 325 + break; 326 + 327 + case OP_31_XOP_LXVDSX: 328 + if (kvmppc_check_vsx_disabled(vcpu)) 329 + return EMULATE_DONE; 330 + vcpu->arch.mmio_vsx_copy_nums = 1; 331 + vcpu->arch.mmio_vsx_copy_type = 332 + KVMPPC_VSX_COPY_DWORD_LOAD_DUMP; 333 + emulated = kvmppc_handle_vsx_load(run, vcpu, 334 + KVM_MMIO_REG_VSX|rt, 8, 1, 0); 335 + break; 336 + 337 + case OP_31_XOP_STXSDX: 338 + if (kvmppc_check_vsx_disabled(vcpu)) 339 + return EMULATE_DONE; 340 + vcpu->arch.mmio_vsx_copy_nums = 1; 341 + vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; 342 + emulated = kvmppc_handle_vsx_store(run, vcpu, 343 + rs, 8, 1); 344 + break; 345 + 346 + case OP_31_XOP_STXSSPX: 347 + if (kvmppc_check_vsx_disabled(vcpu)) 348 + return EMULATE_DONE; 349 + vcpu->arch.mmio_vsx_copy_nums = 1; 350 + vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; 351 + vcpu->arch.mmio_sp64_extend = 1; 352 + emulated = kvmppc_handle_vsx_store(run, vcpu, 353 + rs, 4, 1); 354 + break; 355 + 356 + case OP_31_XOP_STXSIWX: 357 + if (kvmppc_check_vsx_disabled(vcpu)) 358 + return EMULATE_DONE; 359 + vcpu->arch.mmio_vsx_offset = 1; 360 + vcpu->arch.mmio_vsx_copy_nums = 1; 361 + vcpu->arch.mmio_vsx_copy_type = 
KVMPPC_VSX_COPY_WORD; 362 + emulated = kvmppc_handle_vsx_store(run, vcpu, 363 + rs, 4, 1); 364 + break; 365 + 366 + case OP_31_XOP_STXVD2X: 367 + if (kvmppc_check_vsx_disabled(vcpu)) 368 + return EMULATE_DONE; 369 + vcpu->arch.mmio_vsx_copy_nums = 2; 370 + vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; 371 + emulated = kvmppc_handle_vsx_store(run, vcpu, 372 + rs, 8, 1); 373 + break; 374 + 375 + case OP_31_XOP_STXVW4X: 376 + if (kvmppc_check_vsx_disabled(vcpu)) 377 + return EMULATE_DONE; 378 + vcpu->arch.mmio_vsx_copy_nums = 4; 379 + vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD; 380 + emulated = kvmppc_handle_vsx_store(run, vcpu, 381 + rs, 4, 1); 382 + break; 383 + #endif /* CONFIG_VSX */ 197 384 default: 198 385 emulated = EMULATE_FAIL; 199 386 break; ··· 428 167 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); 429 168 break; 430 169 431 - /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */ 170 + #ifdef CONFIG_PPC_FPU 171 + case OP_STFS: 172 + if (kvmppc_check_fp_disabled(vcpu)) 173 + return EMULATE_DONE; 174 + vcpu->arch.mmio_sp64_extend = 1; 175 + emulated = kvmppc_handle_store(run, vcpu, 176 + VCPU_FPR(vcpu, rs), 177 + 4, 1); 178 + break; 179 + 180 + case OP_STFSU: 181 + if (kvmppc_check_fp_disabled(vcpu)) 182 + return EMULATE_DONE; 183 + vcpu->arch.mmio_sp64_extend = 1; 184 + emulated = kvmppc_handle_store(run, vcpu, 185 + VCPU_FPR(vcpu, rs), 186 + 4, 1); 187 + kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); 188 + break; 189 + 190 + case OP_STFD: 191 + if (kvmppc_check_fp_disabled(vcpu)) 192 + return EMULATE_DONE; 193 + emulated = kvmppc_handle_store(run, vcpu, 194 + VCPU_FPR(vcpu, rs), 195 + 8, 1); 196 + break; 197 + 198 + case OP_STFDU: 199 + if (kvmppc_check_fp_disabled(vcpu)) 200 + return EMULATE_DONE; 201 + emulated = kvmppc_handle_store(run, vcpu, 202 + VCPU_FPR(vcpu, rs), 203 + 8, 1); 204 + kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); 205 + break; 206 + #endif 207 + 208 + /* TBD: Add support for 
other 64 bit load variants like ldu etc. */ 432 209 case OP_LD: 433 210 rt = get_rt(inst); 434 211 emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); ··· 550 251 2, 1); 551 252 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); 552 253 break; 254 + 255 + #ifdef CONFIG_PPC_FPU 256 + case OP_LFS: 257 + if (kvmppc_check_fp_disabled(vcpu)) 258 + return EMULATE_DONE; 259 + vcpu->arch.mmio_sp64_extend = 1; 260 + emulated = kvmppc_handle_load(run, vcpu, 261 + KVM_MMIO_REG_FPR|rt, 4, 1); 262 + break; 263 + 264 + case OP_LFSU: 265 + if (kvmppc_check_fp_disabled(vcpu)) 266 + return EMULATE_DONE; 267 + vcpu->arch.mmio_sp64_extend = 1; 268 + emulated = kvmppc_handle_load(run, vcpu, 269 + KVM_MMIO_REG_FPR|rt, 4, 1); 270 + kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); 271 + break; 272 + 273 + case OP_LFD: 274 + if (kvmppc_check_fp_disabled(vcpu)) 275 + return EMULATE_DONE; 276 + emulated = kvmppc_handle_load(run, vcpu, 277 + KVM_MMIO_REG_FPR|rt, 8, 1); 278 + break; 279 + 280 + case OP_LFDU: 281 + if (kvmppc_check_fp_disabled(vcpu)) 282 + return EMULATE_DONE; 283 + emulated = kvmppc_handle_load(run, vcpu, 284 + KVM_MMIO_REG_FPR|rt, 8, 1); 285 + kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); 286 + break; 287 + #endif 553 288 554 289 default: 555 290 emulated = EMULATE_FAIL;
+312 -6
arch/powerpc/kvm/powerpc.c
··· 37 37 #include <asm/cputhreads.h> 38 38 #include <asm/irqflags.h> 39 39 #include <asm/iommu.h> 40 + #include <asm/switch_to.h> 40 41 #include "timing.h" 41 42 #include "irq.h" 42 43 #include "../mm/mmu_decl.h" ··· 802 801 kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod); 803 802 } 804 803 804 + #ifdef CONFIG_VSX 805 + static inline int kvmppc_get_vsr_dword_offset(int index) 806 + { 807 + int offset; 808 + 809 + if ((index != 0) && (index != 1)) 810 + return -1; 811 + 812 + #ifdef __BIG_ENDIAN 813 + offset = index; 814 + #else 815 + offset = 1 - index; 816 + #endif 817 + 818 + return offset; 819 + } 820 + 821 + static inline int kvmppc_get_vsr_word_offset(int index) 822 + { 823 + int offset; 824 + 825 + if ((index > 3) || (index < 0)) 826 + return -1; 827 + 828 + #ifdef __BIG_ENDIAN 829 + offset = index; 830 + #else 831 + offset = 3 - index; 832 + #endif 833 + return offset; 834 + } 835 + 836 + static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu, 837 + u64 gpr) 838 + { 839 + union kvmppc_one_reg val; 840 + int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); 841 + int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; 842 + 843 + if (offset == -1) 844 + return; 845 + 846 + if (vcpu->arch.mmio_vsx_tx_sx_enabled) { 847 + val.vval = VCPU_VSX_VR(vcpu, index); 848 + val.vsxval[offset] = gpr; 849 + VCPU_VSX_VR(vcpu, index) = val.vval; 850 + } else { 851 + VCPU_VSX_FPR(vcpu, index, offset) = gpr; 852 + } 853 + } 854 + 855 + static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu, 856 + u64 gpr) 857 + { 858 + union kvmppc_one_reg val; 859 + int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; 860 + 861 + if (vcpu->arch.mmio_vsx_tx_sx_enabled) { 862 + val.vval = VCPU_VSX_VR(vcpu, index); 863 + val.vsxval[0] = gpr; 864 + val.vsxval[1] = gpr; 865 + VCPU_VSX_VR(vcpu, index) = val.vval; 866 + } else { 867 + VCPU_VSX_FPR(vcpu, index, 0) = gpr; 868 + VCPU_VSX_FPR(vcpu, index, 1) = gpr; 869 + } 870 + } 871 + 872 + static inline void 
kvmppc_set_vsr_word(struct kvm_vcpu *vcpu, 873 + u32 gpr32) 874 + { 875 + union kvmppc_one_reg val; 876 + int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); 877 + int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; 878 + int dword_offset, word_offset; 879 + 880 + if (offset == -1) 881 + return; 882 + 883 + if (vcpu->arch.mmio_vsx_tx_sx_enabled) { 884 + val.vval = VCPU_VSX_VR(vcpu, index); 885 + val.vsx32val[offset] = gpr32; 886 + VCPU_VSX_VR(vcpu, index) = val.vval; 887 + } else { 888 + dword_offset = offset / 2; 889 + word_offset = offset % 2; 890 + val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset); 891 + val.vsx32val[word_offset] = gpr32; 892 + VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0]; 893 + } 894 + } 895 + #endif /* CONFIG_VSX */ 896 + 897 + #ifdef CONFIG_PPC_FPU 898 + static inline u64 sp_to_dp(u32 fprs) 899 + { 900 + u64 fprd; 901 + 902 + preempt_disable(); 903 + enable_kernel_fp(); 904 + asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs) 905 + : "fr0"); 906 + preempt_enable(); 907 + return fprd; 908 + } 909 + 910 + static inline u32 dp_to_sp(u64 fprd) 911 + { 912 + u32 fprs; 913 + 914 + preempt_disable(); 915 + enable_kernel_fp(); 916 + asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd) 917 + : "fr0"); 918 + preempt_enable(); 919 + return fprs; 920 + } 921 + 922 + #else 923 + #define sp_to_dp(x) (x) 924 + #define dp_to_sp(x) (x) 925 + #endif /* CONFIG_PPC_FPU */ 926 + 805 927 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, 806 928 struct kvm_run *run) 807 929 { ··· 951 827 } 952 828 } 953 829 830 + /* conversion between single and double precision */ 831 + if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4)) 832 + gpr = sp_to_dp(gpr); 833 + 954 834 if (vcpu->arch.mmio_sign_extend) { 955 835 switch (run->mmio.len) { 956 836 #ifdef CONFIG_PPC64 ··· 971 843 } 972 844 } 973 845 974 - kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); 975 - 976 846 switch (vcpu->arch.io_gpr & 
KVM_MMIO_REG_EXT_MASK) { 977 847 case KVM_MMIO_REG_GPR: 978 848 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); ··· 985 859 case KVM_MMIO_REG_FQPR: 986 860 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; 987 861 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; 862 + break; 863 + #endif 864 + #ifdef CONFIG_VSX 865 + case KVM_MMIO_REG_VSX: 866 + if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_DWORD) 867 + kvmppc_set_vsr_dword(vcpu, gpr); 868 + else if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_WORD) 869 + kvmppc_set_vsr_word(vcpu, gpr); 870 + else if (vcpu->arch.mmio_vsx_copy_type == 871 + KVMPPC_VSX_COPY_DWORD_LOAD_DUMP) 872 + kvmppc_set_vsr_dword_dump(vcpu, gpr); 988 873 break; 989 874 #endif 990 875 default: ··· 1064 927 return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1); 1065 928 } 1066 929 930 + #ifdef CONFIG_VSX 931 + int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 932 + unsigned int rt, unsigned int bytes, 933 + int is_default_endian, int mmio_sign_extend) 934 + { 935 + enum emulation_result emulated = EMULATE_DONE; 936 + 937 + /* Currently, mmio_vsx_copy_nums only allowed to be less than 4 */ 938 + if ( (vcpu->arch.mmio_vsx_copy_nums > 4) || 939 + (vcpu->arch.mmio_vsx_copy_nums < 0) ) { 940 + return EMULATE_FAIL; 941 + } 942 + 943 + while (vcpu->arch.mmio_vsx_copy_nums) { 944 + emulated = __kvmppc_handle_load(run, vcpu, rt, bytes, 945 + is_default_endian, mmio_sign_extend); 946 + 947 + if (emulated != EMULATE_DONE) 948 + break; 949 + 950 + vcpu->arch.paddr_accessed += run->mmio.len; 951 + 952 + vcpu->arch.mmio_vsx_copy_nums--; 953 + vcpu->arch.mmio_vsx_offset++; 954 + } 955 + return emulated; 956 + } 957 + #endif /* CONFIG_VSX */ 958 + 1067 959 int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 1068 960 u64 val, unsigned int bytes, int is_default_endian) 1069 961 { ··· 1117 951 run->mmio.is_write = 1; 1118 952 vcpu->mmio_needed = 1; 1119 953 vcpu->mmio_is_write = 
1; 954 + 955 + if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4)) 956 + val = dp_to_sp(val); 1120 957 1121 958 /* Store the value at the lowest bytes in 'data'. */ 1122 959 if (!host_swabbed) { ··· 1153 984 return EMULATE_DO_MMIO; 1154 985 } 1155 986 EXPORT_SYMBOL_GPL(kvmppc_handle_store); 987 + 988 + #ifdef CONFIG_VSX 989 + static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val) 990 + { 991 + u32 dword_offset, word_offset; 992 + union kvmppc_one_reg reg; 993 + int vsx_offset = 0; 994 + int copy_type = vcpu->arch.mmio_vsx_copy_type; 995 + int result = 0; 996 + 997 + switch (copy_type) { 998 + case KVMPPC_VSX_COPY_DWORD: 999 + vsx_offset = 1000 + kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); 1001 + 1002 + if (vsx_offset == -1) { 1003 + result = -1; 1004 + break; 1005 + } 1006 + 1007 + if (!vcpu->arch.mmio_vsx_tx_sx_enabled) { 1008 + *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset); 1009 + } else { 1010 + reg.vval = VCPU_VSX_VR(vcpu, rs); 1011 + *val = reg.vsxval[vsx_offset]; 1012 + } 1013 + break; 1014 + 1015 + case KVMPPC_VSX_COPY_WORD: 1016 + vsx_offset = 1017 + kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); 1018 + 1019 + if (vsx_offset == -1) { 1020 + result = -1; 1021 + break; 1022 + } 1023 + 1024 + if (!vcpu->arch.mmio_vsx_tx_sx_enabled) { 1025 + dword_offset = vsx_offset / 2; 1026 + word_offset = vsx_offset % 2; 1027 + reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset); 1028 + *val = reg.vsx32val[word_offset]; 1029 + } else { 1030 + reg.vval = VCPU_VSX_VR(vcpu, rs); 1031 + *val = reg.vsx32val[vsx_offset]; 1032 + } 1033 + break; 1034 + 1035 + default: 1036 + result = -1; 1037 + break; 1038 + } 1039 + 1040 + return result; 1041 + } 1042 + 1043 + int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 1044 + int rs, unsigned int bytes, int is_default_endian) 1045 + { 1046 + u64 val; 1047 + enum emulation_result emulated = EMULATE_DONE; 1048 + 1049 + vcpu->arch.io_gpr = rs; 1050 + 1051 + /* Currently, 
mmio_vsx_copy_nums only allowed to be less than 4 */ 1052 + if ( (vcpu->arch.mmio_vsx_copy_nums > 4) || 1053 + (vcpu->arch.mmio_vsx_copy_nums < 0) ) { 1054 + return EMULATE_FAIL; 1055 + } 1056 + 1057 + while (vcpu->arch.mmio_vsx_copy_nums) { 1058 + if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1) 1059 + return EMULATE_FAIL; 1060 + 1061 + emulated = kvmppc_handle_store(run, vcpu, 1062 + val, bytes, is_default_endian); 1063 + 1064 + if (emulated != EMULATE_DONE) 1065 + break; 1066 + 1067 + vcpu->arch.paddr_accessed += run->mmio.len; 1068 + 1069 + vcpu->arch.mmio_vsx_copy_nums--; 1070 + vcpu->arch.mmio_vsx_offset++; 1071 + } 1072 + 1073 + return emulated; 1074 + } 1075 + 1076 + static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu, 1077 + struct kvm_run *run) 1078 + { 1079 + enum emulation_result emulated = EMULATE_FAIL; 1080 + int r; 1081 + 1082 + vcpu->arch.paddr_accessed += run->mmio.len; 1083 + 1084 + if (!vcpu->mmio_is_write) { 1085 + emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr, 1086 + run->mmio.len, 1, vcpu->arch.mmio_sign_extend); 1087 + } else { 1088 + emulated = kvmppc_handle_vsx_store(run, vcpu, 1089 + vcpu->arch.io_gpr, run->mmio.len, 1); 1090 + } 1091 + 1092 + switch (emulated) { 1093 + case EMULATE_DO_MMIO: 1094 + run->exit_reason = KVM_EXIT_MMIO; 1095 + r = RESUME_HOST; 1096 + break; 1097 + case EMULATE_FAIL: 1098 + pr_info("KVM: MMIO emulation failed (VSX repeat)\n"); 1099 + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 1100 + run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; 1101 + r = RESUME_HOST; 1102 + break; 1103 + default: 1104 + r = RESUME_GUEST; 1105 + break; 1106 + } 1107 + return r; 1108 + } 1109 + #endif /* CONFIG_VSX */ 1156 1110 1157 1111 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) 1158 1112 { ··· 1379 1087 int r; 1380 1088 sigset_t sigsaved; 1381 1089 1382 - if (vcpu->sigset_active) 1383 - sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); 1384 - 1385 1090 if 
(vcpu->mmio_needed) { 1091 + vcpu->mmio_needed = 0; 1386 1092 if (!vcpu->mmio_is_write) 1387 1093 kvmppc_complete_mmio_load(vcpu, run); 1388 - vcpu->mmio_needed = 0; 1094 + #ifdef CONFIG_VSX 1095 + if (vcpu->arch.mmio_vsx_copy_nums > 0) { 1096 + vcpu->arch.mmio_vsx_copy_nums--; 1097 + vcpu->arch.mmio_vsx_offset++; 1098 + } 1099 + 1100 + if (vcpu->arch.mmio_vsx_copy_nums > 0) { 1101 + r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run); 1102 + if (r == RESUME_HOST) { 1103 + vcpu->mmio_needed = 1; 1104 + return r; 1105 + } 1106 + } 1107 + #endif 1389 1108 } else if (vcpu->arch.osi_needed) { 1390 1109 u64 *gprs = run->osi.gprs; 1391 1110 int i; ··· 1417 1114 vcpu->arch.epr_needed = 0; 1418 1115 #endif 1419 1116 } 1117 + 1118 + if (vcpu->sigset_active) 1119 + sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); 1420 1120 1421 1121 if (run->immediate_exit) 1422 1122 r = -EINTR;