Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: Implement correct SID mapping on Book3s_32

Up until now we were doing segment mappings wrong on Book3s_32. For Book3s_64
we were using a trick where we know that a single mmu_context gives us 16 bits
of context ids.

The mm system on Book3s_32 instead uses a clever algorithm to distribute VSIDs
across the available range, so a context id really only gives us 16 available
VSIDs.

To keep at least a few guest processes in the SID shadow, let's map a number of
contexts that we can use as a VSID pool. This makes the code actually correct
and shouldn't hurt performance too much.

Signed-off-by: Alexander Graf <agraf@suse.de>

Authored by Alexander Graf, committed by Avi Kivity.
Commit 8b6db3bc (parent ad087376)

+48 -32
+13 -2
arch/powerpc/include/asm/kvm_book3s.h
··· 60 60 #define SID_MAP_NUM (1 << SID_MAP_BITS) 61 61 #define SID_MAP_MASK (SID_MAP_NUM - 1) 62 62 63 + #ifdef CONFIG_PPC_BOOK3S_64 64 + #define SID_CONTEXTS 1 65 + #else 66 + #define SID_CONTEXTS 128 67 + #define VSID_POOL_SIZE (SID_CONTEXTS * 16) 68 + #endif 69 + 63 70 struct kvmppc_vcpu_book3s { 64 71 struct kvm_vcpu vcpu; 65 72 struct kvmppc_book3s_shadow_vcpu *shadow_vcpu; ··· 85 78 u64 sdr1; 86 79 u64 hior; 87 80 u64 msr_mask; 88 - u64 vsid_first; 89 81 u64 vsid_next; 82 + #ifdef CONFIG_PPC_BOOK3S_32 83 + u32 vsid_pool[VSID_POOL_SIZE]; 84 + #else 85 + u64 vsid_first; 90 86 u64 vsid_max; 91 - int context_id; 87 + #endif 88 + int context_id[SID_CONTEXTS]; 92 89 ulong prog_flags; /* flags to inject when giving a 700 trap */ 93 90 }; 94 91
+31 -26
arch/powerpc/kvm/book3s_32_mmu_host.c
··· 275 275 backwards_map = !backwards_map; 276 276 277 277 /* Uh-oh ... out of mappings. Let's flush! */ 278 - if (vcpu_book3s->vsid_next >= vcpu_book3s->vsid_max) { 279 - vcpu_book3s->vsid_next = vcpu_book3s->vsid_first; 278 + if (vcpu_book3s->vsid_next >= VSID_POOL_SIZE) { 279 + vcpu_book3s->vsid_next = 0; 280 280 memset(vcpu_book3s->sid_map, 0, 281 281 sizeof(struct kvmppc_sid_map) * SID_MAP_NUM); 282 282 kvmppc_mmu_pte_flush(vcpu, 0, 0); 283 283 kvmppc_mmu_flush_segments(vcpu); 284 284 } 285 - map->host_vsid = vcpu_book3s->vsid_next; 286 - 287 - /* Would have to be 111 to be completely aligned with the rest of 288 - Linux, but that is just way too little space! */ 289 - vcpu_book3s->vsid_next+=1; 285 + map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next]; 286 + vcpu_book3s->vsid_next++; 290 287 291 288 map->guest_vsid = gvsid; 292 289 map->valid = true; ··· 330 333 331 334 void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) 332 335 { 336 + int i; 337 + 333 338 kvmppc_mmu_hpte_destroy(vcpu); 334 339 preempt_disable(); 335 - __destroy_context(to_book3s(vcpu)->context_id); 340 + for (i = 0; i < SID_CONTEXTS; i++) 341 + __destroy_context(to_book3s(vcpu)->context_id[i]); 336 342 preempt_enable(); 337 343 } 338 344 339 345 /* From mm/mmu_context_hash32.c */ 340 - #define CTX_TO_VSID(ctx) (((ctx) * (897 * 16)) & 0xffffff) 346 + #define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff) 341 347 342 348 int kvmppc_mmu_init(struct kvm_vcpu *vcpu) 343 349 { 344 350 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 345 351 int err; 346 352 ulong sdr1; 353 + int i; 354 + int j; 347 355 348 - err = __init_new_context(); 349 - if (err < 0) 350 - return -1; 351 - vcpu3s->context_id = err; 356 + for (i = 0; i < SID_CONTEXTS; i++) { 357 + err = __init_new_context(); 358 + if (err < 0) 359 + goto init_fail; 360 + vcpu3s->context_id[i] = err; 352 361 353 - vcpu3s->vsid_max = CTX_TO_VSID(vcpu3s->context_id + 1) - 1; 354 - vcpu3s->vsid_first = 
CTX_TO_VSID(vcpu3s->context_id); 362 + /* Remember context id for this combination */ 363 + for (j = 0; j < 16; j++) 364 + vcpu3s->vsid_pool[(i * 16) + j] = CTX_TO_VSID(err, j); 365 + } 355 366 356 - #if 0 /* XXX still doesn't guarantee uniqueness */ 357 - /* We could collide with the Linux vsid space because the vsid 358 - * wraps around at 24 bits. We're safe if we do our own space 359 - * though, so let's always set the highest bit. */ 360 - 361 - vcpu3s->vsid_max |= 0x00800000; 362 - vcpu3s->vsid_first |= 0x00800000; 363 - #endif 364 - BUG_ON(vcpu3s->vsid_max < vcpu3s->vsid_first); 365 - 366 - vcpu3s->vsid_next = vcpu3s->vsid_first; 367 + vcpu3s->vsid_next = 0; 367 368 368 369 /* Remember where the HTAB is */ 369 370 asm ( "mfsdr1 %0" : "=r"(sdr1) ); ··· 371 376 kvmppc_mmu_hpte_init(vcpu); 372 377 373 378 return 0; 379 + 380 + init_fail: 381 + for (j = 0; j < i; j++) { 382 + if (!vcpu3s->context_id[j]) 383 + continue; 384 + 385 + __destroy_context(to_book3s(vcpu)->context_id[j]); 386 + } 387 + 388 + return -1; 374 389 }
+4 -4
arch/powerpc/kvm/book3s_64_mmu_host.c
··· 286 286 void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) 287 287 { 288 288 kvmppc_mmu_hpte_destroy(vcpu); 289 - __destroy_context(to_book3s(vcpu)->context_id); 289 + __destroy_context(to_book3s(vcpu)->context_id[0]); 290 290 } 291 291 292 292 int kvmppc_mmu_init(struct kvm_vcpu *vcpu) ··· 297 297 err = __init_new_context(); 298 298 if (err < 0) 299 299 return -1; 300 - vcpu3s->context_id = err; 300 + vcpu3s->context_id[0] = err; 301 301 302 - vcpu3s->vsid_max = ((vcpu3s->context_id + 1) << USER_ESID_BITS) - 1; 303 - vcpu3s->vsid_first = vcpu3s->context_id << USER_ESID_BITS; 302 + vcpu3s->vsid_max = ((vcpu3s->context_id[0] + 1) << USER_ESID_BITS) - 1; 303 + vcpu3s->vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS; 304 304 vcpu3s->vsid_next = vcpu3s->vsid_first; 305 305 306 306 kvmppc_mmu_hpte_init(vcpu);