Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: Move fields between struct kvm_vcpu_arch and kvmppc_vcpu_book3s

This moves the slb field, which represents the state of the emulated
SLB, from the kvmppc_vcpu_book3s struct to the kvm_vcpu_arch, and the
hpte_hash_[v]pte[_long] fields from kvm_vcpu_arch to kvmppc_vcpu_book3s.
This is in accord with the principle that the kvm_vcpu_arch struct
represents the state of the emulated CPU, and the kvmppc_vcpu_book3s
struct holds the auxiliary data structures used in the emulation.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>

Authored by Paul Mackerras; committed by Avi Kivity.
c4befc58 149dbdb1

+107 -98
+19 -16
arch/powerpc/include/asm/kvm_book3s.h
··· 24 24 #include <linux/kvm_host.h> 25 25 #include <asm/kvm_book3s_asm.h> 26 26 27 - struct kvmppc_slb { 28 - u64 esid; 29 - u64 vsid; 30 - u64 orige; 31 - u64 origv; 32 - bool valid : 1; 33 - bool Ks : 1; 34 - bool Kp : 1; 35 - bool nx : 1; 36 - bool large : 1; /* PTEs are 16MB */ 37 - bool tb : 1; /* 1TB segment */ 38 - bool class : 1; 39 - }; 40 - 41 27 struct kvmppc_bat { 42 28 u64 raw; 43 29 u32 bepi; ··· 53 67 #define VSID_POOL_SIZE (SID_CONTEXTS * 16) 54 68 #endif 55 69 70 + struct hpte_cache { 71 + struct hlist_node list_pte; 72 + struct hlist_node list_pte_long; 73 + struct hlist_node list_vpte; 74 + struct hlist_node list_vpte_long; 75 + struct rcu_head rcu_head; 76 + u64 host_va; 77 + u64 pfn; 78 + ulong slot; 79 + struct kvmppc_pte pte; 80 + }; 81 + 56 82 struct kvmppc_vcpu_book3s { 57 83 struct kvm_vcpu vcpu; 58 84 struct kvmppc_book3s_shadow_vcpu *shadow_vcpu; 59 85 struct kvmppc_sid_map sid_map[SID_MAP_NUM]; 60 - struct kvmppc_slb slb[64]; 61 86 struct { 62 87 u64 esid; 63 88 u64 vsid; ··· 78 81 struct kvmppc_bat dbat[8]; 79 82 u64 hid[6]; 80 83 u64 gqr[8]; 81 - int slb_nr; 82 84 u64 sdr1; 83 85 u64 hior; 84 86 u64 msr_mask; ··· 90 94 #endif 91 95 int context_id[SID_CONTEXTS]; 92 96 ulong prog_flags; /* flags to inject when giving a 700 trap */ 97 + 98 + struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE]; 99 + struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG]; 100 + struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE]; 101 + struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG]; 102 + int hpte_cache_count; 103 + spinlock_t mmu_lock; 93 104 }; 94 105 95 106 #define CONTEXT_HOST 0
+15 -19
arch/powerpc/include/asm/kvm_host.h
··· 163 163 bool (*is_dcbz32)(struct kvm_vcpu *vcpu); 164 164 }; 165 165 166 - struct hpte_cache { 167 - struct hlist_node list_pte; 168 - struct hlist_node list_pte_long; 169 - struct hlist_node list_vpte; 170 - struct hlist_node list_vpte_long; 171 - struct rcu_head rcu_head; 172 - u64 host_va; 173 - u64 pfn; 174 - ulong slot; 175 - struct kvmppc_pte pte; 166 + struct kvmppc_slb { 167 + u64 esid; 168 + u64 vsid; 169 + u64 orige; 170 + u64 origv; 171 + bool valid : 1; 172 + bool Ks : 1; 173 + bool Kp : 1; 174 + bool nx : 1; 175 + bool large : 1; /* PTEs are 16MB */ 176 + bool tb : 1; /* 1TB segment */ 177 + bool class : 1; 176 178 }; 177 179 178 180 struct kvm_vcpu_arch { ··· 189 187 ulong highmem_handler; 190 188 ulong rmcall; 191 189 ulong host_paca_phys; 190 + struct kvmppc_slb slb[64]; 191 + int slb_max; /* # valid entries in slb[] */ 192 + int slb_nr; /* total number of entries in SLB */ 192 193 struct kvmppc_mmu mmu; 193 194 #endif 194 195 ··· 310 305 struct kvm_vcpu_arch_shared *shared; 311 306 unsigned long magic_page_pa; /* phys addr to map the magic page to */ 312 307 unsigned long magic_page_ea; /* effect. addr to map the magic page to */ 313 - 314 - #ifdef CONFIG_PPC_BOOK3S 315 - struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE]; 316 - struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG]; 317 - struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE]; 318 - struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG]; 319 - int hpte_cache_count; 320 - spinlock_t mmu_lock; 321 - #endif 322 308 }; 323 309 324 310 #endif /* __POWERPC_KVM_HOST_H__ */
+5 -4
arch/powerpc/kvm/book3s.c
··· 17 17 #include <linux/kvm_host.h> 18 18 #include <linux/err.h> 19 19 #include <linux/slab.h> 20 - #include "trace.h" 21 20 22 21 #include <asm/reg.h> 23 22 #include <asm/cputable.h> ··· 32 33 #include <linux/sched.h> 33 34 #include <linux/vmalloc.h> 34 35 #include <linux/highmem.h> 36 + 37 + #include "trace.h" 35 38 36 39 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU 37 40 ··· 1192 1191 sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1; 1193 1192 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { 1194 1193 for (i = 0; i < 64; i++) { 1195 - sregs->u.s.ppc64.slb[i].slbe = vcpu3s->slb[i].orige | i; 1196 - sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv; 1194 + sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i; 1195 + sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; 1197 1196 } 1198 1197 } else { 1199 1198 for (i = 0; i < 16; i++) ··· 1341 1340 vcpu->arch.pvr = 0x84202; 1342 1341 #endif 1343 1342 kvmppc_set_pvr(vcpu, vcpu->arch.pvr); 1344 - vcpu_book3s->slb_nr = 64; 1343 + vcpu->arch.slb_nr = 64; 1345 1344 1346 1345 /* remember where some real-mode handlers are */ 1347 1346 vcpu->arch.trampoline_lowmem = __pa(kvmppc_handler_lowmem_trampoline);
+25 -29
arch/powerpc/kvm/book3s_64_mmu.c
··· 41 41 } 42 42 43 43 static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe( 44 - struct kvmppc_vcpu_book3s *vcpu_book3s, 44 + struct kvm_vcpu *vcpu, 45 45 gva_t eaddr) 46 46 { 47 47 int i; 48 48 u64 esid = GET_ESID(eaddr); 49 49 u64 esid_1t = GET_ESID_1T(eaddr); 50 50 51 - for (i = 0; i < vcpu_book3s->slb_nr; i++) { 51 + for (i = 0; i < vcpu->arch.slb_nr; i++) { 52 52 u64 cmp_esid = esid; 53 53 54 - if (!vcpu_book3s->slb[i].valid) 54 + if (!vcpu->arch.slb[i].valid) 55 55 continue; 56 56 57 - if (vcpu_book3s->slb[i].tb) 57 + if (vcpu->arch.slb[i].tb) 58 58 cmp_esid = esid_1t; 59 59 60 - if (vcpu_book3s->slb[i].esid == cmp_esid) 61 - return &vcpu_book3s->slb[i]; 60 + if (vcpu->arch.slb[i].esid == cmp_esid) 61 + return &vcpu->arch.slb[i]; 62 62 } 63 63 64 64 dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n", 65 65 eaddr, esid, esid_1t); 66 - for (i = 0; i < vcpu_book3s->slb_nr; i++) { 67 - if (vcpu_book3s->slb[i].vsid) 66 + for (i = 0; i < vcpu->arch.slb_nr; i++) { 67 + if (vcpu->arch.slb[i].vsid) 68 68 dprintk(" %d: %c%c%c %llx %llx\n", i, 69 - vcpu_book3s->slb[i].valid ? 'v' : ' ', 70 - vcpu_book3s->slb[i].large ? 'l' : ' ', 71 - vcpu_book3s->slb[i].tb ? 't' : ' ',
72 - vcpu_book3s->slb[i].esid, 73 - vcpu_book3s->slb[i].vsid); 69 + vcpu->arch.slb[i].valid ? 'v' : ' ', 70 + vcpu->arch.slb[i].large ? 'l' : ' ', 71 + vcpu->arch.slb[i].tb ? 't' : ' ', 72 + vcpu->arch.slb[i].esid, 73 + vcpu->arch.slb[i].vsid); 74 74 } 75 75 76 76 return NULL; ··· 81 81 { 82 82 struct kvmppc_slb *slb; 83 83 84 - slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), eaddr); 84 + slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr); 85 85 if (!slb) 86 86 return 0; 87 87 ··· 180 180 return 0; 181 181 } 182 182 183 - slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, eaddr); 183 + slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr); 184 184 if (!slbe) 185 185 goto no_seg_found; 186 186 ··· 320 320 esid_1t = GET_ESID_1T(rb); 321 321 slb_nr = rb & 0xfff; 322 322 323 - if (slb_nr > vcpu_book3s->slb_nr) 323 + if (slb_nr > vcpu->arch.slb_nr) 324 324 return; 325 325 326 - slbe = &vcpu_book3s->slb[slb_nr]; 326 + slbe = &vcpu->arch.slb[slb_nr]; 327 327 328 328 slbe->large = (rs & SLB_VSID_L) ? 1 : 0; 329 329 slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0; ··· 344 344 345 345 static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr) 346 346 { 347 - struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); 348 347 struct kvmppc_slb *slbe; 349 348 350 - if (slb_nr > vcpu_book3s->slb_nr) 349 + if (slb_nr > vcpu->arch.slb_nr) 351 350 return 0; 352 351 353 - slbe = &vcpu_book3s->slb[slb_nr]; 352 + slbe = &vcpu->arch.slb[slb_nr]; 354 353 355 354 return slbe->orige; 356 355 } 357 356 358 357 static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr) 359 358 { 360 - struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); 361 359 struct kvmppc_slb *slbe; 362 360 363 - if (slb_nr > vcpu_book3s->slb_nr) 361 + if (slb_nr > vcpu->arch.slb_nr) 364 362 return 0; 365 363 366 - slbe = &vcpu_book3s->slb[slb_nr]; 364 + slbe = &vcpu->arch.slb[slb_nr]; 367 365 368 366 return slbe->origv; 369 367 } 370 368 371 369 static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea) 372 370 { 373 - struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); 374 371 struct kvmppc_slb *slbe; 375 372 376 373 dprintk("KVM MMU: slbie(0x%llx)\n", ea);
377 374 378 - slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, ea); 375 + slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea); 379 376 380 377 if (!slbe) 381 378 return; ··· 386 389 387 390 static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu) 388 391 { 389 - struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); 390 392 int i; 391 393 392 394 dprintk("KVM MMU: slbia()\n"); 393 395 394 - for (i = 1; i < vcpu_book3s->slb_nr; i++) 395 - vcpu_book3s->slb[i].valid = false; 396 + for (i = 1; i < vcpu->arch.slb_nr; i++) 397 + vcpu->arch.slb[i].valid = false; 396 398 397 399 if (vcpu->arch.shared->msr & MSR_IR) { 398 400 kvmppc_mmu_flush_segments(vcpu); ··· 460 464 ulong mp_ea = vcpu->arch.magic_page_ea; 461 465 462 466 if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { 463 - slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea); 467 + slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea); 464 468 if (slb) 465 469 gvsid = slb->vsid; 466 470 }
+42 -29
arch/powerpc/kvm/book3s_mmu_hpte.c
··· 21 21 #include <linux/kvm_host.h> 22 22 #include <linux/hash.h> 23 23 #include <linux/slab.h> 24 - #include "trace.h" 25 24 26 25 #include <asm/kvm_ppc.h> 27 26 #include <asm/kvm_book3s.h> 28 27 #include <asm/machdep.h> 29 28 #include <asm/mmu_context.h> 30 29 #include <asm/hw_irq.h> 30 + 31 + #include "trace.h" 31 32 32 33 #define PTE_SIZE 12 33 34 ··· 59 58 void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte) 60 59 { 61 60 u64 index; 61 + struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 62 62 63 63 trace_kvm_book3s_mmu_map(pte); 64 64 65 - spin_lock(&vcpu->arch.mmu_lock); 65 + spin_lock(&vcpu3s->mmu_lock); 66 66 67 67 /* Add to ePTE list */ 68 68 index = kvmppc_mmu_hash_pte(pte->pte.eaddr); 69 - hlist_add_head_rcu(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]); 69 + hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[index]); 70 70 71 71 /* Add to ePTE_long list */ 72 72 index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr); 73 73 hlist_add_head_rcu(&pte->list_pte_long, 74 - &vcpu->arch.hpte_hash_pte_long[index]); 74 + &vcpu3s->hpte_hash_pte_long[index]); 75 75 76 76 /* Add to vPTE list */ 77 77 index = kvmppc_mmu_hash_vpte(pte->pte.vpage); 78 - hlist_add_head_rcu(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]); 78 + hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[index]); 79 79 80 80 /* Add to vPTE_long list */ 81 81 index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage); 82 82 hlist_add_head_rcu(&pte->list_vpte_long, 83 - &vcpu->arch.hpte_hash_vpte_long[index]); 83 + &vcpu3s->hpte_hash_vpte_long[index]); 84 84 85 - spin_unlock(&vcpu->arch.mmu_lock); 85 + spin_unlock(&vcpu3s->mmu_lock); 86 86 } 87 87 88 88 static void free_pte_rcu(struct rcu_head *head) ··· 94 92 95 93 static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) 96 94 { 95 + struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 96 + 97 97 trace_kvm_book3s_mmu_invalidate(pte); 98 98 99 99 /* Different for 32 and 64 bit */ 100 100 kvmppc_mmu_invalidate_pte(vcpu, pte);
101 101 102 - spin_lock(&vcpu->arch.mmu_lock); 102 + spin_lock(&vcpu3s->mmu_lock); 103 103 104 104 /* pte already invalidated in between? */ 105 105 if (hlist_unhashed(&pte->list_pte)) { 106 - spin_unlock(&vcpu->arch.mmu_lock); 106 + spin_unlock(&vcpu3s->mmu_lock); 107 107 return; 108 108 } 109 109 ··· 119 115 else 120 116 kvm_release_pfn_clean(pte->pfn); 121 117 122 - spin_unlock(&vcpu->arch.mmu_lock); 118 + spin_unlock(&vcpu3s->mmu_lock); 123 119 124 - vcpu->arch.hpte_cache_count--; 120 + vcpu3s->hpte_cache_count--; 125 121 call_rcu(&pte->rcu_head, free_pte_rcu); 126 122 } 127 123 128 124 static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu) 129 125 { 126 + struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 130 127 struct hpte_cache *pte; 131 128 struct hlist_node *node; 132 129 int i; ··· 135 130 rcu_read_lock(); 136 131 137 132 for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) { 138 - struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i]; 133 + struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i]; 139 134 140 135 hlist_for_each_entry_rcu(pte, node, list, list_vpte_long) 141 136 invalidate_pte(vcpu, pte); ··· 146 141 147 142 static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea) 148 143 { 144 + struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 149 145 struct hlist_head *list; 150 146 struct hlist_node *node; 151 147 struct hpte_cache *pte; 152 148 153 149 /* Find the list of entries in the map */ 154 - list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)]; 150 + list = &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)]; 155 151 156 152 rcu_read_lock(); ··· 166 160 167 161 static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea) 168 162 { 163 + struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 169 164 struct hlist_head *list; 170 165 struct hlist_node *node; 171 166 struct hpte_cache *pte; 172 167 173 168 /* Find the list of entries in the map */
174 - list = &vcpu->arch.hpte_hash_pte_long[ 169 + list = &vcpu3s->hpte_hash_pte_long[ 175 170 kvmppc_mmu_hash_pte_long(guest_ea)]; 176 171 177 172 rcu_read_lock(); ··· 210 203 /* Flush with mask 0xfffffffff */ 211 204 static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp) 212 205 { 206 + struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 213 207 struct hlist_head *list; 214 208 struct hlist_node *node; 215 209 struct hpte_cache *pte; 216 210 u64 vp_mask = 0xfffffffffULL; 217 211 218 - list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)]; 212 + list = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)]; 219 213 220 214 rcu_read_lock(); ··· 231 223 /* Flush with mask 0xffffff000 */ 232 224 static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp) 233 225 { 226 + struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 234 227 struct hlist_head *list; 235 228 struct hlist_node *node; 236 229 struct hpte_cache *pte; 237 230 u64 vp_mask = 0xffffff000ULL; 238 231 239 - list = &vcpu->arch.hpte_hash_vpte_long[ 232 + list = &vcpu3s->hpte_hash_vpte_long[ 240 233 kvmppc_mmu_hash_vpte_long(guest_vp)]; 241 234 242 235 rcu_read_lock(); ··· 270 261 271 262 void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end) 272 263 { 264 + struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 273 265 struct hlist_node *node; 274 266 struct hpte_cache *pte; 275 267 int i; ··· 280 270 rcu_read_lock(); 281 271 282 272 for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) { 283 - struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i]; 273 + struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i]; 284 274 285 275 hlist_for_each_entry_rcu(pte, node, list, list_vpte_long) 286 276 if ((pte->pte.raddr >= pa_start) && ··· 293 283 294 284 struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu) 295 285 { 286 + struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 296 287 struct hpte_cache *pte; 297 288
298 289 pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL); 299 - vcpu->arch.hpte_cache_count++; 290 + vcpu3s->hpte_cache_count++; 300 291 301 - if (vcpu->arch.hpte_cache_count == HPTEG_CACHE_NUM) 292 + if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM) 302 293 kvmppc_mmu_pte_flush_all(vcpu); 303 294 304 295 return pte; ··· 320 309 321 310 int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu) 322 311 { 323 - /* init hpte lookup hashes */ 324 - kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte, 325 - ARRAY_SIZE(vcpu->arch.hpte_hash_pte)); 326 - kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte_long, 327 - ARRAY_SIZE(vcpu->arch.hpte_hash_pte_long)); 328 - kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte, 329 - ARRAY_SIZE(vcpu->arch.hpte_hash_vpte)); 330 - kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long, 331 - ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long)); 312 + struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); 332 313 333 - spin_lock_init(&vcpu->arch.mmu_lock); 314 + /* init hpte lookup hashes */ 315 + kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte, 316 + ARRAY_SIZE(vcpu3s->hpte_hash_pte)); 317 + kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte_long, 318 + ARRAY_SIZE(vcpu3s->hpte_hash_pte_long)); 319 + kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte, 320 + ARRAY_SIZE(vcpu3s->hpte_hash_vpte)); 321 + kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long, 322 + ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long)); 323 + 324 + spin_lock_init(&vcpu3s->mmu_lock); 334 325 335 326 return 0; 336 327 }
+1 -1
arch/powerpc/kvm/trace.h
··· 252 252 ), 253 253 254 254 TP_fast_assign( 255 - __entry->count = vcpu->arch.hpte_cache_count; 255 + __entry->count = to_book3s(vcpu)->hpte_cache_count; 256 256 __entry->p1 = p1; 257 257 __entry->p2 = p2; 258 258 __entry->type = type;