Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.35-rc5 (440 lines, 11 kB)
/*
 * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#define PTE_SIZE 12
#define VSID_ALL 0

/* #define DEBUG_MMU */
/* #define DEBUG_SLB */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while (0)
#endif

#ifdef DEBUG_SLB
#define dprintk_slb(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_slb(a, ...) do { } while (0)
#endif

/* Drop the host HPTE backing a shadow PTE and release the guest page. */
static void invalidate_pte(struct hpte_cache *pte)
{
	dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n",
		    pte->pte.eaddr, pte->pte.vpage, pte->host_va);

	ppc_md.hpte_invalidate(pte->slot, pte->host_va,
			       MMU_PAGE_4K, MMU_SEGSIZE_256M,
			       false);
	pte->host_va = 0;

	if (pte->pte.may_write)
		kvm_release_pfn_dirty(pte->pfn);
	else
		kvm_release_pfn_clean(pte->pfn);
}

void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
	int i;

	dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n",
		    vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);

	guest_ea &= ea_mask;
	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
		struct hpte_cache *pte;

		pte = &vcpu->arch.hpte_cache[i];
		if (!pte->host_va)
			continue;

		if ((pte->pte.eaddr & ea_mask) == guest_ea)
			invalidate_pte(pte);
	}

	/* Doing a complete flush -> start from scratch */
	if (!ea_mask)
		vcpu->arch.hpte_cache_offset = 0;
}

void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	int i;

	dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
		    vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);

	guest_vp &= vp_mask;
	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
		struct hpte_cache *pte;

		pte = &vcpu->arch.hpte_cache[i];
		if (!pte->host_va)
			continue;

		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(pte);
	}
}

void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
	int i;

	dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx - 0x%lx\n",
		    vcpu->arch.hpte_cache_offset, pa_start, pa_end);
	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);

	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
		struct hpte_cache *pte;

		pte = &vcpu->arch.hpte_cache[i];
		if (!pte->host_va)
			continue;

		if ((pte->pte.raddr >= pa_start) &&
		    (pte->pte.raddr < pa_end))
			invalidate_pte(pte);
	}
}

struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data)
{
	int i;
	u64 guest_vp;

	guest_vp = vcpu->arch.mmu.ea_to_vp(vcpu, ea, false);
	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
		struct hpte_cache *pte;

		pte = &vcpu->arch.hpte_cache[i];
		if (!pte->host_va)
			continue;

		if (pte->pte.vpage == guest_vp)
			return &pte->pte;
	}

	return NULL;
}

/* Hand out the next free slot in the shadow PTE cache; when the cache is
 * full, flush everything and restart from slot 0. */
static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hpte_cache_offset == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return vcpu->arch.hpte_cache_offset++;
}

/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}

/* Look up a guest VSID in the two candidate hash slots (forward and
 * backward) of the sid map. */
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (vcpu->arch.msr & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_slb("SLB: Searching: 0x%llx -> 0x%llx\n",
			    gvsid, map->host_vsid);
		return map;
	}

	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_slb("SLB: Searching 0x%llx -> 0x%llx\n",
			    gvsid, map->host_vsid);
		return map;
	}

	dprintk_slb("SLB: Searching %d/%d: 0x%llx -> not found\n",
		    sid_map_mask, SID_MAP_MASK - sid_map_mask, gvsid);
	return NULL;
}

/* Map a single guest page into the host hash table and remember the mapping
 * in the shadow PTE cache. */
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
{
	pfn_t hpaddr;
	ulong hash, hpteg, va;
	u64 vsid;
	int ret;
	int rflags = 0x192;
	int vflags = 0;
	int attempt = 0;
	struct kvmppc_sid_map *map;

	/* Get host physical address for gpa */
	hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
	if (is_error_pfn(hpaddr)) {
		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
		       orig_pte->raddr >> PAGE_SHIFT);
		return -EINVAL;
	}
	hpaddr <<= PAGE_SHIFT;
#if PAGE_SHIFT == 12
#elif PAGE_SHIFT == 16
	hpaddr |= orig_pte->raddr & 0xf000;
#else
#error Unknown page size
#endif

	/* and write the mapping ea -> hpa into the pt */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
		WARN_ON(ret < 0);
		map = find_sid_vsid(vcpu, vsid);
	}
	if (!map) {
		printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
		       vsid, orig_pte->eaddr);
		WARN_ON(true);
		return -EINVAL;
	}

	vsid = map->host_vsid;
	va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);

	if (!orig_pte->may_write)
		rflags |= HPTE_R_PP;
	else
		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);

	if (!orig_pte->may_execute)
		rflags |= HPTE_R_N;

	hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M);

map_again:
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	/* In case we tried normal mapping already, let's nuke old entries */
	if (attempt > 1)
		if (ppc_md.hpte_remove(hpteg) < 0)
			return -1;

	ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags,
				 MMU_PAGE_4K, MMU_SEGSIZE_256M);

	if (ret < 0) {
		/* If we couldn't map a primary PTE, try a secondary */
		hash = ~hash;
		vflags ^= HPTE_V_SECONDARY;
		attempt++;
		goto map_again;
	} else {
		int hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
		struct hpte_cache *pte = &vcpu->arch.hpte_cache[hpte_id];

		dprintk_mmu("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx\n",
			    ((rflags & HPTE_R_PP) == 3) ? '-' : 'w',
			    (rflags & HPTE_R_N) ? '-' : 'x',
			    orig_pte->eaddr, hpteg, va, orig_pte->vpage, hpaddr);

		/* The ppc_md code may give us a secondary entry even though we
		 * asked for a primary. Fix up. */
		if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
			hash = ~hash;
			hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
		}

		pte->slot = hpteg + (ret & 7);
		pte->host_va = va;
		pte->pte = *orig_pte;
		pte->pfn = hpaddr >> PAGE_SHIFT;
	}

	return 0;
}

static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map = 0;

	if (vcpu->arch.msr & MSR_PR)
		gvsid |= VSID_PR;

	/* Colliding guest VSIDs hash to the same slot, so alternate between
	 * the two candidate slots to keep them from evicting each other */

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &to_book3s(vcpu)->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->vsid_next == vcpu_book3s->vsid_max) {
		vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}
	map->host_vsid = vcpu_book3s->vsid_next++;

	map->guest_vsid = gvsid;
	map->valid = true;

	dprintk_slb("SLB: New mapping at %d: 0x%llx -> 0x%llx\n",
		    sid_map_mask, gvsid, map->host_vsid);

	return map;
}

/* Pick a shadow SLB slot for this ESID; entry 0 is kept invalid. */
static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
	int i;
	int max_slb_size = 64;
	int found_inval = -1;
	int r;

	if (!to_svcpu(vcpu)->slb_max)
		to_svcpu(vcpu)->slb_max = 1;

	/* Are we overwriting? */
	for (i = 1; i < to_svcpu(vcpu)->slb_max; i++) {
		if (!(to_svcpu(vcpu)->slb[i].esid & SLB_ESID_V))
			found_inval = i;
		else if ((to_svcpu(vcpu)->slb[i].esid & ESID_MASK) == esid)
			return i;
	}

	/* Found a spare entry that was invalidated before */
	if (found_inval > 0)
		return found_inval;

	/* No spare invalid entry, so create one */

	if (mmu_slb_size < 64)
		max_slb_size = mmu_slb_size;

	/* Overflowing -> purge */
	if ((to_svcpu(vcpu)->slb_max) == max_slb_size)
		kvmppc_mmu_flush_segments(vcpu);

	r = to_svcpu(vcpu)->slb_max;
	to_svcpu(vcpu)->slb_max++;

	return r;
}

int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	u64 esid = eaddr >> SID_SHIFT;
	u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
	u64 slb_vsid = SLB_VSID_USER;
	u64 gvsid;
	int slb_index;
	struct kvmppc_sid_map *map;

	slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		to_svcpu(vcpu)->slb[slb_index].esid = 0;
		return -ENOENT;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;

	slb_vsid |= (map->host_vsid << 12);
	slb_vsid &= ~SLB_VSID_KP;
	slb_esid |= slb_index;

	to_svcpu(vcpu)->slb[slb_index].esid = slb_esid;
	to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid;

	dprintk_slb("slbmte %#llx, %#llx\n", slb_vsid, slb_esid);

	return 0;
}

void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	to_svcpu(vcpu)->slb_max = 1;
	to_svcpu(vcpu)->slb[0].esid = 0;
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
	__destroy_context(to_book3s(vcpu)->context_id);
}

int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;

	err = __init_new_context();
	if (err < 0)
		return -1;
	vcpu3s->context_id = err;

	vcpu3s->vsid_max = ((vcpu3s->context_id + 1) << USER_ESID_BITS) - 1;
	vcpu3s->vsid_first = vcpu3s->context_id << USER_ESID_BITS;
	vcpu3s->vsid_next = vcpu3s->vsid_first;

	return 0;
}