Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v3.8-rc4 · 1430 lines · 37 kB
/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "trace.h"
#include "timing.h"

#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)

static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];

static inline unsigned int gtlb0_get_next_victim(
		struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned int victim;

	victim = vcpu_e500->gtlb_nv[0]++;
	if (unlikely(vcpu_e500->gtlb_nv[0] >= vcpu_e500->gtlb_params[0].ways))
		vcpu_e500->gtlb_nv[0] = 0;

	return victim;
}

static inline unsigned int tlb1_max_shadow_size(void)
{
	/* reserve one entry for magic page */
	return host_tlb_params[1].entries - tlbcam_index - 1;
}

static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
}

static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

#ifndef CONFIG_KVM_BOOKE_HV
	if (!usermode) {
		/* Guest is in supervisor mode,
		 * so we need to translate guest
		 * supervisor permissions into user permissions. */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}
	mas3 |= E500_TLB_SUPER_PERM_MASK;
#endif
	return mas3;
}

static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
	return mas2 & MAS2_ATTRIB_MASK;
#endif
}

/*
 * writing shadow tlb entry to host TLB
 */
static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
				     uint32_t mas0)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS0, mas0);
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
	mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
	mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_MAS8, stlbe->mas8);
#endif
	asm volatile("isync; tlbwe" : : : "memory");

#ifdef CONFIG_KVM_BOOKE_HV
	/* Must clear mas8 for other host tlbwe's */
	mtspr(SPRN_MAS8, 0);
	isync();
#endif
	local_irq_restore(flags);

	trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
	                              stlbe->mas2, stlbe->mas7_3);
}
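/*
 * Background sketch, summarizing the usual Book3E MAS ("MMU assist")
 * register conventions: MAS0 selects the TLB array and entry slot,
 * MAS1 carries valid/TS/TID/TSIZE, MAS2 the effective page number plus
 * WIMGE attributes, MAS3/MAS7 the low/high halves of the real address
 * plus permission bits, and MAS8 (hypervisor builds only) the
 * guest-state bits.  tlbwe commits whatever is currently in the MAS
 * registers, which is why the whole sequence above runs with
 * interrupts disabled.
 */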
/*
 * Acquire a mas0 with victim hint, as if we just took a TLB miss.
 *
 * We don't care about the address we're searching for, other than that it's
 * in the right set and is not present in the TLB.  Using a zero PID and a
 * userspace address means we don't have to set and then restore MAS5, or
 * calculate a proper MAS6 value.
 */
static u32 get_host_mas0(unsigned long eaddr)
{
	unsigned long flags;
	u32 mas0;

	local_irq_save(flags);
	mtspr(SPRN_MAS6, 0);
	asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
	mas0 = mfspr(SPRN_MAS0);
	local_irq_restore(flags);

	return mas0;
}

/* sesel is for tlb1 only */
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
{
	u32 mas0;

	if (tlbsel == 0) {
		mas0 = get_host_mas0(stlbe->mas2);
		__write_host_tlbe(stlbe, mas0);
	} else {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(1) |
				  MAS0_ESEL(to_htlb1_esel(sesel)));
	}
}

#ifdef CONFIG_KVM_E500V2
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry magic;
	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
	unsigned int stid;
	pfn_t pfn;

	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
	get_page(pfn_to_page(pfn));

	preempt_disable();
	stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

	magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
	magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
		       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
	magic.mas8 = 0;

	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
	preempt_enable();
}
#endif

static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500,
				int tlbsel, int esel)
{
	struct kvm_book3e_206_tlb_entry *gtlbe =
		get_entry(vcpu_e500, tlbsel, esel);

	if (tlbsel == 1 &&
	    vcpu_e500->gtlb_priv[1][esel].ref.flags & E500_TLB_BITMAP) {
		u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
		int hw_tlb_indx;
		unsigned long flags;

		local_irq_save(flags);
		while (tmp) {
			hw_tlb_indx = __ilog2_u64(tmp & -tmp);
			mtspr(SPRN_MAS0,
			      MAS0_TLBSEL(1) |
			      MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
			mtspr(SPRN_MAS1, 0);
			asm volatile("tlbwe");
			vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
			tmp &= tmp - 1;
		}
		mb();
		vcpu_e500->g2h_tlb1_map[esel] = 0;
		vcpu_e500->gtlb_priv[1][esel].ref.flags &= ~E500_TLB_BITMAP;
		local_irq_restore(flags);

		return;
	}

	/* Guest tlbe is backed by at most one host tlbe per shadow pid. */
	kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
}

static int tlb0_set_base(gva_t addr, int sets, int ways)
{
	int set_base;

	set_base = (addr >> PAGE_SHIFT) & (sets - 1);
	set_base *= ways;

	return set_base;
}

static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
{
	return tlb0_set_base(addr, vcpu_e500->gtlb_params[0].sets,
			     vcpu_e500->gtlb_params[0].ways);
}

static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel = get_tlb_esel_bit(vcpu);

	if (tlbsel == 0) {
		esel &= vcpu_e500->gtlb_params[0].ways - 1;
		esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);
	} else {
		esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;
	}

	return esel;
}
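/*
 * Worked example of the set indexing above, with illustrative numbers:
 * for a 512-entry, 4-way guest TLB0 there are 128 sets, so
 * tlb0_set_base() takes bits PAGE_SHIFT..PAGE_SHIFT+6 of the address
 * and scales by the way count.  For eaddr 0x10003000 that is set
 * (0x10003 & 127) = 3, giving base index 12; the candidate entries for
 * that address then live at TLB0 indices 12..15.
 */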
/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
	int size = vcpu_e500->gtlb_params[tlbsel].entries;
	unsigned int set_base, offset;
	int i;

	if (tlbsel == 0) {
		set_base = gtlb0_set_base(vcpu_e500, eaddr);
		size = vcpu_e500->gtlb_params[0].ways;
	} else {
		if (eaddr < vcpu_e500->tlb1_min_eaddr ||
		    eaddr > vcpu_e500->tlb1_max_eaddr)
			return -1;
		set_base = 0;
	}

	offset = vcpu_e500->gtlb_offset[tlbsel];

	for (i = 0; i < size; i++) {
		struct kvm_book3e_206_tlb_entry *tlbe =
			&vcpu_e500->gtlb_arch[offset + set_base + i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as && as != -1)
			continue;

		return set_base + i;
	}

	return -1;
}

static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
					 struct kvm_book3e_206_tlb_entry *gtlbe,
					 pfn_t pfn)
{
	ref->pfn = pfn;
	ref->flags = E500_TLB_VALID;

	if (tlbe_is_writable(gtlbe))
		kvm_set_pfn_dirty(pfn);
}

static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
	if (ref->flags & E500_TLB_VALID) {
		trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
		ref->flags = 0;
	}
}

static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	if (vcpu_e500->g2h_tlb1_map)
		memset(vcpu_e500->g2h_tlb1_map, 0,
		       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
	if (vcpu_e500->h2g_tlb1_rmap)
		memset(vcpu_e500->h2g_tlb1_rmap, 0,
		       sizeof(unsigned int) * host_tlb_params[1].entries);
}

static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int tlbsel = 0;
	int i;

	for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
		struct tlbe_ref *ref =
			&vcpu_e500->gtlb_priv[tlbsel][i].ref;
		kvmppc_e500_ref_release(ref);
	}
}

static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int stlbsel = 1;
	int i;

	kvmppc_e500_tlbil_all(vcpu_e500);

	for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
		struct tlbe_ref *ref =
			&vcpu_e500->tlb_refs[stlbsel][i];
		kvmppc_e500_ref_release(ref);
	}

	clear_tlb_privs(vcpu_e500);
}

void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	clear_tlb_refs(vcpu_e500);
	clear_tlb1_bitmap(vcpu_e500);
}
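/*
 * The miss-delivery helper below mimics what e500 hardware does on a
 * TLB miss: it preloads the guest-visible MAS registers from the
 * MAS4/MAS6 defaults (victim hint, default TSIZE, current PID/AS) so
 * the guest's miss handler can fill in the entry and tlbwe with
 * minimal extra setup.  (Hedged summary; the authoritative behaviour
 * is in the e500 core reference manual.)
 */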
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
		unsigned int eaddr, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int victim, tsized;
	int tlbsel;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
	victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
	tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;

	vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
		| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
		| MAS1_TID(get_tlbmiss_tid(vcpu))
		| MAS1_TSIZE(tsized);
	vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
		| (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
	vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
	vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1)
		| (get_cur_pid(vcpu) << 16)
		| (as ? MAS6_SAS : 0);
}

/* TID must be supplied by the caller */
static inline void kvmppc_e500_setup_stlbe(
	struct kvm_vcpu *vcpu,
	struct kvm_book3e_206_tlb_entry *gtlbe,
	int tsize, struct tlbe_ref *ref, u64 gvaddr,
	struct kvm_book3e_206_tlb_entry *stlbe)
{
	pfn_t pfn = ref->pfn;
	u32 pr = vcpu->arch.shared->msr & MSR_PR;

	BUG_ON(!(ref->flags & E500_TLB_VALID));

	/* Force IPROT=0 for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN) |
		      e500_shadow_mas2_attrib(gtlbe->mas2, pr);
	stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
			e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);

#ifdef CONFIG_KVM_BOOKE_HV
	stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
#endif
}

static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
	int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
	struct tlbe_ref *ref)
{
	struct kvm_memory_slot *slot;
	unsigned long pfn = 0; /* silence GCC warning */
	unsigned long hva;
	int pfnmap = 0;
	int tsize = BOOK3E_PAGESZ_4K;

	/*
	 * Translate guest physical to true physical, acquiring
	 * a page reference if it is normal, non-reserved memory.
	 *
	 * gfn_to_memslot() must succeed because otherwise we wouldn't
	 * have gotten this far.  Eventually we should just pass the slot
	 * pointer through from the first lookup.
	 */
	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
	hva = gfn_to_hva_memslot(slot, gfn);

	if (tlbsel == 1) {
		struct vm_area_struct *vma;
		down_read(&current->mm->mmap_sem);

		vma = find_vma(current->mm, hva);
		if (vma && hva >= vma->vm_start &&
		    (vma->vm_flags & VM_PFNMAP)) {
			/*
			 * This VMA is a physically contiguous region (e.g.
			 * /dev/mem) that bypasses normal Linux page
			 * management.  Find the overlap between the
			 * vma and the memslot.
			 */
			unsigned long start, end;
			unsigned long slot_start, slot_end;

			pfnmap = 1;

			start = vma->vm_pgoff;
			end = start +
			      ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

			slot_start = pfn - (gfn - slot->base_gfn);
			slot_end = slot_start + slot->npages;

			if (start < slot_start)
				start = slot_start;
			if (end > slot_end)
				end = slot_end;

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

			/*
			 * Now find the largest tsize (up to what the guest
			 * requested) that will cover gfn, stay within the
			 * range, and for which gfn and pfn are mutually
			 * aligned.
			 */
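			/*
			 * Illustrative arithmetic, assuming the MAV 1.0
			 * TSIZE encoding: tsize is log2(page bytes) - 10,
			 * so tsize_pages = 1 << (tsize - 2) is the size in
			 * 4K pages.  A guest asking for TSIZE 7 (128K) is
			 * first rounded down to 6 (64K, 16 pages); the loop
			 * then tries 64K and 16K before falling back to 4K
			 * if the PFNMAP range or gfn/pfn alignment rules a
			 * larger size out.
			 */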
			for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
				unsigned long gfn_start, gfn_end, tsize_pages;
				tsize_pages = 1 << (tsize - 2);

				gfn_start = gfn & ~(tsize_pages - 1);
				gfn_end = gfn_start + tsize_pages;

				if (gfn_start + pfn - gfn < start)
					continue;
				if (gfn_end + pfn - gfn > end)
					continue;
				if ((gfn & (tsize_pages - 1)) !=
				    (pfn & (tsize_pages - 1)))
					continue;

				gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
				pfn &= ~(tsize_pages - 1);
				break;
			}
		} else if (vma && hva >= vma->vm_start &&
			   (vma->vm_flags & VM_HUGETLB)) {
			unsigned long psize = vma_kernel_pagesize(vma);

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * Take the largest page size that satisfies both host
			 * and guest mapping
			 */
			tsize = min(__ilog2(psize) - 10, tsize);

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
		}

		up_read(&current->mm->mmap_sem);
	}

	if (likely(!pfnmap)) {
		unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
		pfn = gfn_to_pfn_memslot(slot, gfn);
		if (is_error_noslot_pfn(pfn)) {
			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
					(long)gfn);
			return;
		}

		/* Align guest and physical address to page map boundaries */
		pfn &= ~(tsize_pages - 1);
		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
	}

	/* Drop old ref and setup new one. */
	kvmppc_e500_ref_release(ref);
	kvmppc_e500_ref_setup(ref, gtlbe, pfn);

	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
				ref, gvaddr, stlbe);

	/* Clear i-cache for new pages */
	kvmppc_mmu_flush_icache(pfn);

	/* Drop refcount on page, so that mmu notifiers can clear it */
	kvm_release_pfn_clean(pfn);
}

/* XXX only map the one-one case, for now use TLB0 */
static void kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
				 int esel,
				 struct kvm_book3e_206_tlb_entry *stlbe)
{
	struct kvm_book3e_206_tlb_entry *gtlbe;
	struct tlbe_ref *ref;

	gtlbe = get_entry(vcpu_e500, 0, esel);
	ref = &vcpu_e500->gtlb_priv[0][esel].ref;

	kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
			gtlbe, 0, stlbe, ref);
}
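/*
 * Flow sketch for the mapping helpers, using the usual KVM naming: a
 * guest tlbwe produces a "gtlbe" (guest TLB entry);
 * kvmppc_e500_shadow_map() resolves gfn to pfn through the memslot,
 * picks a host page size, and fills an "stlbe" (shadow entry) that
 * write_host_tlbe() later pushes into the real TLB.
 * kvmppc_e500_tlb0_map() above is the TLB0 flavour; the TLB1 flavour
 * below additionally maintains the guest-to-host bitmap and reverse map.
 */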
/* Caller must ensure that the specified guest TLB entry is safe to insert
 * into the shadow TLB. */
/* XXX for both one-one and one-to-many, for now use TLB1 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
	struct tlbe_ref *ref;
	unsigned int victim;

	victim = vcpu_e500->host_tlb1_nv++;

	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
		vcpu_e500->host_tlb1_nv = 0;

	ref = &vcpu_e500->tlb_refs[1][victim];
	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref);

	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << victim;
	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
	if (vcpu_e500->h2g_tlb1_rmap[victim]) {
		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[victim];
		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << victim);
	}
	vcpu_e500->h2g_tlb1_rmap[victim] = esel;

	return victim;
}

static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int size = vcpu_e500->gtlb_params[1].entries;
	unsigned int offset;
	gva_t eaddr;
	int i;

	vcpu_e500->tlb1_min_eaddr = ~0UL;
	vcpu_e500->tlb1_max_eaddr = 0;
	offset = vcpu_e500->gtlb_offset[1];

	for (i = 0; i < size; i++) {
		struct kvm_book3e_206_tlb_entry *tlbe =
			&vcpu_e500->gtlb_arch[offset + i];

		if (!get_tlb_v(tlbe))
			continue;

		eaddr = get_tlb_eaddr(tlbe);
		vcpu_e500->tlb1_min_eaddr =
			min(vcpu_e500->tlb1_min_eaddr, eaddr);

		eaddr = get_tlb_end(tlbe);
		vcpu_e500->tlb1_max_eaddr =
			max(vcpu_e500->tlb1_max_eaddr, eaddr);
	}
}

static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500,
		struct kvm_book3e_206_tlb_entry *gtlbe)
{
	unsigned long start, end, size;

	size = get_tlb_bytes(gtlbe);
	start = get_tlb_eaddr(gtlbe) & ~(size - 1);
	end = start + size - 1;

	return vcpu_e500->tlb1_min_eaddr == start ||
	       vcpu_e500->tlb1_max_eaddr == end;
}

/* This function is supposed to be called for adding a new valid tlb entry */
static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu,
		struct kvm_book3e_206_tlb_entry *gtlbe)
{
	unsigned long start, end, size;
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	if (!get_tlb_v(gtlbe))
		return;

	size = get_tlb_bytes(gtlbe);
	start = get_tlb_eaddr(gtlbe) & ~(size - 1);
	end = start + size - 1;

	vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start);
	vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end);
}

static inline int kvmppc_e500_gtlbe_invalidate(
				struct kvmppc_vcpu_e500 *vcpu_e500,
				int tlbsel, int esel)
{
	struct kvm_book3e_206_tlb_entry *gtlbe =
		get_entry(vcpu_e500, tlbsel, esel);

	if (unlikely(get_tlb_iprot(gtlbe)))
		return -1;

	if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
		kvmppc_recalc_tlb1map_range(vcpu_e500);

	gtlbe->mas1 = 0;

	return 0;
}
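/*
 * The kvmppc_e500_emul_* functions below back the trapped guest TLB
 * management operations (mtspr to MMUCSR0, and the tlbivax, tlbilx,
 * tlbre, tlbsx and tlbwe instructions).  Each one updates the software
 * guest TLB and/or drops host shadow entries, then returns
 * EMULATE_DONE so the guest can be resumed.
 */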
int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
	int esel;

	if (value & MMUCSR0_TLB0FI)
		for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
	if (value & MMUCSR0_TLB1FI)
		for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

	/* Invalidate all vcpu id mappings */
	kvmppc_e500_tlbil_all(vcpu_e500);

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int ia;
	int esel, tlbsel;

	ia = (ea >> 2) & 0x1;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (ea >> 3) & 0x1;

	if (ia) {
		/* invalidate all entries */
		for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries;
		     esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	} else {
		ea &= 0xfffff000;
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
				get_cur_pid(vcpu), -1);
		if (esel >= 0)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	/* Invalidate all vcpu id mappings */
	kvmppc_e500_tlbil_all(vcpu_e500);

	return EMULATE_DONE;
}

static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
		       int pid, int type)
{
	struct kvm_book3e_206_tlb_entry *tlbe;
	int tid, esel;

	/* invalidate all entries */
	for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) {
		tlbe = get_entry(vcpu_e500, tlbsel, esel);
		tid = get_tlb_tid(tlbe);
		if (type == 0 || tid == pid) {
			inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
		}
	}
}

static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid,
		       gva_t ea)
{
	int tlbsel, esel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1);
		if (esel >= 0) {
			inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
			break;
		}
	}
}

int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int pid = get_cur_spid(vcpu);

	if (type == 0 || type == 1) {
		tlbilx_all(vcpu_e500, 0, pid, type);
		tlbilx_all(vcpu_e500, 1, pid, type);
	} else if (type == 3) {
		tlbilx_one(vcpu_e500, pid, ea);
	}

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, esel;
	struct kvm_book3e_206_tlb_entry *gtlbe;

	tlbsel = get_tlb_tlbsel(vcpu);
	esel = get_tlb_esel(vcpu, tlbsel);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);
	vcpu->arch.shared->mas0 &= ~MAS0_NV(~0);
	vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu->arch.shared->mas1 = gtlbe->mas1;
	vcpu->arch.shared->mas2 = gtlbe->mas2;
	vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;

	return EMULATE_DONE;
}
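/*
 * Sketch of the tlbsx emulation below: on a hit, MAS0 through MAS7_3
 * are loaded from the matching guest entry, mirroring what hardware
 * does; on a miss, the MAS registers are preloaded from the MAS4/MAS6
 * defaults much like kvmppc_e500_deliver_tlb_miss(), so that a
 * following tlbwe lands in a sensible victim slot.
 */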
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int as = !!get_cur_sas(vcpu);
	unsigned int pid = get_cur_spid(vcpu);
	int esel, tlbsel;
	struct kvm_book3e_206_tlb_entry *gtlbe = NULL;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
		if (esel >= 0) {
			gtlbe = get_entry(vcpu_e500, tlbsel, esel);
			break;
		}
	}

	if (gtlbe) {
		esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1;

		vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu->arch.shared->mas1 = gtlbe->mas1;
		vcpu->arch.shared->mas2 = gtlbe->mas2;
		vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
	} else {
		int victim;

		/* since we only have two TLBs, only lower bit is used. */
		tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1;
		victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;

		vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel)
			| MAS0_ESEL(victim)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu->arch.shared->mas1 =
			  (vcpu->arch.shared->mas6 & MAS6_SPID0)
			| (vcpu->arch.shared->mas6 & (MAS6_SAS ? MAS1_TS : 0))
			| (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0));
		vcpu->arch.shared->mas2 &= MAS2_EPN;
		vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 &
					   MAS2_ATTRIB_MASK;
		vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 |
					     MAS3_U2 | MAS3_U3;
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
}

/* sesel is for tlb1 only */
static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
			struct kvm_book3e_206_tlb_entry *gtlbe,
			struct kvm_book3e_206_tlb_entry *stlbe,
			int stlbsel, int sesel)
{
	int stid;

	preempt_disable();
	stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);

	stlbe->mas1 |= MAS1_TID(stid);
	write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
	preempt_enable();
}
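/*
 * Outline of the tlbwe emulation below (a summary of the code, not a
 * spec): copy MAS1/MAS2/MAS7_3 from the shared page into the guest TLB
 * array, then eagerly create one host mapping when the entry is
 * "host safe".  TLB0 entries are forced to 4K; TLB1 entries get a
 * single 4K shadow mapping now and grow lazily via kvmppc_mmu_map()
 * on later misses.
 */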
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
	int tlbsel, esel, stlbsel, sesel;
	int recal = 0;

	tlbsel = get_tlb_tlbsel(vcpu);
	esel = get_tlb_esel(vcpu, tlbsel);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);

	if (get_tlb_v(gtlbe)) {
		inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
		if ((tlbsel == 1) &&
			kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
			recal = 1;
	}

	gtlbe->mas1 = vcpu->arch.shared->mas1;
	gtlbe->mas2 = vcpu->arch.shared->mas2;
	if (!(vcpu->arch.shared->msr & MSR_CM))
		gtlbe->mas2 &= 0xffffffffUL;
	gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;

	trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
	                              gtlbe->mas2, gtlbe->mas7_3);

	if (tlbsel == 1) {
		/*
		 * If a valid tlb1 entry is overwritten then recalculate the
		 * min/max TLB1 map address range otherwise no need to look
		 * in tlb1 array.
		 */
		if (recal)
			kvmppc_recalc_tlb1map_range(vcpu_e500);
		else
			kvmppc_set_tlb1map_range(vcpu, gtlbe);
	}

	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
	if (tlbe_is_host_safe(vcpu, gtlbe)) {
		u64 eaddr;
		u64 raddr;

		switch (tlbsel) {
		case 0:
			/* TLB0 */
			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);

			stlbsel = 0;
			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
			sesel = 0; /* unused */

			break;

		case 1:
			/* TLB1 */
			eaddr = get_tlb_eaddr(gtlbe);
			raddr = get_tlb_raddr(gtlbe);

			/* Create a 4KB mapping on the host.
			 * If the guest wanted a large page,
			 * only the first 4KB is mapped here and the rest
			 * are mapped on the fly. */
			stlbsel = 1;
			sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
					raddr >> PAGE_SHIFT, gtlbe, &stlbe, esel);
			break;

		default:
			BUG();
		}

		write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
	return EMULATE_DONE;
}

static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
				  gva_t eaddr, unsigned int pid, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel, tlbsel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
		if (esel >= 0)
			return index_of(tlbsel, esel);
	}

	return -1;
}

/* 'linear_address' is actually an encoding of AS|PID|EADDR. */
int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
			       struct kvm_translation *tr)
{
	int index;
	gva_t eaddr;
	u8 pid;
	u8 as;

	eaddr = tr->linear_address;
	pid = (tr->linear_address >> 32) & 0xff;
	as = (tr->linear_address >> 40) & 0x1;

	index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as);
	if (index < 0) {
		tr->valid = 0;
		return 0;
	}

	tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
	/* XXX what does "writeable" and "usermode" even mean? */
	tr->valid = 1;

	return 0;
}

int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}

gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
		       gva_t eaddr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry *gtlbe;
	u64 pgmask;

	gtlbe = get_entry(vcpu_e500, tlbsel_of(index), esel_of(index));
	pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}
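/*
 * kvmppc_mmu_map() below is the lazy fault-in path: it is reached on a
 * host TLB miss for an address the guest TLB already maps, and it
 * re-creates the shadow entry (4K for TLB0, another 4K-or-larger piece
 * of a big TLB1 page) before the guest is restarted.  (Hedged summary;
 * the call site lives in the booke MMU fault handling.)
 */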
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
		    unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe_priv *priv;
	struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);
	int stlbsel, sesel;

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);

	switch (tlbsel) {
	case 0:
		stlbsel = 0;
		sesel = 0; /* unused */
		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];

		/* Only triggers after clear_tlb_refs */
		if (unlikely(!(priv->ref.flags & E500_TLB_VALID)))
			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
		else
			kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
						&priv->ref, eaddr, &stlbe);
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;

		stlbsel = 1;
		sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
					     gtlbe, &stlbe, esel);
		break;
	}

	default:
		BUG();
		break;
	}

	write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
}

/************* MMU Notifiers *************/

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	/*
	 * Flush all shadow tlb entries everywhere. This is slow, but
	 * we are 100% sure that we catch the page to be unmapped.
	 */
	kvm_flush_remote_tlbs(kvm);

	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	/* kvm_unmap_hva flushes everything anyway */
	kvm_unmap_hva(kvm, start);

	return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	kvm_unmap_hva(kvm, hva);
}

/*****************************************/

static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int i;

	clear_tlb1_bitmap(vcpu_e500);
	kfree(vcpu_e500->g2h_tlb1_map);

	clear_tlb_refs(vcpu_e500);
	kfree(vcpu_e500->gtlb_priv[0]);
	kfree(vcpu_e500->gtlb_priv[1]);

	if (vcpu_e500->shared_tlb_pages) {
		vfree((void *)(round_down((uintptr_t)vcpu_e500->gtlb_arch,
					  PAGE_SIZE)));

		for (i = 0; i < vcpu_e500->num_shared_tlb_pages; i++) {
			set_page_dirty_lock(vcpu_e500->shared_tlb_pages[i]);
			put_page(vcpu_e500->shared_tlb_pages[i]);
		}

		vcpu_e500->num_shared_tlb_pages = 0;

		kfree(vcpu_e500->shared_tlb_pages);
		vcpu_e500->shared_tlb_pages = NULL;
	} else {
		kfree(vcpu_e500->gtlb_arch);
	}

	vcpu_e500->gtlb_arch = NULL;
}

void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.mas0 = vcpu->arch.shared->mas0;
	sregs->u.e.mas1 = vcpu->arch.shared->mas1;
	sregs->u.e.mas2 = vcpu->arch.shared->mas2;
	sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
	sregs->u.e.mas4 = vcpu->arch.shared->mas4;
	sregs->u.e.mas6 = vcpu->arch.shared->mas6;

	sregs->u.e.mmucfg = vcpu->arch.mmucfg;
	sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0];
	sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1];
	sregs->u.e.tlbcfg[2] = 0;
	sregs->u.e.tlbcfg[3] = 0;
}

int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
		vcpu->arch.shared->mas0 = sregs->u.e.mas0;
		vcpu->arch.shared->mas1 = sregs->u.e.mas1;
		vcpu->arch.shared->mas2 = sregs->u.e.mas2;
		vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
		vcpu->arch.shared->mas4 = sregs->u.e.mas4;
		vcpu->arch.shared->mas6 = sregs->u.e.mas6;
	}

	return 0;
}
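/*
 * KVM_CONFIG_TLB sketch: userspace (e.g. QEMU) supplies a
 * kvm_book3e_206_tlb_params struct describing the desired guest TLB
 * geometry, plus a user-memory array that the kernel pins with
 * get_user_pages_fast() and maps with vmap().  That array then serves
 * directly as the guest TLB (gtlb_arch), so tlbwe/tlbre emulation and
 * userspace both see the same entries without copying.
 */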
int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_params params;
	char *virt;
	struct page **pages;
	struct tlbe_priv *privs[2] = {};
	u64 *g2h_bitmap = NULL;
	size_t array_len;
	u32 sets;
	int num_pages, ret, i;

	if (cfg->mmu_type != KVM_MMU_FSL_BOOKE_NOHV)
		return -EINVAL;

	if (copy_from_user(&params, (void __user *)(uintptr_t)cfg->params,
			   sizeof(params)))
		return -EFAULT;

	if (params.tlb_sizes[1] > 64)
		return -EINVAL;
	if (params.tlb_ways[1] != params.tlb_sizes[1])
		return -EINVAL;
	if (params.tlb_sizes[2] != 0 || params.tlb_sizes[3] != 0)
		return -EINVAL;
	if (params.tlb_ways[2] != 0 || params.tlb_ways[3] != 0)
		return -EINVAL;

	if (!is_power_of_2(params.tlb_ways[0]))
		return -EINVAL;

	sets = params.tlb_sizes[0] >> ilog2(params.tlb_ways[0]);
	if (!is_power_of_2(sets))
		return -EINVAL;

	array_len = params.tlb_sizes[0] + params.tlb_sizes[1];
	array_len *= sizeof(struct kvm_book3e_206_tlb_entry);

	if (cfg->array_len < array_len)
		return -EINVAL;

	num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) -
		    cfg->array / PAGE_SIZE;
	pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	ret = get_user_pages_fast(cfg->array, num_pages, 1, pages);
	if (ret < 0)
		goto err_pages;

	if (ret != num_pages) {
		num_pages = ret;
		ret = -EFAULT;
		goto err_put_page;
	}

	virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
	if (!virt) {
		ret = -ENOMEM;
		goto err_put_page;
	}

	privs[0] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[0],
			   GFP_KERNEL);
	privs[1] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[1],
			   GFP_KERNEL);

	if (!privs[0] || !privs[1]) {
		ret = -ENOMEM;
		goto err_privs;
	}

	g2h_bitmap = kzalloc(sizeof(u64) * params.tlb_sizes[1],
			     GFP_KERNEL);
	if (!g2h_bitmap) {
		ret = -ENOMEM;
		goto err_privs;
	}

	free_gtlb(vcpu_e500);

	vcpu_e500->gtlb_priv[0] = privs[0];
	vcpu_e500->gtlb_priv[1] = privs[1];
	vcpu_e500->g2h_tlb1_map = g2h_bitmap;

	vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *)
		(virt + (cfg->array & (PAGE_SIZE - 1)));

	vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0];
	vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1];

	vcpu_e500->gtlb_offset[0] = 0;
	vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];

	vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE;

	vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	if (params.tlb_sizes[0] <= 2048)
		vcpu->arch.tlbcfg[0] |= params.tlb_sizes[0];
	vcpu->arch.tlbcfg[0] |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;

	vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	vcpu->arch.tlbcfg[1] |= params.tlb_sizes[1];
	vcpu->arch.tlbcfg[1] |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;

	vcpu_e500->shared_tlb_pages = pages;
	vcpu_e500->num_shared_tlb_pages = num_pages;

	vcpu_e500->gtlb_params[0].ways = params.tlb_ways[0];
	vcpu_e500->gtlb_params[0].sets = sets;

	vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
	vcpu_e500->gtlb_params[1].sets = 1;

	kvmppc_recalc_tlb1map_range(vcpu_e500);
	return 0;

err_privs:
	kfree(privs[0]);
	kfree(privs[1]);

err_put_page:
	for (i = 0; i < num_pages; i++)
		put_page(pages[i]);

err_pages:
	kfree(pages);
	return ret;
}
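/*
 * KVM_DIRTY_TLB handler: userspace edits the shared TLB array
 * directly, then calls this ioctl so the kernel drops stale shadow
 * state and recomputes the TLB1 address range.  (Short summary; the
 * ABI is described in Documentation/virtual/kvm/api.txt.)
 */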
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *dirty)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	kvmppc_recalc_tlb1map_range(vcpu_e500);
	clear_tlb_refs(vcpu_e500);
	return 0;
}

int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct kvm_vcpu *vcpu = &vcpu_e500->vcpu;
	int entry_size = sizeof(struct kvm_book3e_206_tlb_entry);
	int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE;

	host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
	host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

	/*
	 * This should never happen on real e500 hardware, but is
	 * architecturally possible -- e.g. in some weird nested
	 * virtualization case.
	 */
	if (host_tlb_params[0].entries == 0 ||
	    host_tlb_params[1].entries == 0) {
		pr_err("%s: need to know host tlb size\n", __func__);
		return -ENODEV;
	}

	host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
				  TLBnCFG_ASSOC_SHIFT;
	host_tlb_params[1].ways = host_tlb_params[1].entries;

	if (!is_power_of_2(host_tlb_params[0].entries) ||
	    !is_power_of_2(host_tlb_params[0].ways) ||
	    host_tlb_params[0].entries < host_tlb_params[0].ways ||
	    host_tlb_params[0].ways == 0) {
		pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
		       __func__, host_tlb_params[0].entries,
		       host_tlb_params[0].ways);
		return -ENODEV;
	}

	host_tlb_params[0].sets =
		host_tlb_params[0].entries / host_tlb_params[0].ways;
	host_tlb_params[1].sets = 1;

	vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE;
	vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE;

	vcpu_e500->gtlb_params[0].ways = KVM_E500_TLB0_WAY_NUM;
	vcpu_e500->gtlb_params[0].sets =
		KVM_E500_TLB0_SIZE / KVM_E500_TLB0_WAY_NUM;

	vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE;
	vcpu_e500->gtlb_params[1].sets = 1;

	vcpu_e500->gtlb_arch = kmalloc(entries * entry_size, GFP_KERNEL);
	if (!vcpu_e500->gtlb_arch)
		return -ENOMEM;

	vcpu_e500->gtlb_offset[0] = 0;
	vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE;

	vcpu_e500->tlb_refs[0] =
		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
			GFP_KERNEL);
	if (!vcpu_e500->tlb_refs[0])
		goto err;

	vcpu_e500->tlb_refs[1] =
		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
			GFP_KERNEL);
	if (!vcpu_e500->tlb_refs[1])
		goto err;

	vcpu_e500->gtlb_priv[0] = kzalloc(sizeof(struct tlbe_ref) *
					  vcpu_e500->gtlb_params[0].entries,
					  GFP_KERNEL);
	if (!vcpu_e500->gtlb_priv[0])
		goto err;

	vcpu_e500->gtlb_priv[1] = kzalloc(sizeof(struct tlbe_ref) *
					  vcpu_e500->gtlb_params[1].entries,
					  GFP_KERNEL);
	if (!vcpu_e500->gtlb_priv[1])
		goto err;

	vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(u64) *
					  vcpu_e500->gtlb_params[1].entries,
					  GFP_KERNEL);
	if (!vcpu_e500->g2h_tlb1_map)
		goto err;

	vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
					   host_tlb_params[1].entries,
					   GFP_KERNEL);
	if (!vcpu_e500->h2g_tlb1_rmap)
		goto err;
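	/*
	 * The guest-visible TLBnCFG values synthesized below keep the
	 * host's feature bits but substitute the emulated geometry
	 * (KVM_E500_TLB0_SIZE entries, KVM_E500_TLB0_WAY_NUM ways), so
	 * the guest sizes its TLB handling for the virtual MMU rather
	 * than the host one.
	 */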
	/* Init TLB configuration register */
	vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
			       ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	vcpu->arch.tlbcfg[0] |= vcpu_e500->gtlb_params[0].entries;
	vcpu->arch.tlbcfg[0] |=
		vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT;

	vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) &
			       ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	vcpu->arch.tlbcfg[1] |= vcpu_e500->gtlb_params[1].entries;
	vcpu->arch.tlbcfg[1] |=
		vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT;

	kvmppc_recalc_tlb1map_range(vcpu_e500);
	return 0;

err:
	free_gtlb(vcpu_e500);
	kfree(vcpu_e500->tlb_refs[0]);
	kfree(vcpu_e500->tlb_refs[1]);
	return -1;
}

void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	free_gtlb(vcpu_e500);
	kfree(vcpu_e500->h2g_tlb1_rmap);
	kfree(vcpu_e500->tlb_refs[0]);
	kfree(vcpu_e500->tlb_refs[1]);
}