Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: ia64: add directed mmio range support for kvm guests

Using vt-d, kvm guests can be assigned physical devices, so
this patch introduces a new mmio type (directed mmio)
to handle its mmio access.

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>

authored by

Xiantao Zhang and committed by
Avi Kivity
b010eb51 1cbea809

+33 -22
+1 -1
arch/ia64/include/asm/kvm_host.h
··· 132 132 #define GPFN_IOSAPIC (4UL << 60) /* IOSAPIC base */ 133 133 #define GPFN_LEGACY_IO (5UL << 60) /* Legacy I/O base */ 134 134 #define GPFN_GFW (6UL << 60) /* Guest Firmware */ 135 - #define GPFN_HIGH_MMIO (7UL << 60) /* High MMIO range */ 135 + #define GPFN_PHYS_MMIO (7UL << 60) /* Directed MMIO Range */ 136 136 137 137 #define GPFN_IO_MASK (7UL << 60) /* Guest pfn is I/O type */ 138 138 #define GPFN_INV_MASK (1UL << 63) /* Guest pfn is invalid */
+2 -2
arch/ia64/kvm/kvm-ia64.c
··· 1447 1447 if (!kvm_is_mmio_pfn(pfn)) { 1448 1448 kvm_set_pmt_entry(kvm, base_gfn + i, 1449 1449 pfn << PAGE_SHIFT, 1450 - _PAGE_MA_WB); 1450 + _PAGE_AR_RWX | _PAGE_MA_WB); 1451 1451 memslot->rmap[i] = (unsigned long)pfn_to_page(pfn); 1452 1452 } else { 1453 1453 kvm_set_pmt_entry(kvm, base_gfn + i, 1454 - GPFN_LOW_MMIO | (pfn << PAGE_SHIFT), 1454 + GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT), 1455 1455 _PAGE_MA_UC); 1456 1456 memslot->rmap[i] = 0; 1457 1457 }
+13 -13
arch/ia64/kvm/vcpu.h
··· 313 313 trp->rid = rid; 314 314 } 315 315 316 - extern u64 kvm_lookup_mpa(u64 gpfn); 317 - extern u64 kvm_gpa_to_mpa(u64 gpa); 316 + extern u64 kvm_get_mpt_entry(u64 gpfn); 318 317 319 - /* Return I/O type if trye */ 320 - #define __gpfn_is_io(gpfn) \ 321 - ({ \ 322 - u64 pte, ret = 0; \ 323 - pte = kvm_lookup_mpa(gpfn); \ 324 - if (!(pte & GPFN_INV_MASK)) \ 325 - ret = pte & GPFN_IO_MASK; \ 326 - ret; \ 327 - }) 328 - 318 + /* Return I/ */ 319 + static inline u64 __gpfn_is_io(u64 gpfn) 320 + { 321 + u64 pte; 322 + pte = kvm_get_mpt_entry(gpfn); 323 + if (!(pte & GPFN_INV_MASK)) { 324 + pte = pte & GPFN_IO_MASK; 325 + if (pte != GPFN_PHYS_MMIO) 326 + return pte; 327 + } 328 + return 0; 329 + } 329 330 #endif 330 - 331 331 #define IA64_NO_FAULT 0 332 332 #define IA64_FAULT 1 333 333
+17 -6
arch/ia64/kvm/vtlb.c
··· 390 390 391 391 u64 translate_phy_pte(u64 *pte, u64 itir, u64 va) 392 392 { 393 - u64 ps, ps_mask, paddr, maddr; 393 + u64 ps, ps_mask, paddr, maddr, io_mask; 394 394 union pte_flags phy_pte; 395 395 396 396 ps = itir_ps(itir); ··· 398 398 phy_pte.val = *pte; 399 399 paddr = *pte; 400 400 paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask); 401 - maddr = kvm_lookup_mpa(paddr >> PAGE_SHIFT); 402 - if (maddr & GPFN_IO_MASK) { 401 + maddr = kvm_get_mpt_entry(paddr >> PAGE_SHIFT); 402 + io_mask = maddr & GPFN_IO_MASK; 403 + if (io_mask && (io_mask != GPFN_PHYS_MMIO)) { 403 404 *pte |= VTLB_PTE_IO; 404 405 return -1; 405 406 } ··· 419 418 u64 ifa, int type) 420 419 { 421 420 u64 ps; 422 - u64 phy_pte; 421 + u64 phy_pte, io_mask, index; 423 422 union ia64_rr vrr, mrr; 424 423 int ret = 0; 425 424 ··· 427 426 vrr.val = vcpu_get_rr(v, ifa); 428 427 mrr.val = ia64_get_rr(ifa); 429 428 429 + index = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT; 430 + io_mask = kvm_get_mpt_entry(index) & GPFN_IO_MASK; 430 431 phy_pte = translate_phy_pte(&pte, itir, ifa); 431 432 432 433 /* Ensure WB attribute if pte is related to a normal mem page, 433 434 * which is required by vga acceleration since qemu maps shared 434 435 * vram buffer with WB. 435 436 */ 436 - if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT)) { 437 + if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT) && 438 + io_mask != GPFN_PHYS_MMIO) { 437 439 pte &= ~_PAGE_MA_MASK; 438 440 phy_pte &= ~_PAGE_MA_MASK; 439 441 } ··· 570 566 } 571 567 } 572 568 573 - u64 kvm_lookup_mpa(u64 gpfn) 569 + u64 kvm_get_mpt_entry(u64 gpfn) 574 570 { 575 571 u64 *base = (u64 *) KVM_P2M_BASE; 576 572 return *(base + gpfn); 573 + } 574 + 575 + u64 kvm_lookup_mpa(u64 gpfn) 576 + { 577 + u64 maddr; 578 + maddr = kvm_get_mpt_entry(gpfn); 579 + return maddr&_PAGE_PPN_MASK; 577 580 } 578 581 579 582 u64 kvm_gpa_to_mpa(u64 gpa)