Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: Move reusable bits of H_PUT_TCE handler to helpers

Upcoming multi-tce support (H_PUT_TCE_INDIRECT/H_STUFF_TCE hypercalls)
will validate TCE (not to have unexpected bits) and IO address
(to be within the DMA window boundaries).

This introduces helpers to validate TCE and IO address. The helpers are
exported as they compile into vmlinux (to work in realmode) and will be
used later by KVM kernel module in virtual mode.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>

Authored by Alexey Kardashevskiy; committed by Paul Mackerras.
5ee7af18 462ee11e

+83 -10
+4
arch/powerpc/include/asm/kvm_ppc.h
/*
 * Hunk in arch/powerpc/include/asm/kvm_ppc.h (reconstructed from the
 * garbled diff view; unchanged surrounding declarations are elided).
 *
 * Adds prototypes for the TCE/IOBA validation helpers so the KVM module
 * (virtual mode) can call the vmlinux-resident (realmode-capable) code.
 *
 * Fix: the prototype of kvmppc_tce_validate named its first parameter
 * "tt" while the definition in book3s_64_vio_hv.c uses "stt"; the names
 * are made consistent (kernel style: keep meaningful, matching parameter
 * names in prototypes).
 */
extern long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long ioba, unsigned long npages);
extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long tce);
+79 -10
arch/powerpc/kvm/book3s_64_vio_hv.c
··· 36 36 #include <asm/kvm_host.h> 37 37 #include <asm/udbg.h> 38 38 #include <asm/iommu.h> 39 + #include <asm/tce.h> 39 40 40 41 #define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64)) 41 42 ··· 65 64 * WARNING: This will be called in real-mode on HV KVM and virtual 66 65 * mode on PR KVM 67 66 */ 68 - static long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt, 67 + long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt, 69 68 unsigned long ioba, unsigned long npages) 70 69 { 71 70 unsigned long mask = (1ULL << IOMMU_PAGE_SHIFT_4K) - 1; ··· 77 76 78 77 return H_SUCCESS; 79 78 } 79 + EXPORT_SYMBOL_GPL(kvmppc_ioba_validate); 80 + 81 + /* 82 + * Validates TCE address. 83 + * At the moment flags and page mask are validated. 84 + * As the host kernel does not access those addresses (just puts them 85 + * to the table and user space is supposed to process them), we can skip 86 + * checking other things (such as TCE is a guest RAM address or the page 87 + * was actually allocated). 88 + * 89 + * WARNING: This will be called in real-mode on HV KVM and virtual 90 + * mode on PR KVM 91 + */ 92 + long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce) 93 + { 94 + unsigned long mask = 95 + ~(IOMMU_PAGE_MASK_4K | TCE_PCI_WRITE | TCE_PCI_READ); 96 + 97 + if (tce & mask) 98 + return H_PARAMETER; 99 + 100 + return H_SUCCESS; 101 + } 102 + EXPORT_SYMBOL_GPL(kvmppc_tce_validate); 103 + 104 + /* Note on the use of page_address() in real mode, 105 + * 106 + * It is safe to use page_address() in real mode on ppc64 because 107 + * page_address() is always defined as lowmem_page_address() 108 + * which returns __va(PFN_PHYS(page_to_pfn(page))) which is arithmetic 109 + * operation and does not access page struct. 110 + * 111 + * Theoretically page_address() could be defined different 112 + * but either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL 113 + * would have to be enabled. 
114 + * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64, 115 + * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only and only 116 + * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP 117 + * is not expected to be enabled on ppc32, page_address() 118 + * is safe for ppc32 as well. 119 + * 120 + * WARNING: This will be called in real-mode on HV KVM and virtual 121 + * mode on PR KVM 122 + */ 123 + static u64 *kvmppc_page_address(struct page *page) 124 + { 125 + #if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL) 126 + #error TODO: fix to avoid page_address() here 127 + #endif 128 + return (u64 *) page_address(page); 129 + } 130 + 131 + /* 132 + * Handles TCE requests for emulated devices. 133 + * Puts guest TCE values to the table and expects user space to convert them. 134 + * Called in both real and virtual modes. 135 + * Cannot fail so kvmppc_tce_validate must be called before it. 136 + * 137 + * WARNING: This will be called in real-mode on HV KVM and virtual 138 + * mode on PR KVM 139 + */ 140 + void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt, 141 + unsigned long idx, unsigned long tce) 142 + { 143 + struct page *page; 144 + u64 *tbl; 145 + 146 + page = stt->pages[idx / TCES_PER_PAGE]; 147 + tbl = kvmppc_page_address(page); 148 + 149 + tbl[idx % TCES_PER_PAGE] = tce; 150 + } 151 + EXPORT_SYMBOL_GPL(kvmppc_tce_put); 80 152 81 153 /* WARNING: This will be called in real-mode on HV KVM and virtual 82 154 * mode on PR KVM ··· 159 85 { 160 86 struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn); 161 87 long ret; 162 - unsigned long idx; 163 - struct page *page; 164 - u64 *tbl; 165 88 166 89 /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */ 167 90 /* liobn, ioba, tce); */ ··· 170 99 if (ret != H_SUCCESS) 171 100 return ret; 172 101 173 - idx = ioba >> IOMMU_PAGE_SHIFT_4K; 174 - page = stt->pages[idx / TCES_PER_PAGE]; 175 - tbl = (u64 *)page_address(page); 102 + ret = kvmppc_tce_validate(stt, tce); 103 + if 
(ret != H_SUCCESS) 104 + return ret; 176 105 177 - /* FIXME: Need to validate the TCE itself */ 178 - /* udbg_printf("tce @ %p\n", &tbl[idx % TCES_PER_PAGE]); */ 179 - tbl[idx % TCES_PER_PAGE] = tce; 106 + kvmppc_tce_put(stt, ioba >> IOMMU_PAGE_SHIFT_4K, tce); 180 107 181 108 return H_SUCCESS; 182 109 }