Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: Rework H_PUT_TCE/H_GET_TCE handlers

This reworks the existing H_PUT_TCE/H_GET_TCE handlers so that the
following patches apply more cleanly.

This moves the ioba boundary check into a helper and adds a check that the
least significant bits of ioba, which must be zero, are in fact zero.

The patch is mostly mechanical (only the check on the least significant
ioba bits is new), so no change in behaviour is expected.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>

authored by

Alexey Kardashevskiy and committed by
Paul Mackerras
fcbb2ce6 e9ab1a1c

+71 -38
+71 -38
arch/powerpc/kvm/book3s_64_vio_hv.c
··· 35 35 #include <asm/ppc-opcode.h> 36 36 #include <asm/kvm_host.h> 37 37 #include <asm/udbg.h> 38 + #include <asm/iommu.h> 38 39 39 40 #define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64)) 41 + 42 + /* 43 + * Finds a TCE table descriptor by LIOBN. 44 + * 45 + * WARNING: This will be called in real or virtual mode on HV KVM and virtual 46 + * mode on PR KVM 47 + */ 48 + static struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu, 49 + unsigned long liobn) 50 + { 51 + struct kvm *kvm = vcpu->kvm; 52 + struct kvmppc_spapr_tce_table *stt; 53 + 54 + list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) 55 + if (stt->liobn == liobn) 56 + return stt; 57 + 58 + return NULL; 59 + } 60 + 61 + /* 62 + * Validates IO address. 63 + * 64 + * WARNING: This will be called in real-mode on HV KVM and virtual 65 + * mode on PR KVM 66 + */ 67 + static long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt, 68 + unsigned long ioba, unsigned long npages) 69 + { 70 + unsigned long mask = (1ULL << IOMMU_PAGE_SHIFT_4K) - 1; 71 + unsigned long idx = ioba >> IOMMU_PAGE_SHIFT_4K; 72 + unsigned long size = stt->window_size >> IOMMU_PAGE_SHIFT_4K; 73 + 74 + if ((ioba & mask) || (idx + npages > size) || (idx + npages < idx)) 75 + return H_PARAMETER; 76 + 77 + return H_SUCCESS; 78 + } 40 79 41 80 /* WARNING: This will be called in real-mode on HV KVM and virtual 42 81 * mode on PR KVM ··· 83 44 long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, 84 45 unsigned long ioba, unsigned long tce) 85 46 { 86 - struct kvm *kvm = vcpu->kvm; 87 - struct kvmppc_spapr_tce_table *stt; 47 + struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn); 48 + long ret; 49 + unsigned long idx; 50 + struct page *page; 51 + u64 *tbl; 88 52 89 53 /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */ 90 54 /* liobn, ioba, tce); */ 91 55 92 - list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) { 93 - if (stt->liobn == liobn) { 94 - unsigned long idx 
= ioba >> SPAPR_TCE_SHIFT; 95 - struct page *page; 96 - u64 *tbl; 56 + if (!stt) 57 + return H_TOO_HARD; 97 58 98 - /* udbg_printf("H_PUT_TCE: liobn 0x%lx => stt=%p window_size=0x%x\n", */ 99 - /* liobn, stt, stt->window_size); */ 100 - if (ioba >= stt->window_size) 101 - return H_PARAMETER; 59 + ret = kvmppc_ioba_validate(stt, ioba, 1); 60 + if (ret != H_SUCCESS) 61 + return ret; 102 62 103 - page = stt->pages[idx / TCES_PER_PAGE]; 104 - tbl = (u64 *)page_address(page); 63 + idx = ioba >> SPAPR_TCE_SHIFT; 64 + page = stt->pages[idx / TCES_PER_PAGE]; 65 + tbl = (u64 *)page_address(page); 105 66 106 - /* FIXME: Need to validate the TCE itself */ 107 - /* udbg_printf("tce @ %p\n", &tbl[idx % TCES_PER_PAGE]); */ 108 - tbl[idx % TCES_PER_PAGE] = tce; 109 - return H_SUCCESS; 110 - } 111 - } 67 + /* FIXME: Need to validate the TCE itself */ 68 + /* udbg_printf("tce @ %p\n", &tbl[idx % TCES_PER_PAGE]); */ 69 + tbl[idx % TCES_PER_PAGE] = tce; 112 70 113 - /* Didn't find the liobn, punt it to userspace */ 114 - return H_TOO_HARD; 71 + return H_SUCCESS; 115 72 } 116 73 EXPORT_SYMBOL_GPL(kvmppc_h_put_tce); 117 74 118 75 long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, 119 76 unsigned long ioba) 120 77 { 121 - struct kvm *kvm = vcpu->kvm; 122 - struct kvmppc_spapr_tce_table *stt; 78 + struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn); 79 + long ret; 80 + unsigned long idx; 81 + struct page *page; 82 + u64 *tbl; 123 83 124 - list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) { 125 - if (stt->liobn == liobn) { 126 - unsigned long idx = ioba >> SPAPR_TCE_SHIFT; 127 - struct page *page; 128 - u64 *tbl; 84 + if (!stt) 85 + return H_TOO_HARD; 129 86 130 - if (ioba >= stt->window_size) 131 - return H_PARAMETER; 87 + ret = kvmppc_ioba_validate(stt, ioba, 1); 88 + if (ret != H_SUCCESS) 89 + return ret; 132 90 133 - page = stt->pages[idx / TCES_PER_PAGE]; 134 - tbl = (u64 *)page_address(page); 91 + idx = ioba >> SPAPR_TCE_SHIFT; 92 + page = 
stt->pages[idx / TCES_PER_PAGE]; 93 + tbl = (u64 *)page_address(page); 135 94 136 - vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE]; 137 - return H_SUCCESS; 138 - } 139 - } 95 + vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE]; 140 96 141 - /* Didn't find the liobn, punt it to userspace */ 142 - return H_TOO_HARD; 97 + return H_SUCCESS; 143 98 } 144 99 EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);