Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: arm64: Tweak parameters of guest cache maintenance functions

Adjust the parameter "kvm_pfn_t pfn" of __clean_dcache_guest_page
and __invalidate_icache_guest_page to "void *va", which paves the
way for converting these two guest CMO functions into callbacks in
structure kvm_pgtable_mm_ops. No functional change.

Reviewed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Yanan Wang <wangyanan55@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210617105824.31752-4-wangyanan55@huawei.com

Authored by Yanan Wang and committed by Marc Zyngier
378e6a9c a4d5ca5c

+17 -20
+2 -7
arch/arm64/include/asm/kvm_mmu.h
··· 187 187 return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101; 188 188 } 189 189 190 - static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size) 190 + static inline void __clean_dcache_guest_page(void *va, size_t size) 191 191 { 192 - void *va = page_address(pfn_to_page(pfn)); 193 - 194 192 /* 195 193 * With FWB, we ensure that the guest always accesses memory using 196 194 * cacheable attributes, and we don't have to clean to PoC when ··· 201 203 kvm_flush_dcache_to_poc(va, size); 202 204 } 203 205 204 - static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn, 205 - unsigned long size) 206 + static inline void __invalidate_icache_guest_page(void *va, size_t size) 206 207 { 207 208 if (icache_is_aliasing()) { 208 209 /* any kind of VIPT cache */ 209 210 __flush_icache_all(); 210 211 } else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) { 211 212 /* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */ 212 - void *va = page_address(pfn_to_page(pfn)); 213 - 214 213 invalidate_icache_range((unsigned long)va, 215 214 (unsigned long)va + size); 216 215 }
+15 -13
arch/arm64/kvm/mmu.c
··· 126 126 return __va(phys); 127 127 } 128 128 129 + static void clean_dcache_guest_page(void *va, size_t size) 130 + { 131 + __clean_dcache_guest_page(va, size); 132 + } 133 + 134 + static void invalidate_icache_guest_page(void *va, size_t size) 135 + { 136 + __invalidate_icache_guest_page(va, size); 137 + } 138 + 129 139 /* 130 140 * Unmapping vs dcache management: 131 141 * ··· 703 693 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); 704 694 } 705 695 706 - static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size) 707 - { 708 - __clean_dcache_guest_page(pfn, size); 709 - } 710 - 711 - static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size) 712 - { 713 - __invalidate_icache_guest_page(pfn, size); 714 - } 715 - 716 696 static void kvm_send_hwpoison_signal(unsigned long address, short lsb) 717 697 { 718 698 send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current); ··· 972 972 prot |= KVM_PGTABLE_PROT_W; 973 973 974 974 if (fault_status != FSC_PERM && !device) 975 - clean_dcache_guest_page(pfn, vma_pagesize); 975 + clean_dcache_guest_page(page_address(pfn_to_page(pfn)), 976 + vma_pagesize); 976 977 977 978 if (exec_fault) { 978 979 prot |= KVM_PGTABLE_PROT_X; 979 - invalidate_icache_guest_page(pfn, vma_pagesize); 980 + invalidate_icache_guest_page(page_address(pfn_to_page(pfn)), 981 + vma_pagesize); 980 982 } 981 983 982 984 if (device) ··· 1180 1178 * We've moved a page around, probably through CoW, so let's treat it 1181 1179 * just like a translation fault and clean the cache to the PoC. 1182 1180 */ 1183 - clean_dcache_guest_page(pfn, PAGE_SIZE); 1181 + clean_dcache_guest_page(page_address(pfn_to_page(pfn)), PAGE_SIZE); 1184 1182 1185 1183 /* 1186 1184 * The MMU notifiers will have unmapped a huge PMD before calling