Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: pfncache: include page offset in uhva and use it consistently

Currently the pfncache page offset is sometimes determined using the gpa
and sometimes the khva, whilst the uhva is always page-aligned. After a
subsequent patch is applied the gpa will not always be valid so adjust
the code to include the page offset in the uhva and use it consistently
as the source of truth.

Also, where a page-aligned address is required, use PAGE_ALIGN_DOWN()
for clarity.

No functional change intended.

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
Link: https://lore.kernel.org/r/20240215152916.1158-8-paul@xen.org
Signed-off-by: Sean Christopherson <seanjc@google.com>

Authored by Paul Durrant; committed by Sean Christopherson.

Commit: 406c1096 (parent: 53e63e95)

+21 -8
+21 -8
virt/kvm/pfncache.c
@@ -48,10 +48,10 @@
 	if (!gpc->active)
 		return false;
 
-	if (offset_in_page(gpc->gpa) + len > PAGE_SIZE)
+	if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva))
 		return false;
 
-	if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva))
+	if (offset_in_page(gpc->uhva) + len > PAGE_SIZE)
 		return false;
 
 	if (!gpc->valid)
@@ -119,7 +119,7 @@
 static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
 {
 	/* Note, the new page offset may be different than the old! */
-	void *old_khva = gpc->khva - offset_in_page(gpc->khva);
+	void *old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
 	kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
 	void *new_khva = NULL;
 	unsigned long mmu_seq;
@@ -192,7 +192,7 @@
 
 	gpc->valid = true;
 	gpc->pfn = new_pfn;
-	gpc->khva = new_khva + offset_in_page(gpc->gpa);
+	gpc->khva = new_khva + offset_in_page(gpc->uhva);
 
 	/*
 	 * Put the reference to the _new_ pfn.  The pfn is now tracked by the
@@ -217,6 +217,7 @@
 	bool unmap_old = false;
 	unsigned long old_uhva;
 	kvm_pfn_t old_pfn;
+	bool hva_change = false;
 	void *old_khva;
 	int ret;
 
@@ -243,10 +244,10 @@
 	}
 
 	old_pfn = gpc->pfn;
-	old_khva = gpc->khva - offset_in_page(gpc->khva);
-	old_uhva = gpc->uhva;
+	old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
+	old_uhva = PAGE_ALIGN_DOWN(gpc->uhva);
 
-	/* If the userspace HVA is invalid, refresh that first */
+	/* Refresh the userspace HVA if necessary */
 	if (gpc->gpa != gpa || gpc->generation != slots->generation ||
 	    kvm_is_error_hva(gpc->uhva)) {
 		gfn_t gfn = gpa_to_gfn(gpa);
@@ -260,13 +261,28 @@
 			ret = -EFAULT;
 			goto out;
 		}
+
+		/*
+		 * Even if the GPA and/or the memslot generation changed, the
+		 * HVA may still be the same.
+		 */
+		if (gpc->uhva != old_uhva)
+			hva_change = true;
+	} else {
+		gpc->uhva = old_uhva;
 	}
+
+	/* Note: the offset must be correct before calling hva_to_pfn_retry() */
+	gpc->uhva += page_offset;
 
 	/*
 	 * If the userspace HVA changed or the PFN was already invalid,
 	 * drop the lock and do the HVA to PFN lookup again.
 	 */
-	if (!gpc->valid || old_uhva != gpc->uhva) {
+	if (!gpc->valid || hva_change) {
 		ret = hva_to_pfn_retry(gpc);
 	} else {
 		/*