Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: s390: add stat counter for shadow gmap events

The shadow gmap tracks memory of nested guests (guest-3). In certain
scenarios, the shadow gmap needs to be rebuilt, which is a costly operation
since it involves a SIE exit into guest-1 for every entry in the respective
shadow level.

Add kvm stat counters when new shadow structures are created at various
levels. Also add a counter gmap_shadow_create when a completely fresh
shadow gmap is created as well as a counter gmap_shadow_reuse when an
existing gmap is being reused.

Note that when several levels are shadowed at once, counters on all
affected levels will be increased.

Also note that not all page table levels need to be present and an ASCE
can directly point to e.g. a segment table. In this case, a new segment
table will always be equivalent to a new shadow gmap and hence will be
counted as gmap_shadow_create and not as gmap_shadow_segment.

Signed-off-by: Nico Boehr <nrb@linux.ibm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Reviewed-by: Janosch Frank <frankja@linux.ibm.com>
Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
Link: https://lore.kernel.org/r/20231009093304.2555344-2-nrb@linux.ibm.com
Message-Id: <20231009093304.2555344-2-nrb@linux.ibm.com>

authored by

Nico Boehr and committed by
Janosch Frank
c3235e2d ce9ecca0

+26 -2
+7
arch/s390/include/asm/kvm_host.h
··· 777 777 u64 inject_service_signal; 778 778 u64 inject_virtio; 779 779 u64 aen_forward; 780 + u64 gmap_shadow_create; 781 + u64 gmap_shadow_reuse; 782 + u64 gmap_shadow_r1_entry; 783 + u64 gmap_shadow_r2_entry; 784 + u64 gmap_shadow_r3_entry; 785 + u64 gmap_shadow_sg_entry; 786 + u64 gmap_shadow_pg_entry; 780 787 }; 781 788 782 789 struct kvm_arch_memory_slot {
+7
arch/s390/kvm/gaccess.c
··· 1382 1382 unsigned long *pgt, int *dat_protection, 1383 1383 int *fake) 1384 1384 { 1385 + struct kvm *kvm; 1385 1386 struct gmap *parent; 1386 1387 union asce asce; 1387 1388 union vaddress vaddr; ··· 1391 1390 1392 1391 *fake = 0; 1393 1392 *dat_protection = 0; 1393 + kvm = sg->private; 1394 1394 parent = sg->parent; 1395 1395 vaddr.addr = saddr; 1396 1396 asce.val = sg->orig_asce; ··· 1452 1450 rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake); 1453 1451 if (rc) 1454 1452 return rc; 1453 + kvm->stat.gmap_shadow_r1_entry++; 1455 1454 } 1456 1455 fallthrough; 1457 1456 case ASCE_TYPE_REGION2: { ··· 1481 1478 rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake); 1482 1479 if (rc) 1483 1480 return rc; 1481 + kvm->stat.gmap_shadow_r2_entry++; 1484 1482 } 1485 1483 fallthrough; 1486 1484 case ASCE_TYPE_REGION3: { ··· 1519 1515 rc = gmap_shadow_sgt(sg, saddr, rtte.val, *fake); 1520 1516 if (rc) 1521 1517 return rc; 1518 + kvm->stat.gmap_shadow_r3_entry++; 1522 1519 } 1523 1520 fallthrough; 1524 1521 case ASCE_TYPE_SEGMENT: { ··· 1553 1548 rc = gmap_shadow_pgt(sg, saddr, ste.val, *fake); 1554 1549 if (rc) 1555 1550 return rc; 1551 + kvm->stat.gmap_shadow_sg_entry++; 1556 1552 } 1557 1553 } 1558 1554 /* Return the parent address of the page table */ ··· 1624 1618 pte.p |= dat_protection; 1625 1619 if (!rc) 1626 1620 rc = gmap_shadow_page(sg, saddr, __pte(pte.val)); 1621 + vcpu->kvm->stat.gmap_shadow_pg_entry++; 1627 1622 ipte_unlock(vcpu->kvm); 1628 1623 mmap_read_unlock(sg->mm); 1629 1624 return rc;
+8 -1
arch/s390/kvm/kvm-s390.c
··· 66 66 STATS_DESC_COUNTER(VM, inject_pfault_done), 67 67 STATS_DESC_COUNTER(VM, inject_service_signal), 68 68 STATS_DESC_COUNTER(VM, inject_virtio), 69 - STATS_DESC_COUNTER(VM, aen_forward) 69 + STATS_DESC_COUNTER(VM, aen_forward), 70 + STATS_DESC_COUNTER(VM, gmap_shadow_reuse), 71 + STATS_DESC_COUNTER(VM, gmap_shadow_create), 72 + STATS_DESC_COUNTER(VM, gmap_shadow_r1_entry), 73 + STATS_DESC_COUNTER(VM, gmap_shadow_r2_entry), 74 + STATS_DESC_COUNTER(VM, gmap_shadow_r3_entry), 75 + STATS_DESC_COUNTER(VM, gmap_shadow_sg_entry), 76 + STATS_DESC_COUNTER(VM, gmap_shadow_pg_entry), 70 77 }; 71 78 72 79 const struct kvm_stats_header kvm_vm_stats_header = {
+4 -1
arch/s390/kvm/vsie.c
··· 1214 1214 * we're holding has been unshadowed. If the gmap is still valid, 1215 1215 * we can safely reuse it. 1216 1216 */ 1217 - if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat)) 1217 + if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat)) { 1218 + vcpu->kvm->stat.gmap_shadow_reuse++; 1218 1219 return 0; 1220 + } 1219 1221 1220 1222 /* release the old shadow - if any, and mark the prefix as unmapped */ 1221 1223 release_gmap_shadow(vsie_page); ··· 1225 1223 if (IS_ERR(gmap)) 1226 1224 return PTR_ERR(gmap); 1227 1225 gmap->private = vcpu->kvm; 1226 + vcpu->kvm->stat.gmap_shadow_create++; 1228 1227 WRITE_ONCE(vsie_page->gmap, gmap); 1229 1228 return 0; 1230 1229 }