Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/kvm-hv-pmu: Implement GSB message-ops for hostwide counters

Implement and set up the necessary structures to send a prepopulated
Guest-State-Buffer(GSB) requesting hostwide counters to L0-PowerVM and have
the returned GSB holding the values of these counters parsed. This is done
via existing GSB implementation and with the newly added support of
Hostwide elements in GSB.

The request to L0-PowerVM to return Hostwide counters is done using a
pre-allocated GSB named 'gsb_l0_stats'. To be able to populate this GSB
with the needed Guest-State-Elements (GSIDs) an instance of 'struct
kvmppc_gs_msg' named 'gsm_l0_stats' is introduced. The 'gsm_l0_stats' is
tied to an instance of 'struct kvmppc_gs_msg_ops' named 'gsb_ops_l0_stats'
which holds various callbacks to compute the size ( hostwide_get_size()
), populate the GSB ( hostwide_fill_info() ) and
refresh ( hostwide_refresh_info() ) the contents of
'l0_stats' that holds the Hostwide counters returned from L0-PowerVM.

To protect these structures from simultaneous access, a spinlock
'lock_l0_stats' has been introduced. The allocation and initialization of
the above structures is done in newly introduced kvmppc_init_hostwide() and
similarly the cleanup is performed in newly introduced
kvmppc_cleanup_hostwide().

Signed-off-by: Vaibhav Jain <vaibhav@linux.ibm.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/20250416162740.93143-6-vaibhav@linux.ibm.com

authored by

Vaibhav Jain and committed by
Madhavan Srinivasan
f0c9c49c ff45bf50

+207
+207
arch/powerpc/perf/kvm-hv-pmu.c
··· 27 27 #include <asm/plpar_wrappers.h> 28 28 #include <asm/firmware.h> 29 29 30 + #include "asm/guest-state-buffer.h" 31 + 30 32 enum kvmppc_pmu_eventid { 31 33 KVMPPC_EVENT_MAX, 32 34 }; 35 + 36 + #define KVMPPC_PMU_EVENT_ATTR(_name, _id) \ 37 + PMU_EVENT_ATTR_ID(_name, kvmppc_events_sysfs_show, _id) 38 + 39 + static ssize_t kvmppc_events_sysfs_show(struct device *dev, 40 + struct device_attribute *attr, 41 + char *page) 42 + { 43 + struct perf_pmu_events_attr *pmu_attr; 44 + 45 + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); 46 + return sprintf(page, "event=0x%02llx\n", pmu_attr->id); 47 + } 48 + 49 + /* Holds the hostwide stats */ 50 + static struct kvmppc_hostwide_stats { 51 + u64 guest_heap; 52 + u64 guest_heap_max; 53 + u64 guest_pgtable_size; 54 + u64 guest_pgtable_size_max; 55 + u64 guest_pgtable_reclaim; 56 + } l0_stats; 57 + 58 + /* Protect access to l0_stats */ 59 + static DEFINE_SPINLOCK(lock_l0_stats); 60 + 61 + /* GSB related structs needed to talk to L0 */ 62 + static struct kvmppc_gs_msg *gsm_l0_stats; 63 + static struct kvmppc_gs_buff *gsb_l0_stats; 33 64 34 65 static struct attribute *kvmppc_pmu_events_attr[] = { 35 66 NULL, ··· 121 90 { 122 91 } 123 92 93 + /* Return the size of the needed guest state buffer */ 94 + static size_t hostwide_get_size(struct kvmppc_gs_msg *gsm) 95 + 96 + { 97 + size_t size = 0; 98 + const u16 ids[] = { 99 + KVMPPC_GSID_L0_GUEST_HEAP, 100 + KVMPPC_GSID_L0_GUEST_HEAP_MAX, 101 + KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE, 102 + KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX, 103 + KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM 104 + }; 105 + 106 + for (int i = 0; i < ARRAY_SIZE(ids); i++) 107 + size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i])); 108 + return size; 109 + } 110 + 111 + /* Populate the request guest state buffer */ 112 + static int hostwide_fill_info(struct kvmppc_gs_buff *gsb, 113 + struct kvmppc_gs_msg *gsm) 114 + { 115 + int rc = 0; 116 + struct kvmppc_hostwide_stats *stats = gsm->data; 117 + 118 + /* 
119 + * It doesn't matter what values are put into request buffer as 120 + * they are going to be overwritten anyways. But for the sake of 121 + * testcode and symmetry contents of existing stats are put 122 + * populated into the request guest state buffer. 123 + */ 124 + if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_HEAP)) 125 + rc = kvmppc_gse_put_u64(gsb, 126 + KVMPPC_GSID_L0_GUEST_HEAP, 127 + stats->guest_heap); 128 + 129 + if (!rc && kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_HEAP_MAX)) 130 + rc = kvmppc_gse_put_u64(gsb, 131 + KVMPPC_GSID_L0_GUEST_HEAP_MAX, 132 + stats->guest_heap_max); 133 + 134 + if (!rc && kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE)) 135 + rc = kvmppc_gse_put_u64(gsb, 136 + KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE, 137 + stats->guest_pgtable_size); 138 + if (!rc && 139 + kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX)) 140 + rc = kvmppc_gse_put_u64(gsb, 141 + KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX, 142 + stats->guest_pgtable_size_max); 143 + if (!rc && 144 + kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM)) 145 + rc = kvmppc_gse_put_u64(gsb, 146 + KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM, 147 + stats->guest_pgtable_reclaim); 148 + 149 + return rc; 150 + } 151 + 152 + /* Parse and update the host wide stats from returned gsb */ 153 + static int hostwide_refresh_info(struct kvmppc_gs_msg *gsm, 154 + struct kvmppc_gs_buff *gsb) 155 + { 156 + struct kvmppc_gs_parser gsp = { 0 }; 157 + struct kvmppc_hostwide_stats *stats = gsm->data; 158 + struct kvmppc_gs_elem *gse; 159 + int rc; 160 + 161 + rc = kvmppc_gse_parse(&gsp, gsb); 162 + if (rc < 0) 163 + return rc; 164 + 165 + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP); 166 + if (gse) 167 + stats->guest_heap = kvmppc_gse_get_u64(gse); 168 + 169 + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP_MAX); 170 + if (gse) 171 + stats->guest_heap_max = kvmppc_gse_get_u64(gse); 172 + 173 + gse = kvmppc_gsp_lookup(&gsp, 
KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE); 174 + if (gse) 175 + stats->guest_pgtable_size = kvmppc_gse_get_u64(gse); 176 + 177 + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX); 178 + if (gse) 179 + stats->guest_pgtable_size_max = kvmppc_gse_get_u64(gse); 180 + 181 + gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM); 182 + if (gse) 183 + stats->guest_pgtable_reclaim = kvmppc_gse_get_u64(gse); 184 + 185 + return 0; 186 + } 187 + 188 + /* gsb-message ops for setting up/parsing */ 189 + static struct kvmppc_gs_msg_ops gsb_ops_l0_stats = { 190 + .get_size = hostwide_get_size, 191 + .fill_info = hostwide_fill_info, 192 + .refresh_info = hostwide_refresh_info, 193 + }; 194 + 195 + static int kvmppc_init_hostwide(void) 196 + { 197 + int rc = 0; 198 + unsigned long flags; 199 + 200 + spin_lock_irqsave(&lock_l0_stats, flags); 201 + 202 + /* already registered ? */ 203 + if (gsm_l0_stats) { 204 + rc = 0; 205 + goto out; 206 + } 207 + 208 + /* setup the Guest state message/buffer to talk to L0 */ 209 + gsm_l0_stats = kvmppc_gsm_new(&gsb_ops_l0_stats, &l0_stats, 210 + GSM_SEND, GFP_KERNEL); 211 + if (!gsm_l0_stats) { 212 + rc = -ENOMEM; 213 + goto out; 214 + } 215 + 216 + /* Populate the Idents */ 217 + kvmppc_gsm_include(gsm_l0_stats, KVMPPC_GSID_L0_GUEST_HEAP); 218 + kvmppc_gsm_include(gsm_l0_stats, KVMPPC_GSID_L0_GUEST_HEAP_MAX); 219 + kvmppc_gsm_include(gsm_l0_stats, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE); 220 + kvmppc_gsm_include(gsm_l0_stats, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX); 221 + kvmppc_gsm_include(gsm_l0_stats, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM); 222 + 223 + /* allocate GSB. 
Guest/Vcpu Id is ignored */ 224 + gsb_l0_stats = kvmppc_gsb_new(kvmppc_gsm_size(gsm_l0_stats), 0, 0, 225 + GFP_KERNEL); 226 + if (!gsb_l0_stats) { 227 + rc = -ENOMEM; 228 + goto out; 229 + } 230 + 231 + /* ask the ops to fill in the info */ 232 + rc = kvmppc_gsm_fill_info(gsm_l0_stats, gsb_l0_stats); 233 + 234 + out: 235 + if (rc) { 236 + if (gsm_l0_stats) 237 + kvmppc_gsm_free(gsm_l0_stats); 238 + if (gsb_l0_stats) 239 + kvmppc_gsb_free(gsb_l0_stats); 240 + gsm_l0_stats = NULL; 241 + gsb_l0_stats = NULL; 242 + } 243 + spin_unlock_irqrestore(&lock_l0_stats, flags); 244 + return rc; 245 + } 246 + 247 + static void kvmppc_cleanup_hostwide(void) 248 + { 249 + unsigned long flags; 250 + 251 + spin_lock_irqsave(&lock_l0_stats, flags); 252 + 253 + if (gsm_l0_stats) 254 + kvmppc_gsm_free(gsm_l0_stats); 255 + if (gsb_l0_stats) 256 + kvmppc_gsb_free(gsb_l0_stats); 257 + gsm_l0_stats = NULL; 258 + gsb_l0_stats = NULL; 259 + 260 + spin_unlock_irqrestore(&lock_l0_stats, flags); 261 + } 262 + 124 263 /* L1 wide counters PMU */ 125 264 static struct pmu kvmppc_pmu = { 126 265 .module = THIS_MODULE, ··· 310 109 311 110 /* only support events for nestedv2 right now */ 312 111 if (kvmhv_is_nestedv2()) { 112 + rc = kvmppc_init_hostwide(); 113 + if (rc) 114 + goto out; 115 + 313 116 /* Register the pmu */ 314 117 rc = perf_pmu_register(&kvmppc_pmu, kvmppc_pmu.name, -1); 315 118 if (rc) ··· 329 124 static void __exit kvmppc_unregister_pmu(void) 330 125 { 331 126 if (kvmhv_is_nestedv2()) { 127 + kvmppc_cleanup_hostwide(); 128 + 332 129 if (kvmppc_pmu.type != -1) 333 130 perf_pmu_unregister(&kvmppc_pmu); 334 131