Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/kvm-hv-pmu: Add perf-events for Hostwide counters

Update 'kvm-hv-pmu.c' to add five new perf-events mapped to the five
Hostwide counters. Since these newly introduced perf events are at system
wide scope and can be read from any L1-Lpar CPU, 'kvmppc_pmu' scope and
capabilities are updated appropriately.

Also introduce two new helpers. First is kvmppc_update_l0_stats() that uses
the infrastructure introduced in previous patches to issue the
H_GUEST_GET_STATE hcall to L0-PowerVM to fetch the guest-state-buffer holding
the latest values of these counters, which is then parsed and the 'l0_stats'
variable updated.

Second helper is kvmppc_pmu_event_update() which is called from
'kvmppc_pmu' callbacks and uses kvmppc_update_l0_stats() to update
'l0_stats' and then updates the 'struct perf_event's event-counter.

Some minor updates to kvmppc_pmu_{add, del, read}() to remove some debug
scaffolding code.

Signed-off-by: Vaibhav Jain <vaibhav@linux.ibm.com>
Reviewed-by: Athira Rajeev <atrajeev@linux.ibm.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/20250416162740.93143-7-vaibhav@linux.ibm.com

authored by

Vaibhav Jain and committed by
Madhavan Srinivasan
02a1324b f0c9c49c

+91 -1
+91 -1
arch/powerpc/perf/kvm-hv-pmu.c
··· 30 30 #include "asm/guest-state-buffer.h" 31 31 32 32 enum kvmppc_pmu_eventid { 33 + KVMPPC_EVENT_HOST_HEAP, 34 + KVMPPC_EVENT_HOST_HEAP_MAX, 35 + KVMPPC_EVENT_HOST_PGTABLE, 36 + KVMPPC_EVENT_HOST_PGTABLE_MAX, 37 + KVMPPC_EVENT_HOST_PGTABLE_RECLAIM, 33 38 KVMPPC_EVENT_MAX, 34 39 }; 35 40 ··· 66 61 /* GSB related structs needed to talk to L0 */ 67 62 static struct kvmppc_gs_msg *gsm_l0_stats; 68 63 static struct kvmppc_gs_buff *gsb_l0_stats; 64 + static struct kvmppc_gs_parser gsp_l0_stats; 69 65 70 66 static struct attribute *kvmppc_pmu_events_attr[] = { 67 + KVMPPC_PMU_EVENT_ATTR(host_heap, KVMPPC_EVENT_HOST_HEAP), 68 + KVMPPC_PMU_EVENT_ATTR(host_heap_max, KVMPPC_EVENT_HOST_HEAP_MAX), 69 + KVMPPC_PMU_EVENT_ATTR(host_pagetable, KVMPPC_EVENT_HOST_PGTABLE), 70 + KVMPPC_PMU_EVENT_ATTR(host_pagetable_max, KVMPPC_EVENT_HOST_PGTABLE_MAX), 71 + KVMPPC_PMU_EVENT_ATTR(host_pagetable_reclaim, KVMPPC_EVENT_HOST_PGTABLE_RECLAIM), 71 72 NULL, 72 73 }; 73 74 ··· 82 71 .attrs = kvmppc_pmu_events_attr, 83 72 }; 84 73 85 - PMU_FORMAT_ATTR(event, "config:0"); 74 + PMU_FORMAT_ATTR(event, "config:0-5"); 86 75 static struct attribute *kvmppc_pmu_format_attr[] = { 87 76 &format_attr_event.attr, 88 77 NULL, ··· 98 87 &kvmppc_pmu_format_group, 99 88 NULL, 100 89 }; 90 + 91 + /* 92 + * Issue the hcall to get the L0-host stats. 
93 + * Should be called with l0-stat lock held 94 + */ 95 + static int kvmppc_update_l0_stats(void) 96 + { 97 + int rc; 98 + 99 + /* With HOST_WIDE flags guestid and vcpuid will be ignored */ 100 + rc = kvmppc_gsb_recv(gsb_l0_stats, KVMPPC_GS_FLAGS_HOST_WIDE); 101 + if (rc) 102 + goto out; 103 + 104 + /* Parse the guest state buffer is successful */ 105 + rc = kvmppc_gse_parse(&gsp_l0_stats, gsb_l0_stats); 106 + if (rc) 107 + goto out; 108 + 109 + /* Update the l0 returned stats*/ 110 + memset(&l0_stats, 0, sizeof(l0_stats)); 111 + rc = kvmppc_gsm_refresh_info(gsm_l0_stats, gsb_l0_stats); 112 + 113 + out: 114 + return rc; 115 + } 116 + 117 + /* Update the value of the given perf_event */ 118 + static int kvmppc_pmu_event_update(struct perf_event *event) 119 + { 120 + int rc; 121 + u64 curr_val, prev_val; 122 + unsigned long flags; 123 + unsigned int config = event->attr.config; 124 + 125 + /* Ensure no one else is modifying the l0_stats */ 126 + spin_lock_irqsave(&lock_l0_stats, flags); 127 + 128 + rc = kvmppc_update_l0_stats(); 129 + if (!rc) { 130 + switch (config) { 131 + case KVMPPC_EVENT_HOST_HEAP: 132 + curr_val = l0_stats.guest_heap; 133 + break; 134 + case KVMPPC_EVENT_HOST_HEAP_MAX: 135 + curr_val = l0_stats.guest_heap_max; 136 + break; 137 + case KVMPPC_EVENT_HOST_PGTABLE: 138 + curr_val = l0_stats.guest_pgtable_size; 139 + break; 140 + case KVMPPC_EVENT_HOST_PGTABLE_MAX: 141 + curr_val = l0_stats.guest_pgtable_size_max; 142 + break; 143 + case KVMPPC_EVENT_HOST_PGTABLE_RECLAIM: 144 + curr_val = l0_stats.guest_pgtable_reclaim; 145 + break; 146 + default: 147 + rc = -ENOENT; 148 + break; 149 + } 150 + } 151 + 152 + spin_unlock_irqrestore(&lock_l0_stats, flags); 153 + 154 + /* If no error than update the perf event */ 155 + if (!rc) { 156 + prev_val = local64_xchg(&event->hw.prev_count, curr_val); 157 + if (curr_val > prev_val) 158 + local64_add(curr_val - prev_val, &event->count); 159 + } 160 + 161 + return rc; 162 + } 101 163 102 164 static int 
kvmppc_pmu_event_init(struct perf_event *event) 103 165 { ··· 194 110 195 111 static void kvmppc_pmu_del(struct perf_event *event, int flags) 196 112 { 113 + kvmppc_pmu_event_update(event); 197 114 } 198 115 199 116 static int kvmppc_pmu_add(struct perf_event *event, int flags) 200 117 { 118 + if (flags & PERF_EF_START) 119 + return kvmppc_pmu_event_update(event); 201 120 return 0; 202 121 } 203 122 204 123 static void kvmppc_pmu_read(struct perf_event *event) 205 124 { 125 + kvmppc_pmu_event_update(event); 206 126 } 207 127 208 128 /* Return the size of the needed guest state buffer */ ··· 390 302 .read = kvmppc_pmu_read, 391 303 .attr_groups = kvmppc_pmu_attr_groups, 392 304 .type = -1, 305 + .scope = PERF_PMU_SCOPE_SYS_WIDE, 306 + .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT, 393 307 }; 394 308 395 309 static int __init kvmppc_register_pmu(void)