Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

perf/x86/intel/uncore: Make package handling more robust

The package management code in uncore relies on package mapping being
available before a CPU is started. This changed with:

9d85eb9119f4 ("x86/smpboot: Make logical package management more robust")

because the ACPI/BIOS information turned out to be unreliable, but that
left uncore in broken state. This was not noticed because on a regular boot
all CPUs are online before uncore is initialized.

Move the allocation to the CPU online callback and simplify the hotplug
handling. At this point the package mapping is established and correct.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Stephane Eranian <eranian@google.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: Yasuaki Ishimatsu <yasu.isimatu@gmail.com>
Fixes: 9d85eb9119f4 ("x86/smpboot: Make logical package management more robust")
Link: http://lkml.kernel.org/r/20170131230141.377156255@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Thomas Gleixner and committed by Ingo Molnar
fff4b87e 1aa6cfd3

+91 -107
+91 -105
arch/x86/events/intel/uncore.c
··· 100 100 101 101 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu) 102 102 { 103 - return pmu->boxes[topology_logical_package_id(cpu)]; 103 + unsigned int pkgid = topology_logical_package_id(cpu); 104 + 105 + /* 106 + * The unsigned check also catches the '-1' return value for non 107 + * existent mappings in the topology map. 108 + */ 109 + return pkgid < max_packages ? pmu->boxes[pkgid] : NULL; 104 110 } 105 111 106 112 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event) ··· 1040 1034 } 1041 1035 } 1042 1036 1043 - static int uncore_cpu_dying(unsigned int cpu) 1044 - { 1045 - struct intel_uncore_type *type, **types = uncore_msr_uncores; 1046 - struct intel_uncore_pmu *pmu; 1047 - struct intel_uncore_box *box; 1048 - int i, pkg; 1049 - 1050 - pkg = topology_logical_package_id(cpu); 1051 - for (; *types; types++) { 1052 - type = *types; 1053 - pmu = type->pmus; 1054 - for (i = 0; i < type->num_boxes; i++, pmu++) { 1055 - box = pmu->boxes[pkg]; 1056 - if (box && atomic_dec_return(&box->refcnt) == 0) 1057 - uncore_box_exit(box); 1058 - } 1059 - } 1060 - return 0; 1061 - } 1062 - 1063 - static int uncore_cpu_starting(unsigned int cpu) 1064 - { 1065 - struct intel_uncore_type *type, **types = uncore_msr_uncores; 1066 - struct intel_uncore_pmu *pmu; 1067 - struct intel_uncore_box *box; 1068 - int i, pkg; 1069 - 1070 - pkg = topology_logical_package_id(cpu); 1071 - for (; *types; types++) { 1072 - type = *types; 1073 - pmu = type->pmus; 1074 - for (i = 0; i < type->num_boxes; i++, pmu++) { 1075 - box = pmu->boxes[pkg]; 1076 - if (!box) 1077 - continue; 1078 - /* The first cpu on a package activates the box */ 1079 - if (atomic_inc_return(&box->refcnt) == 1) 1080 - uncore_box_init(box); 1081 - } 1082 - } 1083 - 1084 - return 0; 1085 - } 1086 - 1087 - static int uncore_cpu_prepare(unsigned int cpu) 1088 - { 1089 - struct intel_uncore_type *type, **types = uncore_msr_uncores; 1090 - struct intel_uncore_pmu 
*pmu; 1091 - struct intel_uncore_box *box; 1092 - int i, pkg; 1093 - 1094 - pkg = topology_logical_package_id(cpu); 1095 - for (; *types; types++) { 1096 - type = *types; 1097 - pmu = type->pmus; 1098 - for (i = 0; i < type->num_boxes; i++, pmu++) { 1099 - if (pmu->boxes[pkg]) 1100 - continue; 1101 - /* First cpu of a package allocates the box */ 1102 - box = uncore_alloc_box(type, cpu_to_node(cpu)); 1103 - if (!box) 1104 - return -ENOMEM; 1105 - box->pmu = pmu; 1106 - box->pkgid = pkg; 1107 - pmu->boxes[pkg] = box; 1108 - } 1109 - } 1110 - return 0; 1111 - } 1112 - 1113 1037 static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu, 1114 1038 int new_cpu) 1115 1039 { ··· 1079 1143 1080 1144 static int uncore_event_cpu_offline(unsigned int cpu) 1081 1145 { 1082 - int target; 1146 + struct intel_uncore_type *type, **types = uncore_msr_uncores; 1147 + struct intel_uncore_pmu *pmu; 1148 + struct intel_uncore_box *box; 1149 + int i, pkg, target; 1083 1150 1084 1151 /* Check if exiting cpu is used for collecting uncore events */ 1085 1152 if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask)) 1086 - return 0; 1087 - 1153 + goto unref; 1088 1154 /* Find a new cpu to collect uncore events */ 1089 1155 target = cpumask_any_but(topology_core_cpumask(cpu), cpu); 1090 1156 ··· 1098 1160 1099 1161 uncore_change_context(uncore_msr_uncores, cpu, target); 1100 1162 uncore_change_context(uncore_pci_uncores, cpu, target); 1163 + 1164 + unref: 1165 + /* Clear the references */ 1166 + pkg = topology_logical_package_id(cpu); 1167 + for (; *types; types++) { 1168 + type = *types; 1169 + pmu = type->pmus; 1170 + for (i = 0; i < type->num_boxes; i++, pmu++) { 1171 + box = pmu->boxes[pkg]; 1172 + if (box && atomic_dec_return(&box->refcnt) == 0) 1173 + uncore_box_exit(box); 1174 + } 1175 + } 1101 1176 return 0; 1177 + } 1178 + 1179 + static int allocate_boxes(struct intel_uncore_type **types, 1180 + unsigned int pkg, unsigned int cpu) 1181 + { 1182 + struct 
intel_uncore_box *box, *tmp; 1183 + struct intel_uncore_type *type; 1184 + struct intel_uncore_pmu *pmu; 1185 + LIST_HEAD(allocated); 1186 + int i; 1187 + 1188 + /* Try to allocate all required boxes */ 1189 + for (; *types; types++) { 1190 + type = *types; 1191 + pmu = type->pmus; 1192 + for (i = 0; i < type->num_boxes; i++, pmu++) { 1193 + if (pmu->boxes[pkg]) 1194 + continue; 1195 + box = uncore_alloc_box(type, cpu_to_node(cpu)); 1196 + if (!box) 1197 + goto cleanup; 1198 + box->pmu = pmu; 1199 + box->pkgid = pkg; 1200 + list_add(&box->active_list, &allocated); 1201 + } 1202 + } 1203 + /* Install them in the pmus */ 1204 + list_for_each_entry_safe(box, tmp, &allocated, active_list) { 1205 + list_del_init(&box->active_list); 1206 + box->pmu->boxes[pkg] = box; 1207 + } 1208 + return 0; 1209 + 1210 + cleanup: 1211 + list_for_each_entry_safe(box, tmp, &allocated, active_list) { 1212 + list_del_init(&box->active_list); 1213 + kfree(box); 1214 + } 1215 + return -ENOMEM; 1102 1216 } 1103 1217 1104 1218 static int uncore_event_cpu_online(unsigned int cpu) 1105 1219 { 1106 - int target; 1220 + struct intel_uncore_type *type, **types = uncore_msr_uncores; 1221 + struct intel_uncore_pmu *pmu; 1222 + struct intel_uncore_box *box; 1223 + int i, ret, pkg, target; 1224 + 1225 + pkg = topology_logical_package_id(cpu); 1226 + ret = allocate_boxes(types, pkg, cpu); 1227 + if (ret) 1228 + return ret; 1229 + 1230 + for (; *types; types++) { 1231 + type = *types; 1232 + pmu = type->pmus; 1233 + for (i = 0; i < type->num_boxes; i++, pmu++) { 1234 + box = pmu->boxes[pkg]; 1235 + if (!box && atomic_inc_return(&box->refcnt) == 1) 1236 + uncore_box_init(box); 1237 + } 1238 + } 1107 1239 1108 1240 /* 1109 1241 * Check if there is an online cpu in the package ··· 1363 1355 if (cret && pret) 1364 1356 return -ENODEV; 1365 1357 1366 - /* 1367 - * Install callbacks. Core will call them for each online cpu. 
1368 - * 1369 - * The first online cpu of each package allocates and takes 1370 - * the refcounts for all other online cpus in that package. 1371 - * If msrs are not enabled no allocation is required and 1372 - * uncore_cpu_prepare() is not called for each online cpu. 1373 - */ 1374 - if (!cret) { 1375 - ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP, 1376 - "perf/x86/intel/uncore:prepare", 1377 - uncore_cpu_prepare, NULL); 1378 - if (ret) 1379 - goto err; 1380 - } else { 1381 - cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP, 1382 - "perf/x86/intel/uncore:prepare", 1383 - uncore_cpu_prepare, NULL); 1384 - } 1385 - 1386 - cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING, 1387 - "perf/x86/uncore:starting", 1388 - uncore_cpu_starting, uncore_cpu_dying); 1389 - 1390 - cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE, 1391 - "perf/x86/uncore:online", 1392 - uncore_event_cpu_online, uncore_event_cpu_offline); 1358 + /* Install hotplug callbacks to setup the targets for each package */ 1359 + ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE, 1360 + "perf/x86/intel/uncore:online", 1361 + uncore_event_cpu_online, 1362 + uncore_event_cpu_offline); 1363 + if (ret) 1364 + goto err; 1393 1365 return 0; 1394 1366 1395 1367 err: ··· 1381 1393 1382 1394 static void __exit intel_uncore_exit(void) 1383 1395 { 1384 - cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE); 1385 - cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING); 1386 - cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP); 1396 + cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE); 1387 1397 uncore_types_exit(uncore_msr_uncores); 1388 1398 uncore_pci_exit(); 1389 1399 }
-2
include/linux/cpuhotplug.h
··· 8 8 CPUHP_CREATE_THREADS, 9 9 CPUHP_PERF_PREPARE, 10 10 CPUHP_PERF_X86_PREPARE, 11 - CPUHP_PERF_X86_UNCORE_PREP, 12 11 CPUHP_PERF_X86_AMD_UNCORE_PREP, 13 12 CPUHP_PERF_BFIN, 14 13 CPUHP_PERF_POWER, ··· 84 85 CPUHP_AP_IRQ_ARMADA_XP_STARTING, 85 86 CPUHP_AP_IRQ_BCM2836_STARTING, 86 87 CPUHP_AP_ARM_MVEBU_COHERENCY, 87 - CPUHP_AP_PERF_X86_UNCORE_STARTING, 88 88 CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, 89 89 CPUHP_AP_PERF_X86_STARTING, 90 90 CPUHP_AP_PERF_X86_AMD_IBS_STARTING,