Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-rmk/perf' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into next/virt

* 'for-rmk/perf' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux:
ARM: perf: simplify __hw_perf_event_init err handling
ARM: perf: remove unnecessary checks for idx < 0
ARM: perf: handle armpmu_register failing
ARM: perf: don't pretend to support counting of L1I writes
ARM: perf: remove redundant NULL check on cpu_pmu
ARM: Use implementor and part defines from cputype.h
ARM: Define CPU part numbers and implementors

+75 -49
+33
arch/arm/include/asm/cputype.h
··· 64 64 #define read_cpuid_ext(reg) 0 65 65 #endif 66 66 67 + #define ARM_CPU_IMP_ARM 0x41 68 + #define ARM_CPU_IMP_INTEL 0x69 69 + 70 + #define ARM_CPU_PART_ARM1136 0xB360 71 + #define ARM_CPU_PART_ARM1156 0xB560 72 + #define ARM_CPU_PART_ARM1176 0xB760 73 + #define ARM_CPU_PART_ARM11MPCORE 0xB020 74 + #define ARM_CPU_PART_CORTEX_A8 0xC080 75 + #define ARM_CPU_PART_CORTEX_A9 0xC090 76 + #define ARM_CPU_PART_CORTEX_A5 0xC050 77 + #define ARM_CPU_PART_CORTEX_A15 0xC0F0 78 + #define ARM_CPU_PART_CORTEX_A7 0xC070 79 + 80 + #define ARM_CPU_XSCALE_ARCH_MASK 0xe000 81 + #define ARM_CPU_XSCALE_ARCH_V1 0x2000 82 + #define ARM_CPU_XSCALE_ARCH_V2 0x4000 83 + #define ARM_CPU_XSCALE_ARCH_V3 0x6000 84 + 67 85 /* 68 86 * The CPU ID never changes at run time, so we might as well tell the 69 87 * compiler that it's constant. Use this function to read the CPU ID ··· 90 72 static inline unsigned int __attribute_const__ read_cpuid_id(void) 91 73 { 92 74 return read_cpuid(CPUID_ID); 75 + } 76 + 77 + static inline unsigned int __attribute_const__ read_cpuid_implementor(void) 78 + { 79 + return (read_cpuid_id() & 0xFF000000) >> 24; 80 + } 81 + 82 + static inline unsigned int __attribute_const__ read_cpuid_part_number(void) 83 + { 84 + return read_cpuid_id() & 0xFFF0; 85 + } 86 + 87 + static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void) 88 + { 89 + return read_cpuid_part_number() & ARM_CPU_XSCALE_ARCH_MASK; 93 90 } 94 91 95 92 static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
+3 -13
arch/arm/kernel/perf_event.c
··· 149 149 static void 150 150 armpmu_read(struct perf_event *event) 151 151 { 152 - struct hw_perf_event *hwc = &event->hw; 153 - 154 - /* Don't read disabled counters! */ 155 - if (hwc->idx < 0) 156 - return; 157 - 158 152 armpmu_event_update(event); 159 153 } 160 154 ··· 200 206 struct pmu_hw_events *hw_events = armpmu->get_hw_events(); 201 207 struct hw_perf_event *hwc = &event->hw; 202 208 int idx = hwc->idx; 203 - 204 - WARN_ON(idx < 0); 205 209 206 210 armpmu_stop(event, PERF_EF_UPDATE); 207 211 hw_events->events[idx] = NULL; ··· 350 358 { 351 359 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); 352 360 struct hw_perf_event *hwc = &event->hw; 353 - int mapping, err; 361 + int mapping; 354 362 355 363 mapping = armpmu->map_event(event); 356 364 ··· 399 407 local64_set(&hwc->period_left, hwc->sample_period); 400 408 } 401 409 402 - err = 0; 403 410 if (event->group_leader != event) { 404 - err = validate_group(event); 405 - if (err) 411 + if (validate_group(event) != 0) 406 412 return -EINVAL; 407 413 } 408 414 409 - return err; 415 + return 0; 410 416 } 411 417 412 418 static int armpmu_event_init(struct perf_event *event)
+27 -24
arch/arm/kernel/perf_event_cpu.c
··· 147 147 cpu_pmu->free_irq = cpu_pmu_free_irq; 148 148 149 149 /* Ensure the PMU has sane values out of reset. */ 150 - if (cpu_pmu && cpu_pmu->reset) 150 + if (cpu_pmu->reset) 151 151 on_each_cpu(cpu_pmu->reset, cpu_pmu, 1); 152 152 } 153 153 ··· 201 201 static int probe_current_pmu(struct arm_pmu *pmu) 202 202 { 203 203 int cpu = get_cpu(); 204 - unsigned long cpuid = read_cpuid_id(); 205 - unsigned long implementor = (cpuid & 0xFF000000) >> 24; 206 - unsigned long part_number = (cpuid & 0xFFF0); 204 + unsigned long implementor = read_cpuid_implementor(); 205 + unsigned long part_number = read_cpuid_part_number(); 207 206 int ret = -ENODEV; 208 207 209 208 pr_info("probing PMU on CPU %d\n", cpu); 210 209 211 210 /* ARM Ltd CPUs. */ 212 - if (0x41 == implementor) { 211 + if (implementor == ARM_CPU_IMP_ARM) { 213 212 switch (part_number) { 214 - case 0xB360: /* ARM1136 */ 215 - case 0xB560: /* ARM1156 */ 216 - case 0xB760: /* ARM1176 */ 213 + case ARM_CPU_PART_ARM1136: 214 + case ARM_CPU_PART_ARM1156: 215 + case ARM_CPU_PART_ARM1176: 217 216 ret = armv6pmu_init(pmu); 218 217 break; 219 - case 0xB020: /* ARM11mpcore */ 218 + case ARM_CPU_PART_ARM11MPCORE: 220 219 ret = armv6mpcore_pmu_init(pmu); 221 220 break; 222 - case 0xC080: /* Cortex-A8 */ 221 + case ARM_CPU_PART_CORTEX_A8: 223 222 ret = armv7_a8_pmu_init(pmu); 224 223 break; 225 - case 0xC090: /* Cortex-A9 */ 224 + case ARM_CPU_PART_CORTEX_A9: 226 225 ret = armv7_a9_pmu_init(pmu); 227 226 break; 228 - case 0xC050: /* Cortex-A5 */ 227 + case ARM_CPU_PART_CORTEX_A5: 229 228 ret = armv7_a5_pmu_init(pmu); 230 229 break; 231 - case 0xC0F0: /* Cortex-A15 */ 230 + case ARM_CPU_PART_CORTEX_A15: 232 231 ret = armv7_a15_pmu_init(pmu); 233 232 break; 234 - case 0xC070: /* Cortex-A7 */ 233 + case ARM_CPU_PART_CORTEX_A7: 235 234 ret = armv7_a7_pmu_init(pmu); 236 235 break; 237 236 } 238 237 /* Intel CPUs [xscale]. 
*/ 239 - } else if (0x69 == implementor) { 240 - part_number = (cpuid >> 13) & 0x7; 241 - switch (part_number) { 242 - case 1: 238 + } else if (implementor == ARM_CPU_IMP_INTEL) { 239 + switch (xscale_cpu_arch_version()) { 240 + case ARM_CPU_XSCALE_ARCH_V1: 243 241 ret = xscale1pmu_init(pmu); 244 242 break; 245 - case 2: 243 + case ARM_CPU_XSCALE_ARCH_V2: 246 244 ret = xscale2pmu_init(pmu); 247 245 break; 248 246 } ··· 277 279 } 278 280 279 281 if (ret) { 280 - pr_info("failed to register PMU devices!"); 281 - kfree(pmu); 282 - return ret; 282 + pr_info("failed to probe PMU!"); 283 + goto out_free; 283 284 } 284 285 285 286 cpu_pmu = pmu; 286 287 cpu_pmu->plat_device = pdev; 287 288 cpu_pmu_init(cpu_pmu); 288 - armpmu_register(cpu_pmu, PERF_TYPE_RAW); 289 + ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW); 289 290 290 - return 0; 291 + if (!ret) 292 + return 0; 293 + 294 + out_free: 295 + pr_info("failed to register PMU devices!"); 296 + kfree(pmu); 297 + return ret; 291 298 } 292 299 293 300 static struct platform_driver cpu_pmu_driver = {
+2 -2
arch/arm/kernel/perf_event_v6.c
··· 106 106 }, 107 107 [C(OP_WRITE)] = { 108 108 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 109 - [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS, 109 + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 110 110 }, 111 111 [C(OP_PREFETCH)] = { 112 112 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, ··· 259 259 }, 260 260 [C(OP_WRITE)] = { 261 261 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 262 - [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS, 262 + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 263 263 }, 264 264 [C(OP_PREFETCH)] = { 265 265 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+9 -9
arch/arm/kernel/perf_event_v7.c
··· 157 157 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 158 158 }, 159 159 [C(OP_WRITE)] = { 160 - [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS, 161 - [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 160 + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 161 + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 162 162 }, 163 163 [C(OP_PREFETCH)] = { 164 164 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, ··· 282 282 }, 283 283 [C(OP_WRITE)] = { 284 284 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 285 - [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 285 + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 286 286 }, 287 287 [C(OP_PREFETCH)] = { 288 288 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, ··· 399 399 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 400 400 }, 401 401 [C(OP_WRITE)] = { 402 - [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, 403 - [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 402 + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 403 + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 404 404 }, 405 405 /* 406 406 * The prefetch counters don't differentiate between the I ··· 527 527 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 528 528 }, 529 529 [C(OP_WRITE)] = { 530 - [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, 531 - [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 530 + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 531 + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 532 532 }, 533 533 [C(OP_PREFETCH)] = { 534 534 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, ··· 651 651 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 652 652 }, 653 653 [C(OP_WRITE)] = { 654 - [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, 655 - [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, 654 + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 655 + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 656 656 }, 657 657 [C(OP_PREFETCH)] = { 658 658 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+1 -1
arch/arm/kernel/perf_event_xscale.c
··· 83 83 }, 84 84 [C(OP_WRITE)] = { 85 85 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 86 - [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS, 86 + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 87 87 }, 88 88 [C(OP_PREFETCH)] = { 89 89 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,