Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge commit 'kumar/next' into merge

+1045 -174
+2 -2
arch/powerpc/boot/dts/gef_ppc9a.dts
··· 1 1 /* 2 - * GE Fanuc PPC9A Device Tree Source 2 + * GE PPC9A Device Tree Source 3 3 * 4 - * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc. 4 + * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc. 5 5 * 6 6 * This program is free software; you can redistribute it and/or modify it 7 7 * under the terms of the GNU General Public License as published by the
+2 -2
arch/powerpc/boot/dts/gef_sbc310.dts
··· 1 1 /* 2 - * GE Fanuc SBC310 Device Tree Source 2 + * GE SBC310 Device Tree Source 3 3 * 4 - * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc. 4 + * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc. 5 5 * 6 6 * This program is free software; you can redistribute it and/or modify it 7 7 * under the terms of the GNU General Public License as published by the
+2 -2
arch/powerpc/boot/dts/gef_sbc610.dts
··· 1 1 /* 2 - * GE Fanuc SBC610 Device Tree Source 2 + * GE SBC610 Device Tree Source 3 3 * 4 - * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc. 4 + * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc. 5 5 * 6 6 * This program is free software; you can redistribute it and/or modify it 7 7 * under the terms of the GNU General Public License as published by the
+11 -98
arch/powerpc/include/asm/perf_event.h
··· 1 1 /* 2 - * Performance event support - PowerPC-specific definitions. 2 + * Performance event support - hardware-specific disambiguation 3 3 * 4 - * Copyright 2008-2009 Paul Mackerras, IBM Corporation. 4 + * For now this is a compile-time decision, but eventually it should be 5 + * runtime. This would allow multiplatform perf event support for e300 (fsl 6 + * embedded perf counters) plus server/classic, and would accommodate 7 + * devices other than the core which provide their own performance counters. 8 + * 9 + * Copyright 2010 Freescale Semiconductor, Inc. 5 10 * 6 11 * This program is free software; you can redistribute it and/or 7 12 * modify it under the terms of the GNU General Public License 8 13 * as published by the Free Software Foundation; either version 9 14 * 2 of the License, or (at your option) any later version. 10 15 */ 11 - #include <linux/types.h> 12 16 13 - #include <asm/hw_irq.h> 14 - 15 - #define MAX_HWEVENTS 8 16 - #define MAX_EVENT_ALTERNATIVES 8 17 - #define MAX_LIMITED_HWCOUNTERS 2 18 - 19 - /* 20 - * This struct provides the constants and functions needed to 21 - * describe the PMU on a particular POWER-family CPU. 22 - */ 23 - struct power_pmu { 24 - const char *name; 25 - int n_counter; 26 - int max_alternatives; 27 - unsigned long add_fields; 28 - unsigned long test_adder; 29 - int (*compute_mmcr)(u64 events[], int n_ev, 30 - unsigned int hwc[], unsigned long mmcr[]); 31 - int (*get_constraint)(u64 event_id, unsigned long *mskp, 32 - unsigned long *valp); 33 - int (*get_alternatives)(u64 event_id, unsigned int flags, 34 - u64 alt[]); 35 - void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]); 36 - int (*limited_pmc_event)(u64 event_id); 37 - u32 flags; 38 - int n_generic; 39 - int *generic_events; 40 - int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] 41 - [PERF_COUNT_HW_CACHE_OP_MAX] 42 - [PERF_COUNT_HW_CACHE_RESULT_MAX]; 43 - }; 44 - 45 - /* 46 - * Values for power_pmu.flags 47 - */ 48 - #define PPMU_LIMITED_PMC5_6 1 /* PMC5/6 have limited function */ 49 - #define PPMU_ALT_SIPR 2 /* uses alternate posn for SIPR/HV */ 50 - 51 - /* 52 - * Values for flags to get_alternatives() 53 - */ 54 - #define PPMU_LIMITED_PMC_OK 1 /* can put this on a limited PMC */ 55 - #define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */ 56 - #define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */ 57 - 58 - extern int register_power_pmu(struct power_pmu *); 59 - 60 - struct pt_regs; 61 - extern unsigned long perf_misc_flags(struct pt_regs *regs); 62 - extern unsigned long perf_instruction_pointer(struct pt_regs *regs); 63 - 64 - #define PERF_EVENT_INDEX_OFFSET 1 65 - 66 - /* 67 - * Only override the default definitions in include/linux/perf_event.h 68 - * if we have hardware PMU support. 69 - */ 70 17 #ifdef CONFIG_PPC_PERF_CTRS 71 - #define perf_misc_flags(regs) perf_misc_flags(regs) 18 + #include <asm/perf_event_server.h> 72 19 #endif 73 20 74 - /* 75 - * The power_pmu.get_constraint function returns a 32/64-bit value and 76 - * a 32/64-bit mask that express the constraints between this event_id and 77 - * other events. 78 - * 79 - * The value and mask are divided up into (non-overlapping) bitfields 80 - * of three different types: 81 - * 82 - * Select field: this expresses the constraint that some set of bits 83 - * in MMCR* needs to be set to a specific value for this event_id. For a 84 - * select field, the mask contains 1s in every bit of the field, and 85 - * the value contains a unique value for each possible setting of the 86 - * MMCR* bits. The constraint checking code will ensure that two events 87 - * that set the same field in their masks have the same value in their 88 - * value dwords. 89 - * 90 - * Add field: this expresses the constraint that there can be at most 91 - * N events in a particular class. A field of k bits can be used for 92 - * N <= 2^(k-1) - 1. The mask has the most significant bit of the field 93 - * set (and the other bits 0), and the value has only the least significant 94 - * bit of the field set. In addition, the 'add_fields' and 'test_adder' 95 - * in the struct power_pmu for this processor come into play. The 96 - * add_fields value contains 1 in the LSB of the field, and the 97 - * test_adder contains 2^(k-1) - 1 - N in the field. 98 - * 99 - * NAND field: this expresses the constraint that you may not have events 100 - * in all of a set of classes. (For example, on PPC970, you can't select 101 - * events from the FPU, ISU and IDU simultaneously, although any two are 102 - * possible.) For N classes, the field is N+1 bits wide, and each class 103 - * is assigned one bit from the least-significant N bits. The mask has 104 - * only the most-significant bit set, and the value has only the bit 105 - * for the event_id's class set. The test_adder has the least significant 106 - * bit set in the field. 107 - * 108 - * If an event_id is not subject to the constraint expressed by a particular 109 - * field, then it will have 0 in both the mask and value for that field. 110 - */ 21 + #ifdef CONFIG_FSL_EMB_PERF_EVENT 22 + #include <asm/perf_event_fsl_emb.h> 23 + #endif
+50
arch/powerpc/include/asm/perf_event_fsl_emb.h
··· 1 + /* 2 + * Performance event support - Freescale embedded specific definitions. 3 + * 4 + * Copyright 2008-2009 Paul Mackerras, IBM Corporation. 5 + * Copyright 2010 Freescale Semiconductor, Inc. 6 + * 7 + * This program is free software; you can redistribute it and/or 8 + * modify it under the terms of the GNU General Public License 9 + * as published by the Free Software Foundation; either version 10 + * 2 of the License, or (at your option) any later version. 11 + */ 12 + 13 + #include <linux/types.h> 14 + #include <asm/hw_irq.h> 15 + 16 + #define MAX_HWEVENTS 4 17 + 18 + /* event flags */ 19 + #define FSL_EMB_EVENT_VALID 1 20 + #define FSL_EMB_EVENT_RESTRICTED 2 21 + 22 + /* upper half of event flags is PMLCb */ 23 + #define FSL_EMB_EVENT_THRESHMUL 0x0000070000000000ULL 24 + #define FSL_EMB_EVENT_THRESH 0x0000003f00000000ULL 25 + 26 + struct fsl_emb_pmu { 27 + const char *name; 28 + int n_counter; /* total number of counters */ 29 + 30 + /* 31 + * The number of contiguous counters starting at zero that 32 + * can hold restricted events, or zero if there are no 33 + * restricted events. 34 + * 35 + * This isn't a very flexible method of expressing constraints, 36 + * but it's very simple and is adequate for existing chips. 37 + */ 38 + int n_restricted; 39 + 40 + /* Returns event flags and PMLCb (FSL_EMB_EVENT_*) */ 41 + u64 (*xlate_event)(u64 event_id); 42 + 43 + int n_generic; 44 + int *generic_events; 45 + int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] 46 + [PERF_COUNT_HW_CACHE_OP_MAX] 47 + [PERF_COUNT_HW_CACHE_RESULT_MAX]; 48 + }; 49 + 50 + int register_fsl_emb_pmu(struct fsl_emb_pmu *);
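The split gives each back end its own translation contract: xlate_event() returns validity flags in the low half of a u64 and ready-to-write PMLCb bits in the upper half (perf_event_fsl_emb.c below writes config >> 32 straight into PMLCb). A minimal sketch of unpacking that return value; the helper names are illustrative, not part of the patch:

#include <linux/types.h>
#include <asm/perf_event_fsl_emb.h>

/* Illustrative helpers, assuming the FSL_EMB_EVENT_* definitions above. */
static inline int fsl_emb_event_is_valid(u64 flags)
{
        return (flags & FSL_EMB_EVENT_VALID) != 0;
}

static inline u32 fsl_emb_event_pmlcb(u64 flags)
{
        /* upper half of the event flags is PMLCb, per the header comment */
        return (u32)(flags >> 32);
}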
+110
arch/powerpc/include/asm/perf_event_server.h
··· 1 + /* 2 + * Performance event support - PowerPC classic/server specific definitions. 3 + * 4 + * Copyright 2008-2009 Paul Mackerras, IBM Corporation. 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public License 8 + * as published by the Free Software Foundation; either version 9 + * 2 of the License, or (at your option) any later version. 10 + */ 11 + 12 + #include <linux/types.h> 13 + #include <asm/hw_irq.h> 14 + 15 + #define MAX_HWEVENTS 8 16 + #define MAX_EVENT_ALTERNATIVES 8 17 + #define MAX_LIMITED_HWCOUNTERS 2 18 + 19 + /* 20 + * This struct provides the constants and functions needed to 21 + * describe the PMU on a particular POWER-family CPU. 22 + */ 23 + struct power_pmu { 24 + const char *name; 25 + int n_counter; 26 + int max_alternatives; 27 + unsigned long add_fields; 28 + unsigned long test_adder; 29 + int (*compute_mmcr)(u64 events[], int n_ev, 30 + unsigned int hwc[], unsigned long mmcr[]); 31 + int (*get_constraint)(u64 event_id, unsigned long *mskp, 32 + unsigned long *valp); 33 + int (*get_alternatives)(u64 event_id, unsigned int flags, 34 + u64 alt[]); 35 + void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]); 36 + int (*limited_pmc_event)(u64 event_id); 37 + u32 flags; 38 + int n_generic; 39 + int *generic_events; 40 + int (*cache_events)[PERF_COUNT_HW_CACHE_MAX] 41 + [PERF_COUNT_HW_CACHE_OP_MAX] 42 + [PERF_COUNT_HW_CACHE_RESULT_MAX]; 43 + }; 44 + 45 + /* 46 + * Values for power_pmu.flags 47 + */ 48 + #define PPMU_LIMITED_PMC5_6 1 /* PMC5/6 have limited function */ 49 + #define PPMU_ALT_SIPR 2 /* uses alternate posn for SIPR/HV */ 50 + 51 + /* 52 + * Values for flags to get_alternatives() 53 + */ 54 + #define PPMU_LIMITED_PMC_OK 1 /* can put this on a limited PMC */ 55 + #define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */ 56 + #define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */ 57 + 58 + extern int register_power_pmu(struct power_pmu *); 59 + 60 + struct pt_regs; 61 + extern unsigned long perf_misc_flags(struct pt_regs *regs); 62 + extern unsigned long perf_instruction_pointer(struct pt_regs *regs); 63 + 64 + #define PERF_EVENT_INDEX_OFFSET 1 65 + 66 + /* 67 + * Only override the default definitions in include/linux/perf_event.h 68 + * if we have hardware PMU support. 69 + */ 70 + #ifdef CONFIG_PPC_PERF_CTRS 71 + #define perf_misc_flags(regs) perf_misc_flags(regs) 72 + #endif 73 + 74 + /* 75 + * The power_pmu.get_constraint function returns a 32/64-bit value and 76 + * a 32/64-bit mask that express the constraints between this event_id and 77 + * other events. 78 + * 79 + * The value and mask are divided up into (non-overlapping) bitfields 80 + * of three different types: 81 + * 82 + * Select field: this expresses the constraint that some set of bits 83 + * in MMCR* needs to be set to a specific value for this event_id. For a 84 + * select field, the mask contains 1s in every bit of the field, and 85 + * the value contains a unique value for each possible setting of the 86 + * MMCR* bits. The constraint checking code will ensure that two events 87 + * that set the same field in their masks have the same value in their 88 + * value dwords. 89 + * 90 + * Add field: this expresses the constraint that there can be at most 91 + * N events in a particular class. A field of k bits can be used for 92 + * N <= 2^(k-1) - 1. The mask has the most significant bit of the field 93 + * set (and the other bits 0), and the value has only the least significant 94 + * bit of the field set. In addition, the 'add_fields' and 'test_adder' 95 + * in the struct power_pmu for this processor come into play. The 96 + * add_fields value contains 1 in the LSB of the field, and the 97 + * test_adder contains 2^(k-1) - 1 - N in the field. 98 + * 99 + * NAND field: this expresses the constraint that you may not have events 100 + * in all of a set of classes. (For example, on PPC970, you can't select 101 + * events from the FPU, ISU and IDU simultaneously, although any two are 102 + * possible.) For N classes, the field is N+1 bits wide, and each class 103 + * is assigned one bit from the least-significant N bits. The mask has 104 + * only the most-significant bit set, and the value has only the bit 105 + * for the event_id's class set. The test_adder has the least significant 106 + * bit set in the field. 107 + * 108 + * If an event_id is not subject to the constraint expressed by a particular 109 + * field, then it will have 0 in both the mask and value for that field. 110 + */
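The add-field arithmetic documented above is easiest to see with concrete numbers. A standalone user-space sketch, assuming a hypothetical 3-bit field at bit 32 that allows at most N = 3 events (so the field's test_adder term is 2^(k-1) - 1 - N = 0); this mimics only the overflow idea, not the kernel's full constraint solver:

#include <stdio.h>
#include <stdint.h>

#define FIELD_SHIFT 32
#define FIELD_MSB  (0x4ULL << FIELD_SHIFT)  /* mask: MSB of the 3-bit field */
#define FIELD_LSB  (0x1ULL << FIELD_SHIFT)  /* value: one unit per event */
#define TEST_ADDER (0x0ULL << FIELD_SHIFT)  /* 2^(k-1) - 1 - N = 0 for k=3, N=3 */

int main(void)
{
        for (int n = 1; n <= 4; n++) {
                uint64_t sum = (uint64_t)n * FIELD_LSB; /* n events, LSB each */
                int fits = ((sum + TEST_ADDER) & FIELD_MSB) == 0;
                printf("%d events in class: %s\n", n, fits ? "fits" : "over limit");
        }
        return 0;
}

Four events push the summed LSBs into the masked MSB, so the check fails exactly when the class is over-subscribed.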
+1 -1
arch/powerpc/include/asm/reg_fsl_emb.h
··· 31 31 #define PMLCA_FCM0 0x08000000 /* Freeze when PMM==0 */ 32 32 #define PMLCA_CE 0x04000000 /* Condition Enable */ 33 33 34 - #define PMLCA_EVENT_MASK 0x007f0000 /* Event field */ 34 + #define PMLCA_EVENT_MASK 0x00ff0000 /* Event field */ 35 35 #define PMLCA_EVENT_SHIFT 16 36 36 37 37 #define PMRN_PMLCB0 0x110 /* PM Local Control B0 */
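Widening the event field from 7 to 8 bits matters for e500mc, which has more than 128 events (init_e500_pmu() below sets num_events to 256 for it). A small sketch of decoding the field with the new mask, assuming a PMLCAn register image is already in hand:

#include <asm/reg_fsl_emb.h>

/* Recover the programmed event number from a PMLCAn image. */
static inline unsigned int pmlca_event(unsigned long pmlca)
{
        return (pmlca & PMLCA_EVENT_MASK) >> PMLCA_EVENT_SHIFT;
}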
+6 -1
arch/powerpc/kernel/Makefile
··· 98 98 99 99 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o 100 100 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o 101 - obj-$(CONFIG_PPC_PERF_CTRS) += perf_event.o perf_callchain.o 101 + obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o 102 + 103 + obj-$(CONFIG_PPC_PERF_CTRS) += perf_event.o 102 104 obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \ 103 105 power5+-pmu.o power6-pmu.o power7-pmu.o 104 106 obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o 107 + 108 + obj-$(CONFIG_FSL_EMB_PERF_EVENT) += perf_event_fsl_emb.o 109 + obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500) += e500-pmu.o 105 110 106 111 obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o 107 112
+1 -1
arch/powerpc/kernel/cputable.c
··· 1808 1808 .icache_bsize = 64, 1809 1809 .dcache_bsize = 64, 1810 1810 .num_pmcs = 4, 1811 - .oprofile_cpu_type = "ppc/e500", /* xxx - galak, e500mc? */ 1811 + .oprofile_cpu_type = "ppc/e500mc", 1812 1812 .oprofile_type = PPC_OPROFILE_FSL_EMB, 1813 1813 .cpu_setup = __setup_cpu_e500mc, 1814 1814 .machine_check = machine_check_e500,
+129
arch/powerpc/kernel/e500-pmu.c
··· 1 + /* 2 + * Performance counter support for e500 family processors. 3 + * 4 + * Copyright 2008-2009 Paul Mackerras, IBM Corporation. 5 + * Copyright 2010 Freescale Semiconductor, Inc. 6 + * 7 + * This program is free software; you can redistribute it and/or 8 + * modify it under the terms of the GNU General Public License 9 + * as published by the Free Software Foundation; either version 10 + * 2 of the License, or (at your option) any later version. 11 + */ 12 + #include <linux/string.h> 13 + #include <linux/perf_event.h> 14 + #include <asm/reg.h> 15 + #include <asm/cputable.h> 16 + 17 + /* 18 + * Map of generic hardware event types to hardware events 19 + * Zero if unsupported 20 + */ 21 + static int e500_generic_events[] = { 22 + [PERF_COUNT_HW_CPU_CYCLES] = 1, 23 + [PERF_COUNT_HW_INSTRUCTIONS] = 2, 24 + [PERF_COUNT_HW_CACHE_MISSES] = 41, /* Data L1 cache reloads */ 25 + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 12, 26 + [PERF_COUNT_HW_BRANCH_MISSES] = 15, 27 + }; 28 + 29 + #define C(x) PERF_COUNT_HW_CACHE_##x 30 + 31 + /* 32 + * Table of generalized cache-related events. 33 + * 0 means not supported, -1 means nonsensical, other values 34 + * are event codes. 35 + */ 36 + static int e500_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { 37 + /* 38 + * D-cache misses are not split into read/write/prefetch; 39 + * use raw event 41. 40 + */ 41 + [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ 42 + [C(OP_READ)] = { 27, 0 }, 43 + [C(OP_WRITE)] = { 28, 0 }, 44 + [C(OP_PREFETCH)] = { 29, 0 }, 45 + }, 46 + [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ 47 + [C(OP_READ)] = { 2, 60 }, 48 + [C(OP_WRITE)] = { -1, -1 }, 49 + [C(OP_PREFETCH)] = { 0, 0 }, 50 + }, 51 + /* 52 + * Assuming LL means L2, it's not a good match for this model. 53 + * It allocates only on L1 castout or explicit prefetch, and 54 + * does not have separate read/write events (but it does have 55 + * separate instruction/data events). 56 + */ 57 + [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ 58 + [C(OP_READ)] = { 0, 0 }, 59 + [C(OP_WRITE)] = { 0, 0 }, 60 + [C(OP_PREFETCH)] = { 0, 0 }, 61 + }, 62 + /* 63 + * There are data/instruction MMU misses, but that's a miss on 64 + * the chip's internal level-one TLB which is probably not 65 + * what the user wants. Instead, unified level-two TLB misses 66 + * are reported here. 67 + */ 68 + [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ 69 + [C(OP_READ)] = { 26, 66 }, 70 + [C(OP_WRITE)] = { -1, -1 }, 71 + [C(OP_PREFETCH)] = { -1, -1 }, 72 + }, 73 + [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ 74 + [C(OP_READ)] = { 12, 15 }, 75 + [C(OP_WRITE)] = { -1, -1 }, 76 + [C(OP_PREFETCH)] = { -1, -1 }, 77 + }, 78 + }; 79 + 80 + static int num_events = 128; 81 + 82 + /* Upper half of event id is PMLCb, for threshold events */ 83 + static u64 e500_xlate_event(u64 event_id) 84 + { 85 + u32 event_low = (u32)event_id; 86 + u64 ret; 87 + 88 + if (event_low >= num_events) 89 + return 0; 90 + 91 + ret = FSL_EMB_EVENT_VALID; 92 + 93 + if (event_low >= 76 && event_low <= 81) { 94 + ret |= FSL_EMB_EVENT_RESTRICTED; 95 + ret |= event_id & 96 + (FSL_EMB_EVENT_THRESHMUL | FSL_EMB_EVENT_THRESH); 97 + } else if (event_id & 98 + (FSL_EMB_EVENT_THRESHMUL | FSL_EMB_EVENT_THRESH)) { 99 + /* Threshold requested on non-threshold event */ 100 + return 0; 101 + } 102 + 103 + return ret; 104 + } 105 + 106 + static struct fsl_emb_pmu e500_pmu = { 107 + .name = "e500 family", 108 + .n_counter = 4, 109 + .n_restricted = 2, 110 + .xlate_event = e500_xlate_event, 111 + .n_generic = ARRAY_SIZE(e500_generic_events), 112 + .generic_events = e500_generic_events, 113 + .cache_events = &e500_cache_events, 114 + }; 115 + 116 + static int init_e500_pmu(void) 117 + { 118 + if (!cur_cpu_spec->oprofile_cpu_type) 119 + return -ENODEV; 120 + 121 + if (!strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/e500mc")) 122 + num_events = 256; 123 + else if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/e500")) 124 + return -ENODEV; 125 + 126 + return register_fsl_emb_pmu(&e500_pmu); 127 + } 128 + 129 + arch_initcall(init_e500_pmu);
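Raw events pass through e500_xlate_event() unmodified, so user space can select one of the restricted threshold events (76-81) and pack PMLCb threshold bits into the upper half of attr.config, matching FSL_EMB_EVENT_THRESH above. A hedged user-space sketch; the event number and threshold value are arbitrary examples, and error handling is omitted:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

int open_e500_threshold_event(void)
{
        struct perf_event_attr attr = {
                .type = PERF_TYPE_RAW,
                .size = sizeof(attr),
                /* low 32 bits: event 77 (threshold-capable, restricted);
                 * bits 32-37: threshold value, per FSL_EMB_EVENT_THRESH */
                .config = 77ULL | (0x5ULL << 32),
        };

        /* count for the calling task on any CPU */
        return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}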
+654
arch/powerpc/kernel/perf_event_fsl_emb.c
··· 1 + /* 2 + * Performance event support - Freescale Embedded Performance Monitor 3 + * 4 + * Copyright 2008-2009 Paul Mackerras, IBM Corporation. 5 + * Copyright 2010 Freescale Semiconductor, Inc. 6 + * 7 + * This program is free software; you can redistribute it and/or 8 + * modify it under the terms of the GNU General Public License 9 + * as published by the Free Software Foundation; either version 10 + * 2 of the License, or (at your option) any later version. 11 + */ 12 + #include <linux/kernel.h> 13 + #include <linux/sched.h> 14 + #include <linux/perf_event.h> 15 + #include <linux/percpu.h> 16 + #include <linux/hardirq.h> 17 + #include <asm/reg_fsl_emb.h> 18 + #include <asm/pmc.h> 19 + #include <asm/machdep.h> 20 + #include <asm/firmware.h> 21 + #include <asm/ptrace.h> 22 + 23 + struct cpu_hw_events { 24 + int n_events; 25 + int disabled; 26 + u8 pmcs_enabled; 27 + struct perf_event *event[MAX_HWEVENTS]; 28 + }; 29 + static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); 30 + 31 + static struct fsl_emb_pmu *ppmu; 32 + 33 + /* Number of perf_events counting hardware events */ 34 + static atomic_t num_events; 35 + /* Used to avoid races in calling reserve/release_pmc_hardware */ 36 + static DEFINE_MUTEX(pmc_reserve_mutex); 37 + 38 + /* 39 + * If interrupts were soft-disabled when a PMU interrupt occurs, treat 40 + * it as an NMI. 41 + */ 42 + static inline int perf_intr_is_nmi(struct pt_regs *regs) 43 + { 44 + #ifdef __powerpc64__ 45 + return !regs->softe; 46 + #else 47 + return 0; 48 + #endif 49 + } 50 + 51 + static void perf_event_interrupt(struct pt_regs *regs); 52 + 53 + /* 54 + * Read one performance monitor counter (PMC). 55 + */ 56 + static unsigned long read_pmc(int idx) 57 + { 58 + unsigned long val; 59 + 60 + switch (idx) { 61 + case 0: 62 + val = mfpmr(PMRN_PMC0); 63 + break; 64 + case 1: 65 + val = mfpmr(PMRN_PMC1); 66 + break; 67 + case 2: 68 + val = mfpmr(PMRN_PMC2); 69 + break; 70 + case 3: 71 + val = mfpmr(PMRN_PMC3); 72 + break; 73 + default: 74 + printk(KERN_ERR "oops trying to read PMC%d\n", idx); 75 + val = 0; 76 + } 77 + return val; 78 + } 79 + 80 + /* 81 + * Write one PMC. 82 + */ 83 + static void write_pmc(int idx, unsigned long val) 84 + { 85 + switch (idx) { 86 + case 0: 87 + mtpmr(PMRN_PMC0, val); 88 + break; 89 + case 1: 90 + mtpmr(PMRN_PMC1, val); 91 + break; 92 + case 2: 93 + mtpmr(PMRN_PMC2, val); 94 + break; 95 + case 3: 96 + mtpmr(PMRN_PMC3, val); 97 + break; 98 + default: 99 + printk(KERN_ERR "oops trying to write PMC%d\n", idx); 100 + } 101 + 102 + isync(); 103 + } 104 + 105 + /* 106 + * Write one local control A register 107 + */ 108 + static void write_pmlca(int idx, unsigned long val) 109 + { 110 + switch (idx) { 111 + case 0: 112 + mtpmr(PMRN_PMLCA0, val); 113 + break; 114 + case 1: 115 + mtpmr(PMRN_PMLCA1, val); 116 + break; 117 + case 2: 118 + mtpmr(PMRN_PMLCA2, val); 119 + break; 120 + case 3: 121 + mtpmr(PMRN_PMLCA3, val); 122 + break; 123 + default: 124 + printk(KERN_ERR "oops trying to write PMLCA%d\n", idx); 125 + } 126 + 127 + isync(); 128 + } 129 + 130 + /* 131 + * Write one local control B register 132 + */ 133 + static void write_pmlcb(int idx, unsigned long val) 134 + { 135 + switch (idx) { 136 + case 0: 137 + mtpmr(PMRN_PMLCB0, val); 138 + break; 139 + case 1: 140 + mtpmr(PMRN_PMLCB1, val); 141 + break; 142 + case 2: 143 + mtpmr(PMRN_PMLCB2, val); 144 + break; 145 + case 3: 146 + mtpmr(PMRN_PMLCB3, val); 147 + break; 148 + default: 149 + printk(KERN_ERR "oops trying to write PMLCB%d\n", idx); 150 + } 151 + 152 + isync(); 153 + } 154 + 155 + static void fsl_emb_pmu_read(struct perf_event *event) 156 + { 157 + s64 val, delta, prev; 158 + 159 + /* 160 + * Performance monitor interrupts come even when interrupts 161 + * are soft-disabled, as long as interrupts are hard-enabled. 162 + * Therefore we treat them like NMIs. 163 + */ 164 + do { 165 + prev = atomic64_read(&event->hw.prev_count); 166 + barrier(); 167 + val = read_pmc(event->hw.idx); 168 + } while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev); 169 + 170 + /* The counters are only 32 bits wide */ 171 + delta = (val - prev) & 0xfffffffful; 172 + atomic64_add(delta, &event->count); 173 + atomic64_sub(delta, &event->hw.period_left); 174 + } 175 + 176 + /* 177 + * Disable all events to prevent PMU interrupts and to allow 178 + * events to be added or removed. 179 + */ 180 + void hw_perf_disable(void) 181 + { 182 + struct cpu_hw_events *cpuhw; 183 + unsigned long flags; 184 + 185 + local_irq_save(flags); 186 + cpuhw = &__get_cpu_var(cpu_hw_events); 187 + 188 + if (!cpuhw->disabled) { 189 + cpuhw->disabled = 1; 190 + 191 + /* 192 + * Check if we ever enabled the PMU on this cpu. 193 + */ 194 + if (!cpuhw->pmcs_enabled) { 195 + ppc_enable_pmcs(); 196 + cpuhw->pmcs_enabled = 1; 197 + } 198 + 199 + if (atomic_read(&num_events)) { 200 + /* 201 + * Set the 'freeze all counters' bit, and disable 202 + * interrupts. The barrier is to make sure the 203 + * mtpmr has been executed and the PMU has frozen 204 + * the events before we return. 205 + */ 206 + 207 + mtpmr(PMRN_PMGC0, PMGC0_FAC); 208 + isync(); 209 + } 210 + } 211 + local_irq_restore(flags); 212 + } 213 + 214 + /* 215 + * Re-enable all events if disable == 0. 216 + * If we were previously disabled and events were added, then 217 + * put the new config on the PMU. 218 + */ 219 + void hw_perf_enable(void) 220 + { 221 + struct cpu_hw_events *cpuhw; 222 + unsigned long flags; 223 + 224 + local_irq_save(flags); 225 + cpuhw = &__get_cpu_var(cpu_hw_events); 226 + if (!cpuhw->disabled) 227 + goto out; 228 + 229 + cpuhw->disabled = 0; 230 + ppc_set_pmu_inuse(cpuhw->n_events != 0); 231 + 232 + if (cpuhw->n_events > 0) { 233 + mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE); 234 + isync(); 235 + } 236 + 237 + out: 238 + local_irq_restore(flags); 239 + } 240 + 241 + static int collect_events(struct perf_event *group, int max_count, 242 + struct perf_event *ctrs[]) 243 + { 244 + int n = 0; 245 + struct perf_event *event; 246 + 247 + if (!is_software_event(group)) { 248 + if (n >= max_count) 249 + return -1; 250 + ctrs[n] = group; 251 + n++; 252 + } 253 + list_for_each_entry(event, &group->sibling_list, group_entry) { 254 + if (!is_software_event(event) && 255 + event->state != PERF_EVENT_STATE_OFF) { 256 + if (n >= max_count) 257 + return -1; 258 + ctrs[n] = event; 259 + n++; 260 + } 261 + } 262 + return n; 263 + } 264 + 265 + /* perf must be disabled, context locked on entry */ 266 + static int fsl_emb_pmu_enable(struct perf_event *event) 267 + { 268 + struct cpu_hw_events *cpuhw; 269 + int ret = -EAGAIN; 270 + int num_counters = ppmu->n_counter; 271 + u64 val; 272 + int i; 273 + 274 + cpuhw = &get_cpu_var(cpu_hw_events); 275 + 276 + if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) 277 + num_counters = ppmu->n_restricted; 278 + 279 + /* 280 + * Allocate counters from top-down, so that restricted-capable 281 + * counters are kept free as long as possible. 282 + */ 283 + for (i = num_counters - 1; i >= 0; i--) { 284 + if (cpuhw->event[i]) 285 + continue; 286 + 287 + break; 288 + } 289 + 290 + if (i < 0) 291 + goto out; 292 + 293 + event->hw.idx = i; 294 + cpuhw->event[i] = event; 295 + ++cpuhw->n_events; 296 + 297 + val = 0; 298 + if (event->hw.sample_period) { 299 + s64 left = atomic64_read(&event->hw.period_left); 300 + if (left < 0x80000000L) 301 + val = 0x80000000L - left; 302 + } 303 + atomic64_set(&event->hw.prev_count, val); 304 + write_pmc(i, val); 305 + perf_event_update_userpage(event); 306 + 307 + write_pmlcb(i, event->hw.config >> 32); 308 + write_pmlca(i, event->hw.config_base); 309 + 310 + ret = 0; 311 + out: 312 + put_cpu_var(cpu_hw_events); 313 + return ret; 314 + } 315 + 316 + /* perf must be disabled, context locked on entry */ 317 + static void fsl_emb_pmu_disable(struct perf_event *event) 318 + { 319 + struct cpu_hw_events *cpuhw; 320 + int i = event->hw.idx; 321 + 322 + if (i < 0) 323 + goto out; 324 + 325 + fsl_emb_pmu_read(event); 326 + 327 + cpuhw = &get_cpu_var(cpu_hw_events); 328 + 329 + WARN_ON(event != cpuhw->event[event->hw.idx]); 330 + 331 + write_pmlca(i, 0); 332 + write_pmlcb(i, 0); 333 + write_pmc(i, 0); 334 + 335 + cpuhw->event[i] = NULL; 336 + event->hw.idx = -1; 337 + 338 + /* 339 + * TODO: if at least one restricted event exists, and we 340 + * just freed up a non-restricted-capable counter, and 341 + * there is a restricted-capable counter occupied by 342 + * a non-restricted event, migrate that event to the 343 + * vacated counter. 344 + */ 345 + 346 + cpuhw->n_events--; 347 + 348 + out: 349 + put_cpu_var(cpu_hw_events); 350 + } 351 + 352 + /* 353 + * Re-enable interrupts on a event after they were throttled 354 + * because they were coming too fast. 355 + * 356 + * Context is locked on entry, but perf is not disabled. 357 + */ 358 + static void fsl_emb_pmu_unthrottle(struct perf_event *event) 359 + { 360 + s64 val, left; 361 + unsigned long flags; 362 + 363 + if (event->hw.idx < 0 || !event->hw.sample_period) 364 + return; 365 + local_irq_save(flags); 366 + perf_disable(); 367 + fsl_emb_pmu_read(event); 368 + left = event->hw.sample_period; 369 + event->hw.last_period = left; 370 + val = 0; 371 + if (left < 0x80000000L) 372 + val = 0x80000000L - left; 373 + write_pmc(event->hw.idx, val); 374 + atomic64_set(&event->hw.prev_count, val); 375 + atomic64_set(&event->hw.period_left, left); 376 + perf_event_update_userpage(event); 377 + perf_enable(); 378 + local_irq_restore(flags); 379 + } 380 + 381 + static struct pmu fsl_emb_pmu = { 382 + .enable = fsl_emb_pmu_enable, 383 + .disable = fsl_emb_pmu_disable, 384 + .read = fsl_emb_pmu_read, 385 + .unthrottle = fsl_emb_pmu_unthrottle, 386 + }; 387 + 388 + /* 389 + * Release the PMU if this is the last perf_event. 390 + */ 391 + static void hw_perf_event_destroy(struct perf_event *event) 392 + { 393 + if (!atomic_add_unless(&num_events, -1, 1)) { 394 + mutex_lock(&pmc_reserve_mutex); 395 + if (atomic_dec_return(&num_events) == 0) 396 + release_pmc_hardware(); 397 + mutex_unlock(&pmc_reserve_mutex); 398 + } 399 + } 400 + 401 + /* 402 + * Translate a generic cache event_id config to a raw event_id code. 403 + */ 404 + static int hw_perf_cache_event(u64 config, u64 *eventp) 405 + { 406 + unsigned long type, op, result; 407 + int ev; 408 + 409 + if (!ppmu->cache_events) 410 + return -EINVAL; 411 + 412 + /* unpack config */ 413 + type = config & 0xff; 414 + op = (config >> 8) & 0xff; 415 + result = (config >> 16) & 0xff; 416 + 417 + if (type >= PERF_COUNT_HW_CACHE_MAX || 418 + op >= PERF_COUNT_HW_CACHE_OP_MAX || 419 + result >= PERF_COUNT_HW_CACHE_RESULT_MAX) 420 + return -EINVAL; 421 + 422 + ev = (*ppmu->cache_events)[type][op][result]; 423 + if (ev == 0) 424 + return -EOPNOTSUPP; 425 + if (ev == -1) 426 + return -EINVAL; 427 + *eventp = ev; 428 + return 0; 429 + } 430 + 431 + const struct pmu *hw_perf_event_init(struct perf_event *event) 432 + { 433 + u64 ev; 434 + struct perf_event *events[MAX_HWEVENTS]; 435 + int n; 436 + int err; 437 + int num_restricted; 438 + int i; 439 + 440 + switch (event->attr.type) { 441 + case PERF_TYPE_HARDWARE: 442 + ev = event->attr.config; 443 + if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) 444 + return ERR_PTR(-EOPNOTSUPP); 445 + ev = ppmu->generic_events[ev]; 446 + break; 447 + 448 + case PERF_TYPE_HW_CACHE: 449 + err = hw_perf_cache_event(event->attr.config, &ev); 450 + if (err) 451 + return ERR_PTR(err); 452 + break; 453 + 454 + case PERF_TYPE_RAW: 455 + ev = event->attr.config; 456 + break; 457 + 458 + default: 459 + return ERR_PTR(-EINVAL); 460 + } 461 + 462 + event->hw.config = ppmu->xlate_event(ev); 463 + if (!(event->hw.config & FSL_EMB_EVENT_VALID)) 464 + return ERR_PTR(-EINVAL); 465 + 466 + /* 467 + * If this is in a group, check if it can go on with all the 468 + * other hardware events in the group. We assume the event 469 + * hasn't been linked into its leader's sibling list at this point. 470 + */ 471 + n = 0; 472 + if (event->group_leader != event) { 473 + n = collect_events(event->group_leader, 474 + ppmu->n_counter - 1, events); 475 + if (n < 0) 476 + return ERR_PTR(-EINVAL); 477 + } 478 + 479 + if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) { 480 + num_restricted = 0; 481 + for (i = 0; i < n; i++) { 482 + if (events[i]->hw.config & FSL_EMB_EVENT_RESTRICTED) 483 + num_restricted++; 484 + } 485 + 486 + if (num_restricted >= ppmu->n_restricted) 487 + return ERR_PTR(-EINVAL); 488 + } 489 + 490 + event->hw.idx = -1; 491 + 492 + event->hw.config_base = PMLCA_CE | PMLCA_FCM1 | 493 + (u32)((ev << 16) & PMLCA_EVENT_MASK); 494 + 495 + if (event->attr.exclude_user) 496 + event->hw.config_base |= PMLCA_FCU; 497 + if (event->attr.exclude_kernel) 498 + event->hw.config_base |= PMLCA_FCS; 499 + if (event->attr.exclude_idle) 500 + return ERR_PTR(-ENOTSUPP); 501 + 502 + event->hw.last_period = event->hw.sample_period; 503 + atomic64_set(&event->hw.period_left, event->hw.last_period); 504 + 505 + /* 506 + * See if we need to reserve the PMU. 507 + * If no events are currently in use, then we have to take a 508 + * mutex to ensure that we don't race with another task doing 509 + * reserve_pmc_hardware or release_pmc_hardware. 510 + */ 511 + err = 0; 512 + if (!atomic_inc_not_zero(&num_events)) { 513 + mutex_lock(&pmc_reserve_mutex); 514 + if (atomic_read(&num_events) == 0 && 515 + reserve_pmc_hardware(perf_event_interrupt)) 516 + err = -EBUSY; 517 + else 518 + atomic_inc(&num_events); 519 + mutex_unlock(&pmc_reserve_mutex); 520 + 521 + mtpmr(PMRN_PMGC0, PMGC0_FAC); 522 + isync(); 523 + } 524 + event->destroy = hw_perf_event_destroy; 525 + 526 + if (err) 527 + return ERR_PTR(err); 528 + return &fsl_emb_pmu; 529 + } 530 + 531 + /* 532 + * A counter has overflowed; update its count and record 533 + * things if requested. Note that interrupts are hard-disabled 534 + * here so there is no possibility of being interrupted. 535 + */ 536 + static void record_and_restart(struct perf_event *event, unsigned long val, 537 + struct pt_regs *regs, int nmi) 538 + { 539 + u64 period = event->hw.sample_period; 540 + s64 prev, delta, left; 541 + int record = 0; 542 + 543 + /* we don't have to worry about interrupts here */ 544 + prev = atomic64_read(&event->hw.prev_count); 545 + delta = (val - prev) & 0xfffffffful; 546 + atomic64_add(delta, &event->count); 547 + 548 + /* 549 + * See if the total period for this event has expired, 550 + * and update for the next period. 551 + */ 552 + val = 0; 553 + left = atomic64_read(&event->hw.period_left) - delta; 554 + if (period) { 555 + if (left <= 0) { 556 + left += period; 557 + if (left <= 0) 558 + left = period; 559 + record = 1; 560 + } 561 + if (left < 0x80000000LL) 562 + val = 0x80000000LL - left; 563 + } 564 + 565 + /* 566 + * Finally record data if requested. 567 + */ 568 + if (record) { 569 + struct perf_sample_data data = { 570 + .period = event->hw.last_period, 571 + }; 572 + 573 + if (perf_event_overflow(event, nmi, &data, regs)) { 574 + /* 575 + * Interrupts are coming too fast - throttle them 576 + * by setting the event to 0, so it will be 577 + * at least 2^30 cycles until the next interrupt 578 + * (assuming each event counts at most 2 counts 579 + * per cycle). 580 + */ 581 + val = 0; 582 + left = ~0ULL >> 1; 583 + } 584 + } 585 + 586 + write_pmc(event->hw.idx, val); 587 + atomic64_set(&event->hw.prev_count, val); 588 + atomic64_set(&event->hw.period_left, left); 589 + perf_event_update_userpage(event); 590 + } 591 + 592 + static void perf_event_interrupt(struct pt_regs *regs) 593 + { 594 + int i; 595 + struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); 596 + struct perf_event *event; 597 + unsigned long val; 598 + int found = 0; 599 + int nmi; 600 + 601 + nmi = perf_intr_is_nmi(regs); 602 + if (nmi) 603 + nmi_enter(); 604 + else 605 + irq_enter(); 606 + 607 + for (i = 0; i < ppmu->n_counter; ++i) { 608 + event = cpuhw->event[i]; 609 + 610 + val = read_pmc(i); 611 + if ((int)val < 0) { 612 + if (event) { 613 + /* event has overflowed */ 614 + found = 1; 615 + record_and_restart(event, val, regs, nmi); 616 + } else { 617 + /* 618 + * Disabled counter is negative, 619 + * reset it just in case. 620 + */ 621 + write_pmc(i, 0); 622 + } 623 + } 624 + } 625 + 626 + /* PMM will keep counters frozen until we return from the interrupt. */ 627 + mtmsr(mfmsr() | MSR_PMM); 628 + mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE); 629 + isync(); 630 + 631 + if (nmi) 632 + nmi_exit(); 633 + else 634 + irq_exit(); 635 + } 636 + 637 + void hw_perf_event_setup(int cpu) 638 + { 639 + struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); 640 + 641 + memset(cpuhw, 0, sizeof(*cpuhw)); 642 + } 643 + 644 + int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu) 645 + { 646 + if (ppmu) 647 + return -EBUSY; /* something's already registered */ 648 + 649 + ppmu = pmu; 650 + pr_info("%s performance monitor hardware support registered\n", 651 + pmu->name); 652 + 653 + return 0; 654 + }
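Both fsl_emb_pmu_read() and record_and_restart() use the same idiom for the 32-bit counters: subtract in a wider type, then mask with 0xffffffff, so a counter that wrapped between reads still yields the right delta. A standalone sketch of the arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t prev = 0xfffffff0ULL; /* sampled just before the wrap */
        uint64_t val = 0x00000010ULL;  /* sampled just after the wrap */

        /* modular subtraction: 0x10 counts to the wrap, 0x10 after it */
        uint64_t delta = (val - prev) & 0xffffffffULL;

        printf("delta = %llu\n", (unsigned long long)delta); /* prints 32 */
        return 0;
}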
+5 -5
arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
··· 24 24 25 25 #include "pq2.h" 26 26 27 - static DEFINE_SPINLOCK(pci_pic_lock); 27 + static DEFINE_RAW_SPINLOCK(pci_pic_lock); 28 28 29 29 struct pq2ads_pci_pic { 30 30 struct device_node *node; ··· 45 45 46 46 if (irq != -1) { 47 47 unsigned long flags; 48 - spin_lock_irqsave(&pci_pic_lock, flags); 48 + raw_spin_lock_irqsave(&pci_pic_lock, flags); 49 49 50 50 setbits32(&priv->regs->mask, 1 << irq); 51 51 mb(); 52 52 53 - spin_unlock_irqrestore(&pci_pic_lock, flags); 53 + raw_spin_unlock_irqrestore(&pci_pic_lock, flags); 54 54 } 55 55 } 56 56 ··· 62 62 if (irq != -1) { 63 63 unsigned long flags; 64 64 65 - spin_lock_irqsave(&pci_pic_lock, flags); 65 + raw_spin_lock_irqsave(&pci_pic_lock, flags); 66 66 clrbits32(&priv->regs->mask, 1 << irq); 67 - spin_unlock_irqrestore(&pci_pic_lock, flags); 67 + raw_spin_unlock_irqrestore(&pci_pic_lock, flags); 68 68 } 69 69 } 70 70
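This conversion, and the identical ones below in socrates_fpga_pic.c, gef_pic.c and qe_ic.c, follow one rule: locks taken from irq-chip callbacks run with interrupts hard-disabled and must stay spinning locks on preempt-rt, where a plain spinlock_t may sleep. A minimal sketch of the resulting pattern, with illustrative names:

#include <linux/spinlock.h>
#include <asm/io.h>

static DEFINE_RAW_SPINLOCK(demo_pic_lock);

/* mask one interrupt line via read-modify-write of the PIC's mask reg */
static void demo_pic_mask(u32 __iomem *mask_reg, unsigned int hwirq)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&demo_pic_lock, flags); /* never sleeps */
        out_be32(mask_reg, in_be32(mask_reg) | (1 << hwirq));
        raw_spin_unlock_irqrestore(&demo_pic_lock, flags);
}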
+17 -17
arch/powerpc/platforms/85xx/socrates_fpga_pic.c
··· 50 50 51 51 #define socrates_fpga_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq) 52 52 53 - static DEFINE_SPINLOCK(socrates_fpga_pic_lock); 53 + static DEFINE_RAW_SPINLOCK(socrates_fpga_pic_lock); 54 54 55 55 static void __iomem *socrates_fpga_pic_iobase; 56 56 static struct irq_host *socrates_fpga_pic_irq_host; ··· 80 80 if (i == 3) 81 81 return NO_IRQ; 82 82 83 - spin_lock_irqsave(&socrates_fpga_pic_lock, flags); 83 + raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); 84 84 cause = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(i)); 85 - spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); 85 + raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); 86 86 for (i = SOCRATES_FPGA_NUM_IRQS - 1; i >= 0; i--) { 87 87 if (cause >> (i + 16)) 88 88 break; ··· 116 116 hwirq = socrates_fpga_irq_to_hw(virq); 117 117 118 118 irq_line = fpga_irqs[hwirq].irq_line; 119 - spin_lock_irqsave(&socrates_fpga_pic_lock, flags); 119 + raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); 120 120 mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) 121 121 & SOCRATES_FPGA_IRQ_MASK; 122 122 mask |= (1 << (hwirq + 16)); 123 123 socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask); 124 - spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); 124 + raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); 125 125 } 126 126 127 127 static void socrates_fpga_pic_mask(unsigned int virq) ··· 134 134 hwirq = socrates_fpga_irq_to_hw(virq); 135 135 136 136 irq_line = fpga_irqs[hwirq].irq_line; 137 - spin_lock_irqsave(&socrates_fpga_pic_lock, flags); 137 + raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); 138 138 mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) 139 139 & SOCRATES_FPGA_IRQ_MASK; 140 140 mask &= ~(1 << hwirq); 141 141 socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask); 142 - spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); 142 + raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); 143 143 } 144 144 145 145 static void socrates_fpga_pic_mask_ack(unsigned int virq) ··· 152 152 hwirq = socrates_fpga_irq_to_hw(virq); 153 153 154 154 irq_line = fpga_irqs[hwirq].irq_line; 155 - spin_lock_irqsave(&socrates_fpga_pic_lock, flags); 155 + raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); 156 156 mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) 157 157 & SOCRATES_FPGA_IRQ_MASK; 158 158 mask &= ~(1 << hwirq); 159 159 mask |= (1 << (hwirq + 16)); 160 160 socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask); 161 - spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); 161 + raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); 162 162 } 163 163 164 164 static void socrates_fpga_pic_unmask(unsigned int virq) ··· 171 171 hwirq = socrates_fpga_irq_to_hw(virq); 172 172 173 173 irq_line = fpga_irqs[hwirq].irq_line; 174 - spin_lock_irqsave(&socrates_fpga_pic_lock, flags); 174 + raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); 175 175 mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) 176 176 & SOCRATES_FPGA_IRQ_MASK; 177 177 mask |= (1 << hwirq); 178 178 socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask); 179 - spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); 179 + raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); 180 180 } 181 181 182 182 static void socrates_fpga_pic_eoi(unsigned int virq) ··· 189 189 hwirq = socrates_fpga_irq_to_hw(virq); 190 190 191 191 irq_line = fpga_irqs[hwirq].irq_line; 192 - spin_lock_irqsave(&socrates_fpga_pic_lock, flags); 192 + raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); 193 193 mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) 194 194 & SOCRATES_FPGA_IRQ_MASK; 195 195 mask |= (1 << (hwirq + 16)); 196 196 socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask); 197 - spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); 197 + raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); 198 198 } 199 199 200 200 static int socrates_fpga_pic_set_type(unsigned int virq, ··· 220 220 default: 221 221 return -EINVAL; 222 222 } 223 - spin_lock_irqsave(&socrates_fpga_pic_lock, flags); 223 + raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); 224 224 mask = socrates_fpga_pic_read(FPGA_PIC_IRQCFG); 225 225 if (polarity) 226 226 mask |= (1 << hwirq); 227 227 else 228 228 mask &= ~(1 << hwirq); 229 229 socrates_fpga_pic_write(FPGA_PIC_IRQCFG, mask); 230 - spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); 230 + raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); 231 231 return 0; 232 232 } 233 233 ··· 314 314 315 315 socrates_fpga_pic_iobase = of_iomap(pic, 0); 316 316 317 - spin_lock_irqsave(&socrates_fpga_pic_lock, flags); 317 + raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); 318 318 socrates_fpga_pic_write(FPGA_PIC_IRQMASK(0), 319 319 SOCRATES_FPGA_IRQ_MASK << 16); 320 320 socrates_fpga_pic_write(FPGA_PIC_IRQMASK(1), 321 321 SOCRATES_FPGA_IRQ_MASK << 16); 322 322 socrates_fpga_pic_write(FPGA_PIC_IRQMASK(2), 323 323 SOCRATES_FPGA_IRQ_MASK << 16); 324 - spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); 324 + raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); 325 325 326 326 pr_info("FPGA PIC: Setting up Socrates FPGA PIC\n"); 327 327 }
+6 -6
arch/powerpc/platforms/86xx/Kconfig
··· 33 33 This option enables support for the MPC8610 HPCD board. 34 34 35 35 config GEF_PPC9A 36 - bool "GE Fanuc PPC9A" 36 + bool "GE PPC9A" 37 37 select DEFAULT_UIMAGE 38 38 select MMIO_NVRAM 39 39 select GENERIC_GPIO 40 40 select ARCH_REQUIRE_GPIOLIB 41 41 help 42 - This option enables support for GE Fanuc's PPC9A. 42 + This option enables support for the GE PPC9A. 43 43 44 44 config GEF_SBC310 45 - bool "GE Fanuc SBC310" 45 + bool "GE SBC310" 46 46 select DEFAULT_UIMAGE 47 47 select MMIO_NVRAM 48 48 select GENERIC_GPIO 49 49 select ARCH_REQUIRE_GPIOLIB 50 50 help 51 - This option enables support for GE Fanuc's SBC310. 51 + This option enables support for the GE SBC310. 52 52 53 53 config GEF_SBC610 54 - bool "GE Fanuc SBC610" 54 + bool "GE SBC610" 55 55 select DEFAULT_UIMAGE 56 56 select MMIO_NVRAM 57 57 select GENERIC_GPIO 58 58 select ARCH_REQUIRE_GPIOLIB 59 59 select HAS_RAPIDIO 60 60 help 61 - This option enables support for GE Fanuc's SBC610. 61 + This option enables support for the GE SBC610. 62 62 63 63 endif 64 64
+5 -5
arch/powerpc/platforms/86xx/gef_gpio.c
··· 1 1 /* 2 - * Driver for GE Fanuc's FPGA based GPIO pins 2 + * Driver for GE FPGA based GPIO 3 3 * 4 - * Author: Martyn Welch <martyn.welch@gefanuc.com> 4 + * Author: Martyn Welch <martyn.welch@ge.com> 5 5 * 6 - * 2008 (c) GE Fanuc Intelligent Platforms Embedded Systems, Inc. 6 + * 2008 (c) GE Intelligent Platforms Embedded Systems, Inc. 7 7 * 8 8 * This file is licensed under the terms of the GNU General Public License 9 9 * version 2. This program is licensed "as is" without any warranty of any ··· 164 164 }; 165 165 arch_initcall(gef_gpio_init); 166 166 167 - MODULE_DESCRIPTION("GE Fanuc I/O FPGA GPIO driver"); 168 - MODULE_AUTHOR("Martyn Welch <martyn.welch@gefanuc.com"); 167 + MODULE_DESCRIPTION("GE I/O FPGA GPIO driver"); 168 + MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com"); 169 169 MODULE_LICENSE("GPL");
+10 -10
arch/powerpc/platforms/86xx/gef_pic.c
··· 1 1 /* 2 - * Interrupt handling for GE Fanuc's FPGA based PIC 2 + * Interrupt handling for GE FPGA based PIC 3 3 * 4 - * Author: Martyn Welch <martyn.welch@gefanuc.com> 4 + * Author: Martyn Welch <martyn.welch@ge.com> 5 5 * 6 - * 2008 (c) GE Fanuc Intelligent Platforms Embedded Systems, Inc. 6 + * 2008 (c) GE Intelligent Platforms Embedded Systems, Inc. 7 7 * 8 8 * This file is licensed under the terms of the GNU General Public License 9 9 * version 2. This program is licensed "as is" without any warranty of any ··· 49 49 #define gef_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq) 50 50 51 51 52 - static DEFINE_SPINLOCK(gef_pic_lock); 52 + static DEFINE_RAW_SPINLOCK(gef_pic_lock); 53 53 54 54 static void __iomem *gef_pic_irq_reg_base; 55 55 static struct irq_host *gef_pic_irq_host; ··· 118 118 119 119 hwirq = gef_irq_to_hw(virq); 120 120 121 - spin_lock_irqsave(&gef_pic_lock, flags); 121 + raw_spin_lock_irqsave(&gef_pic_lock, flags); 122 122 mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0)); 123 123 mask &= ~(1 << hwirq); 124 124 out_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0), mask); 125 - spin_unlock_irqrestore(&gef_pic_lock, flags); 125 + raw_spin_unlock_irqrestore(&gef_pic_lock, flags); 126 126 } 127 127 128 128 static void gef_pic_mask_ack(unsigned int virq) ··· 141 141 142 142 hwirq = gef_irq_to_hw(virq); 143 143 144 - spin_lock_irqsave(&gef_pic_lock, flags); 144 + raw_spin_lock_irqsave(&gef_pic_lock, flags); 145 145 mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0)); 146 146 mask |= (1 << hwirq); 147 147 out_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0), mask); 148 - spin_unlock_irqrestore(&gef_pic_lock, flags); 148 + raw_spin_unlock_irqrestore(&gef_pic_lock, flags); 149 149 } 150 150 151 151 static struct irq_chip gef_pic_chip = { ··· 199 199 /* Map the devices registers into memory */ 200 200 gef_pic_irq_reg_base = of_iomap(np, 0); 201 201 202 - spin_lock_irqsave(&gef_pic_lock, flags); 202 + raw_spin_lock_irqsave(&gef_pic_lock, flags); 203 203 204 204 /* Initialise everything as masked. */ 205 205 out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU0_INTR_MASK, 0); ··· 208 208 out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU0_MCP_MASK, 0); 209 209 out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU1_MCP_MASK, 0); 210 210 211 - spin_unlock_irqrestore(&gef_pic_lock, flags); 211 + raw_spin_unlock_irqrestore(&gef_pic_lock, flags); 212 212 213 213 /* Map controller */ 214 214 gef_pic_cascade_irq = irq_of_parse_and_map(np, 0);
+6 -6
arch/powerpc/platforms/86xx/gef_ppc9a.c
··· 1 1 /* 2 - * GE Fanuc PPC9A board support 2 + * GE PPC9A board support 3 3 * 4 - * Author: Martyn Welch <martyn.welch@gefanuc.com> 4 + * Author: Martyn Welch <martyn.welch@ge.com> 5 5 * 6 - * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc. 6 + * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc. 7 7 * 8 8 * This program is free software; you can redistribute it and/or modify it 9 9 * under the terms of the GNU General Public License as published by the ··· 82 82 } 83 83 #endif 84 84 85 - printk(KERN_INFO "GE Fanuc Intelligent Platforms PPC9A 6U VME SBC\n"); 85 + printk(KERN_INFO "GE Intelligent Platforms PPC9A 6U VME SBC\n"); 86 86 87 87 #ifdef CONFIG_SMP 88 88 mpc86xx_smp_init(); ··· 151 151 { 152 152 uint svid = mfspr(SPRN_SVR); 153 153 154 - seq_printf(m, "Vendor\t\t: GE Fanuc Intelligent Platforms\n"); 154 + seq_printf(m, "Vendor\t\t: GE Intelligent Platforms\n"); 155 155 156 156 seq_printf(m, "Revision\t: %u%c\n", gef_ppc9a_get_pcb_rev(), 157 157 ('A' + gef_ppc9a_get_board_rev())); ··· 235 235 machine_device_initcall(gef_ppc9a, declare_of_platform_devices); 236 236 237 237 define_machine(gef_ppc9a) { 238 - .name = "GE Fanuc PPC9A", 238 + .name = "GE PPC9A", 239 239 .probe = gef_ppc9a_probe, 240 240 .setup_arch = gef_ppc9a_setup_arch, 241 241 .init_IRQ = gef_ppc9a_init_irq,
+6 -6
arch/powerpc/platforms/86xx/gef_sbc310.c
··· 1 1 /* 2 - * GE Fanuc SBC310 board support 2 + * GE SBC310 board support 3 3 * 4 - * Author: Martyn Welch <martyn.welch@gefanuc.com> 4 + * Author: Martyn Welch <martyn.welch@ge.com> 5 5 * 6 - * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc. 6 + * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc. 7 7 * 8 8 * This program is free software; you can redistribute it and/or modify it 9 9 * under the terms of the GNU General Public License as published by the ··· 82 82 } 83 83 #endif 84 84 85 - printk(KERN_INFO "GE Fanuc Intelligent Platforms SBC310 6U VPX SBC\n"); 85 + printk(KERN_INFO "GE Intelligent Platforms SBC310 6U VPX SBC\n"); 86 86 87 87 #ifdef CONFIG_SMP 88 88 mpc86xx_smp_init(); ··· 142 142 { 143 143 uint svid = mfspr(SPRN_SVR); 144 144 145 - seq_printf(m, "Vendor\t\t: GE Fanuc Intelligent Platforms\n"); 145 + seq_printf(m, "Vendor\t\t: GE Intelligent Platforms\n"); 146 146 147 147 seq_printf(m, "Board ID\t: 0x%2.2x\n", gef_sbc310_get_board_id()); 148 148 seq_printf(m, "Revision\t: %u%c\n", gef_sbc310_get_pcb_rev(), ··· 223 223 machine_device_initcall(gef_sbc310, declare_of_platform_devices); 224 224 225 225 define_machine(gef_sbc310) { 226 - .name = "GE Fanuc SBC310", 226 + .name = "GE SBC310", 227 227 .probe = gef_sbc310_probe, 228 228 .setup_arch = gef_sbc310_setup_arch, 229 229 .init_IRQ = gef_sbc310_init_irq,
+6 -6
arch/powerpc/platforms/86xx/gef_sbc610.c
··· 1 1 /* 2 - * GE Fanuc SBC610 board support 2 + * GE SBC610 board support 3 3 * 4 - * Author: Martyn Welch <martyn.welch@gefanuc.com> 4 + * Author: Martyn Welch <martyn.welch@ge.com> 5 5 * 6 - * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc. 6 + * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc. 7 7 * 8 8 * This program is free software; you can redistribute it and/or modify it 9 9 * under the terms of the GNU General Public License as published by the ··· 82 82 } 83 83 #endif 84 84 85 - printk(KERN_INFO "GE Fanuc Intelligent Platforms SBC610 6U VPX SBC\n"); 85 + printk(KERN_INFO "GE Intelligent Platforms SBC610 6U VPX SBC\n"); 86 86 87 87 #ifdef CONFIG_SMP 88 88 mpc86xx_smp_init(); ··· 133 133 { 134 134 uint svid = mfspr(SPRN_SVR); 135 135 136 - seq_printf(m, "Vendor\t\t: GE Fanuc Intelligent Platforms\n"); 136 + seq_printf(m, "Vendor\t\t: GE Intelligent Platforms\n"); 137 137 138 138 seq_printf(m, "Revision\t: %u%c\n", gef_sbc610_get_pcb_rev(), 139 139 ('A' + gef_sbc610_get_board_rev() - 1)); ··· 212 212 machine_device_initcall(gef_sbc610, declare_of_platform_devices); 213 213 214 214 define_machine(gef_sbc610) { 215 - .name = "GE Fanuc SBC610", 215 + .name = "GE SBC610", 216 216 .probe = gef_sbc610_probe, 217 217 .setup_arch = gef_sbc610_setup_arch, 218 218 .init_IRQ = gef_sbc610_init_irq,
+10
arch/powerpc/platforms/Kconfig.cputype
··· 144 144 and some e300 cores (c3 and c4). Select this only if your 145 145 core supports the Embedded Performance Monitor APU 146 146 147 + config FSL_EMB_PERF_EVENT 148 + bool 149 + depends on FSL_EMB_PERFMON && PERF_EVENTS && !PPC_PERF_CTRS 150 + default y 151 + 152 + config FSL_EMB_PERF_EVENT_E500 153 + bool 154 + depends on FSL_EMB_PERF_EVENT && E500 155 + default y 156 + 147 157 config 4xx 148 158 bool 149 159 depends on 40x || 44x
+1 -1
arch/powerpc/sysdev/cpm2_pic.h
··· 3 3 4 4 extern unsigned int cpm2_get_irq(void); 5 5 6 - extern void cpm2_pic_init(struct device_node*); 6 + extern void cpm2_pic_init(struct device_node *); 7 7 8 8 #endif /* _PPC_KERNEL_CPM2_H */
+5 -5
arch/powerpc/sysdev/qe_lib/qe_ic.c
··· 33 33 34 34 #include "qe_ic.h" 35 35 36 - static DEFINE_SPINLOCK(qe_ic_lock); 36 + static DEFINE_RAW_SPINLOCK(qe_ic_lock); 37 37 38 38 static struct qe_ic_info qe_ic_info[] = { 39 39 [1] = { ··· 201 201 unsigned long flags; 202 202 u32 temp; 203 203 204 - spin_lock_irqsave(&qe_ic_lock, flags); 204 + raw_spin_lock_irqsave(&qe_ic_lock, flags); 205 205 206 206 temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg); 207 207 qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg, 208 208 temp | qe_ic_info[src].mask); 209 209 210 - spin_unlock_irqrestore(&qe_ic_lock, flags); 210 + raw_spin_unlock_irqrestore(&qe_ic_lock, flags); 211 211 } 212 212 213 213 static void qe_ic_mask_irq(unsigned int virq) ··· 217 217 unsigned long flags; 218 218 u32 temp; 219 219 220 - spin_lock_irqsave(&qe_ic_lock, flags); 220 + raw_spin_lock_irqsave(&qe_ic_lock, flags); 221 221 222 222 temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg); 223 223 qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg, ··· 233 233 */ 234 234 mb(); 235 235 236 - spin_unlock_irqrestore(&qe_ic_lock, flags); 236 + raw_spin_unlock_irqrestore(&qe_ic_lock, flags); 237 237 } 238 238 239 239 static struct irq_chip qe_ic_irq_chip = {