/* perf.h, as of v3.14-rc6 */
#ifndef _PERF_PERF_H
#define _PERF_PERF_H

#include <asm/unistd.h>

#if defined(__i386__)
#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory")
#define CPUINFO_PROC "model name"
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 336
#endif
#endif

#if defined(__x86_64__)
#define mb() asm volatile("mfence" ::: "memory")
#define wmb() asm volatile("sfence" ::: "memory")
#define rmb() asm volatile("lfence" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory")
#define CPUINFO_PROC "model name"
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 298
#endif
#endif

#ifdef __powerpc__
#include "../../arch/powerpc/include/uapi/asm/unistd.h"
#define mb() asm volatile ("sync" ::: "memory")
#define wmb() asm volatile ("sync" ::: "memory")
#define rmb() asm volatile ("sync" ::: "memory")
#define CPUINFO_PROC "cpu"
#endif

#ifdef __s390__
#define mb() asm volatile("bcr 15,0" ::: "memory")
#define wmb() asm volatile("bcr 15,0" ::: "memory")
#define rmb() asm volatile("bcr 15,0" ::: "memory")
#endif

#ifdef __sh__
#if defined(__SH4A__) || defined(__SH5__)
# define mb() asm volatile("synco" ::: "memory")
# define wmb() asm volatile("synco" ::: "memory")
# define rmb() asm volatile("synco" ::: "memory")
#else
# define mb() asm volatile("" ::: "memory")
# define wmb() asm volatile("" ::: "memory")
# define rmb() asm volatile("" ::: "memory")
#endif
#define CPUINFO_PROC "cpu type"
#endif

#ifdef __hppa__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC "cpu"
#endif

#ifdef __sparc__
#ifdef __LP64__
#define mb() asm volatile("ba,pt %%xcc, 1f\n"	\
			  "membar #StoreLoad\n"	\
			  "1:\n" ::: "memory")
#else
#define mb() asm volatile("" ::: "memory")
#endif
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC "cpu"
#endif

#ifdef __alpha__
#define mb() asm volatile("mb" ::: "memory")
#define wmb() asm volatile("wmb" ::: "memory")
#define rmb() asm volatile("mb" ::: "memory")
#define CPUINFO_PROC "cpu model"
#endif

#ifdef __ia64__
#define mb() asm volatile ("mf" ::: "memory")
#define wmb() asm volatile ("mf" ::: "memory")
#define rmb() asm volatile ("mf" ::: "memory")
#define cpu_relax() asm volatile ("hint @pause" ::: "memory")
#define CPUINFO_PROC "model name"
#endif

#ifdef __arm__
/*
 * Use the __kuser_memory_barrier helper in the CPU helper page. See
 * arch/arm/kernel/entry-armv.S in the kernel source for details.
 */
#define mb() ((void(*)(void))0xffff0fa0)()
#define wmb() ((void(*)(void))0xffff0fa0)()
#define rmb() ((void(*)(void))0xffff0fa0)()
#define CPUINFO_PROC "Processor"
#endif

#ifdef __aarch64__
#define mb() asm volatile("dmb ish" ::: "memory")
#define wmb() asm volatile("dmb ishst" ::: "memory")
#define rmb() asm volatile("dmb ishld" ::: "memory")
#define cpu_relax() asm volatile("yield" ::: "memory")
#endif

#ifdef __mips__
#define mb() asm volatile(		\
		".set mips2\n\t"	\
		"sync\n\t"		\
		".set mips0"		\
		: /* no output */	\
		: /* no input */	\
		: "memory")
#define wmb() mb()
#define rmb() mb()
#define CPUINFO_PROC "cpu model"
#endif

#ifdef __arc__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC "Processor"
#endif

#ifdef __metag__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC "CPU"
#endif

#ifdef __xtensa__
#define mb() asm volatile("memw" ::: "memory")
#define wmb() asm volatile("memw" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC "core ID"
#endif

#define barrier() asm volatile ("" ::: "memory")

#ifndef cpu_relax
#define cpu_relax() barrier()
#endif

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <linux/perf_event.h>
#include "util/types.h"
#include <stdbool.h>
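
/*
 * Illustrative only: the barriers above exist so tools can follow the
 * lock-free protocol of the perf mmap ring buffer.  Below is a minimal
 * sketch, assuming the caller has mmap()ed the event fd and "pc" points
 * at the resulting perf_event_mmap_page; these example_* helpers are
 * hypothetical and not part of the original header, though perf's own
 * mmap-consuming code implements the same pattern.
 */
static inline u64 example_mmap__read_head(struct perf_event_mmap_page *pc)
{
	u64 head = ACCESS_ONCE(pc->data_head);

	rmb(); /* order the data_head load before the record loads */
	return head;
}

static inline void example_mmap__write_tail(struct perf_event_mmap_page *pc,
					    u64 tail)
{
	mb(); /* finish consuming records before releasing the space */
	pc->data_tail = tail;
}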

/*
 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
 * counters in the current task.
 */
#define PR_TASK_PERF_EVENTS_DISABLE 31
#define PR_TASK_PERF_EVENTS_ENABLE 32

#ifndef NSEC_PER_SEC
# define NSEC_PER_SEC 1000000000ULL
#endif
#ifndef NSEC_PER_USEC
# define NSEC_PER_USEC 1000ULL
#endif

static inline unsigned long long rdclock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

/*
 * Pick up some kernel type conventions:
 */
#define __user
#define asmlinkage

#define unlikely(x) __builtin_expect(!!(x), 0)
#define min(x, y) ({				\
	typeof(x) _min1 = (x);			\
	typeof(y) _min2 = (y);			\
	(void) (&_min1 == &_min2);		\
	_min1 < _min2 ? _min1 : _min2; })

extern bool test_attr__enabled;
void test_attr__init(void);
void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
		     int fd, int group_fd, unsigned long flags);

static inline int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, int cpu, int group_fd,
		    unsigned long flags)
{
	int fd;

	fd = syscall(__NR_perf_event_open, attr, pid, cpu,
		     group_fd, flags);

	if (unlikely(test_attr__enabled))
		test_attr__open(attr, pid, cpu, fd, group_fd, flags);

	return fd;
}
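
/*
 * Illustrative only: a minimal sketch of using the wrapper above.  This
 * hypothetical helper is not part of the original header; it opens a
 * CPU-cycles counter for the calling thread on any CPU, following the
 * perf_event_open(2) conventions.  The counter starts disabled; the
 * caller can flip it on with ioctl(fd, PERF_EVENT_IOC_ENABLE, 0) and
 * then read() a u64 count from the fd.
 */
static inline int example_open_cycles_counter(void)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.size		= sizeof(attr),
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.disabled	= 1,	/* created stopped */
		.exclude_kernel	= 1,	/* count user space only */
	};

	/* pid == 0, cpu == -1: this thread, any CPU; no group, no flags */
	return sys_perf_event_open(&attr, 0, -1, -1, 0);
}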

#define MAX_COUNTERS 256
#define MAX_NR_CPUS  256

struct ip_callchain {
	u64 nr;
	u64 ips[0];
};

struct branch_flags {
	u64 mispred:1;
	u64 predicted:1;
	u64 in_tx:1;
	u64 abort:1;
	u64 reserved:60;
};

struct branch_entry {
	u64 from;
	u64 to;
	struct branch_flags flags;
};

struct branch_stack {
	u64 nr;
	struct branch_entry entries[0];
};

extern const char *input_name;
extern bool perf_host, perf_guest;
extern const char perf_version_string[];

void pthread__unblock_sigwinch(void);

#include "util/target.h"

enum perf_call_graph_mode {
	CALLCHAIN_NONE,
	CALLCHAIN_FP,
	CALLCHAIN_DWARF
};

struct record_opts {
	struct target target;
	int	call_graph;
	bool	group;
	bool	inherit_stat;
	bool	no_buffering;
	bool	no_inherit;
	bool	no_inherit_set;
	bool	no_samples;
	bool	raw_samples;
	bool	sample_address;
	bool	sample_weight;
	bool	sample_time;
	bool	period;
	unsigned int freq;
	unsigned int mmap_pages;
	unsigned int user_freq;
	u64	branch_stack;
	u64	default_interval;
	u64	user_interval;
	u16	stack_dump_size;
	bool	sample_transaction;
	unsigned initial_delay;
};

#endif /* _PERF_PERF_H */