tools/perf/perf.h at v2.6.34-rc2
#ifndef _PERF_PERF_H
#define _PERF_PERF_H

/*
 * Per-architecture definitions of rmb() (a read memory barrier, needed
 * when consuming the mmap'ed event ring buffer) and cpu_relax() (a
 * busy-wait hint to the CPU), plus the asm/unistd.h that provides
 * __NR_perf_event_open.
 */

#if defined(__i386__)
#include "../../arch/x86/include/asm/unistd.h"
#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory")
#endif

#if defined(__x86_64__)
#include "../../arch/x86/include/asm/unistd.h"
#define rmb() asm volatile("lfence" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory")
#endif

#ifdef __powerpc__
#include "../../arch/powerpc/include/asm/unistd.h"
#define rmb() asm volatile ("sync" ::: "memory")
#define cpu_relax() asm volatile ("" ::: "memory")
#endif

#ifdef __s390__
#include "../../arch/s390/include/asm/unistd.h"
#define rmb() asm volatile("bcr 15,0" ::: "memory")
#define cpu_relax() asm volatile("" ::: "memory")
#endif

#ifdef __sh__
#include "../../arch/sh/include/asm/unistd.h"
#if defined(__SH4A__) || defined(__SH5__)
# define rmb() asm volatile("synco" ::: "memory")
#else
# define rmb() asm volatile("" ::: "memory")
#endif
#define cpu_relax() asm volatile("" ::: "memory")
#endif

#ifdef __hppa__
#include "../../arch/parisc/include/asm/unistd.h"
#define rmb() asm volatile("" ::: "memory")
#define cpu_relax() asm volatile("" ::: "memory")
#endif

#ifdef __sparc__
#include "../../arch/sparc/include/asm/unistd.h"
#define rmb() asm volatile("" ::: "memory")
#define cpu_relax() asm volatile("" ::: "memory")
#endif

#ifdef __alpha__
#include "../../arch/alpha/include/asm/unistd.h"
#define rmb() asm volatile("mb" ::: "memory")
#define cpu_relax() asm volatile("" ::: "memory")
#endif

#ifdef __ia64__
#include "../../arch/ia64/include/asm/unistd.h"
#define rmb() asm volatile ("mf" ::: "memory")
#define cpu_relax() asm volatile ("hint @pause" ::: "memory")
#endif

#ifdef __arm__
#include "../../arch/arm/include/asm/unistd.h"
/*
 * Use the __kuser_memory_barrier helper in the CPU helper page. See
 * arch/arm/kernel/entry-armv.S in the kernel source for details.
 */
#define rmb() ((void(*)(void))0xffff0fa0)()
#define cpu_relax() asm volatile("" ::: "memory")
#endif

#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include "../../include/linux/perf_event.h"
#include "util/types.h"

/*
 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
 * counters in the current task.
 */
#define PR_TASK_PERF_EVENTS_DISABLE 31
#define PR_TASK_PERF_EVENTS_ENABLE 32

#ifndef NSEC_PER_SEC
# define NSEC_PER_SEC 1000000000ULL
#endif

/* Monotonic clock reading in nanoseconds, for cheap timestamping. */
static inline unsigned long long rdclock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

/*
 * Pick up some kernel type conventions:
 */
#define __user
#define asmlinkage

/* Marks symbols that may go unused, to suppress compiler warnings. */
#define __used __attribute__((__unused__))

#define unlikely(x) __builtin_expect(!!(x), 0)

/*
 * Type-safe min(): the (void) pointer comparison makes the compiler
 * warn when x and y have different types.
 */
#define min(x, y) ({			\
	typeof(x) _min1 = (x);		\
	typeof(y) _min2 = (y);		\
	(void) (&_min1 == &_min2);	\
	_min1 < _min2 ? _min1 : _min2; })

/*
 * glibc has no wrapper for perf_event_open(2), so invoke it via
 * syscall(). attr->size is filled in here so the kernel can detect
 * ABI mismatches.
 */
static inline int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, int cpu, int group_fd,
		    unsigned long flags)
{
	attr->size = sizeof(*attr);
	return syscall(__NR_perf_event_open, attr, pid, cpu,
		       group_fd, flags);
}

#define MAX_COUNTERS 256
#define MAX_NR_CPUS  256

/* A sampled callchain: nr instruction pointers follow in ips[]. */
struct ip_callchain {
	u64 nr;
	u64 ips[0];
};

#endif
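For context, a minimal usage sketch (not part of the header itself) of how sys_perf_event_open() and rdclock() fit together: it opens one hardware instruction counter on the calling thread, times a stand-in busy loop, and reads back the accumulated count. The file name, the workload loop, and the error handling are illustrative assumptions, not code from the perf tool, and the example assumes it is built inside the tools/perf tree so the header's relative includes resolve.

/* sketch.c -- hypothetical example, not part of tools/perf */
#include <stdio.h>
#include <string.h>
#include "perf.h"

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long count, t0, t1;
	volatile unsigned long i, sum = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	/* attr.disabled is left 0, so counting starts at open. */

	/* pid = 0, cpu = -1: measure this thread on any CPU. */
	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("sys_perf_event_open");
		return 1;
	}

	t0 = rdclock();
	for (i = 0; i < 10000000; i++)	/* stand-in workload */
		sum += i;
	t1 = rdclock();

	/* With no read_format set, read() returns a single u64 count. */
	if (read(fd, &count, sizeof(count)) != sizeof(count)) {
		perror("read");
		return 1;
	}

	printf("%llu instructions in %llu ns\n", count, t1 - t0);
	return 0;
}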