/* tools/perf/perf.h — from Linux v2.6.36-rc2 (137 lines, 3.4 kB) */
1#ifndef _PERF_PERF_H 2#define _PERF_PERF_H 3 4struct winsize; 5 6void get_term_dimensions(struct winsize *ws); 7 8#if defined(__i386__) 9#include "../../arch/x86/include/asm/unistd.h" 10#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory") 11#define cpu_relax() asm volatile("rep; nop" ::: "memory"); 12#endif 13 14#if defined(__x86_64__) 15#include "../../arch/x86/include/asm/unistd.h" 16#define rmb() asm volatile("lfence" ::: "memory") 17#define cpu_relax() asm volatile("rep; nop" ::: "memory"); 18#endif 19 20#ifdef __powerpc__ 21#include "../../arch/powerpc/include/asm/unistd.h" 22#define rmb() asm volatile ("sync" ::: "memory") 23#define cpu_relax() asm volatile ("" ::: "memory"); 24#endif 25 26#ifdef __s390__ 27#include "../../arch/s390/include/asm/unistd.h" 28#define rmb() asm volatile("bcr 15,0" ::: "memory") 29#define cpu_relax() asm volatile("" ::: "memory"); 30#endif 31 32#ifdef __sh__ 33#include "../../arch/sh/include/asm/unistd.h" 34#if defined(__SH4A__) || defined(__SH5__) 35# define rmb() asm volatile("synco" ::: "memory") 36#else 37# define rmb() asm volatile("" ::: "memory") 38#endif 39#define cpu_relax() asm volatile("" ::: "memory") 40#endif 41 42#ifdef __hppa__ 43#include "../../arch/parisc/include/asm/unistd.h" 44#define rmb() asm volatile("" ::: "memory") 45#define cpu_relax() asm volatile("" ::: "memory"); 46#endif 47 48#ifdef __sparc__ 49#include "../../arch/sparc/include/asm/unistd.h" 50#define rmb() asm volatile("":::"memory") 51#define cpu_relax() asm volatile("":::"memory") 52#endif 53 54#ifdef __alpha__ 55#include "../../arch/alpha/include/asm/unistd.h" 56#define rmb() asm volatile("mb" ::: "memory") 57#define cpu_relax() asm volatile("" ::: "memory") 58#endif 59 60#ifdef __ia64__ 61#include "../../arch/ia64/include/asm/unistd.h" 62#define rmb() asm volatile ("mf" ::: "memory") 63#define cpu_relax() asm volatile ("hint @pause" ::: "memory") 64#endif 65 66#ifdef __arm__ 67#include "../../arch/arm/include/asm/unistd.h" 68/* 69 * 
Use the __kuser_memory_barrier helper in the CPU helper page. See 70 * arch/arm/kernel/entry-armv.S in the kernel source for details. 71 */ 72#define rmb() ((void(*)(void))0xffff0fa0)() 73#define cpu_relax() asm volatile("":::"memory") 74#endif 75 76#include <time.h> 77#include <unistd.h> 78#include <sys/types.h> 79#include <sys/syscall.h> 80 81#include "../../include/linux/perf_event.h" 82#include "util/types.h" 83#include <stdbool.h> 84 85/* 86 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all 87 * counters in the current task. 88 */ 89#define PR_TASK_PERF_EVENTS_DISABLE 31 90#define PR_TASK_PERF_EVENTS_ENABLE 32 91 92#ifndef NSEC_PER_SEC 93# define NSEC_PER_SEC 1000000000ULL 94#endif 95 96static inline unsigned long long rdclock(void) 97{ 98 struct timespec ts; 99 100 clock_gettime(CLOCK_MONOTONIC, &ts); 101 return ts.tv_sec * 1000000000ULL + ts.tv_nsec; 102} 103 104/* 105 * Pick up some kernel type conventions: 106 */ 107#define __user 108#define asmlinkage 109 110#define unlikely(x) __builtin_expect(!!(x), 0) 111#define min(x, y) ({ \ 112 typeof(x) _min1 = (x); \ 113 typeof(y) _min2 = (y); \ 114 (void) (&_min1 == &_min2); \ 115 _min1 < _min2 ? _min1 : _min2; }) 116 117static inline int 118sys_perf_event_open(struct perf_event_attr *attr, 119 pid_t pid, int cpu, int group_fd, 120 unsigned long flags) 121{ 122 attr->size = sizeof(*attr); 123 return syscall(__NR_perf_event_open, attr, pid, cpu, 124 group_fd, flags); 125} 126 127#define MAX_COUNTERS 256 128#define MAX_NR_CPUS 256 129 130struct ip_callchain { 131 u64 nr; 132 u64 ips[0]; 133}; 134 135extern bool perf_host, perf_guest; 136 137#endif