tools/perf/perf.h at v2.6.38-rc2
#ifndef _PERF_PERF_H
#define _PERF_PERF_H

struct winsize;

void get_term_dimensions(struct winsize *ws);

#if defined(__i386__)
#include "../../arch/x86/include/asm/unistd.h"
#define rmb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory");
#endif

#if defined(__x86_64__)
#include "../../arch/x86/include/asm/unistd.h"
#define rmb()		asm volatile("lfence" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory");
#endif

#ifdef __powerpc__
#include "../../arch/powerpc/include/asm/unistd.h"
#define rmb()		asm volatile ("sync" ::: "memory")
#define cpu_relax()	asm volatile ("" ::: "memory");
#endif

#ifdef __s390__
#include "../../arch/s390/include/asm/unistd.h"
#define rmb()		asm volatile("bcr 15,0" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory");
#endif

#ifdef __sh__
#include "../../arch/sh/include/asm/unistd.h"
#if defined(__SH4A__) || defined(__SH5__)
# define rmb()		asm volatile("synco" ::: "memory")
#else
# define rmb()		asm volatile("" ::: "memory")
#endif
#define cpu_relax()	asm volatile("" ::: "memory")
#endif

#ifdef __hppa__
#include "../../arch/parisc/include/asm/unistd.h"
#define rmb()		asm volatile("" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory");
#endif

#ifdef __sparc__
#include "../../arch/sparc/include/asm/unistd.h"
#define rmb()		asm volatile("":::"memory")
#define cpu_relax()	asm volatile("":::"memory")
#endif

#ifdef __alpha__
#include "../../arch/alpha/include/asm/unistd.h"
#define rmb()		asm volatile("mb" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#endif

#ifdef __ia64__
#include "../../arch/ia64/include/asm/unistd.h"
#define rmb()		asm volatile ("mf" ::: "memory")
#define cpu_relax()	asm volatile ("hint @pause" ::: "memory")
#endif

#ifdef __arm__
#include "../../arch/arm/include/asm/unistd.h"
/*
 * Use the __kuser_memory_barrier helper in the CPU helper page. See
 * arch/arm/kernel/entry-armv.S in the kernel source for details.
 */
#define rmb()		((void(*)(void))0xffff0fa0)()
#define cpu_relax()	asm volatile("":::"memory")
#endif

#ifdef __mips__
#include "../../arch/mips/include/asm/unistd.h"
#define rmb()		asm volatile(					\
				".set	mips2\n\t"			\
				"sync\n\t"				\
				".set	mips0"				\
				: /* no output */			\
				: /* no input */			\
				: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#endif

#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include "../../include/linux/perf_event.h"
#include "util/types.h"
#include <stdbool.h>

/*
 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
 * counters in the current task.
 */
#define PR_TASK_PERF_EVENTS_DISABLE   31
#define PR_TASK_PERF_EVENTS_ENABLE    32

#ifndef NSEC_PER_SEC
# define NSEC_PER_SEC			1000000000ULL
#endif

static inline unsigned long long rdclock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/*
 * Pick up some kernel type conventions:
 */
#define __user
#define asmlinkage

#define unlikely(x)	__builtin_expect(!!(x), 0)
#define min(x, y) ({				\
	typeof(x) _min1 = (x);			\
	typeof(y) _min2 = (y);			\
	(void) (&_min1 == &_min2);		\
	_min1 < _min2 ? _min1 : _min2; })

static inline int
sys_perf_event_open(struct perf_event_attr *attr,
		      pid_t pid, int cpu, int group_fd,
		      unsigned long flags)
{
	attr->size = sizeof(*attr);
	return syscall(__NR_perf_event_open, attr, pid, cpu,
		       group_fd, flags);
}

#define MAX_COUNTERS			256
#define MAX_NR_CPUS			256

struct ip_callchain {
	u64 nr;
	u64 ips[0];
};

extern bool perf_host, perf_guest;

#endif
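Note on the per-architecture rmb() definitions: user space consumes the perf mmap ring buffer locklessly, and the comment in include/linux/perf_event.h asks readers to issue a read barrier after loading data_head and before touching the event records. The helper below is a minimal standalone sketch of that pattern, not part of perf.h; ring_buffer_available() is a hypothetical name, and the portable fallback barrier is an assumption made only for this sketch.

#include <linux/perf_event.h>
#include <stdint.h>

#if defined(__x86_64__)
#define rmb()	asm volatile("lfence" ::: "memory")	/* as in perf.h above */
#else
#define rmb()	__sync_synchronize()			/* portable fallback for this sketch */
#endif

/* How many bytes of event records the kernel has published but user
 * space has not yet consumed. data_head is written by the kernel;
 * the barrier orders subsequent record reads after that load. */
static uint64_t ring_buffer_available(struct perf_event_mmap_page *pc)
{
	uint64_t head = pc->data_head;

	rmb();
	return head - pc->data_tail;
}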
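For context on how the sys_perf_event_open() wrapper and rdclock() are typically used, here is a standalone sketch, not part of perf.h, that opens one hardware instruction counter for the calling thread, times a busy loop with CLOCK_MONOTONIC, and reads back the count. The event and ioctl names come from linux/perf_event.h; perf_event_open_fd() and the workload loop are illustrative assumptions.

#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <linux/perf_event.h>

static long perf_event_open_fd(struct perf_event_attr *attr, pid_t pid,
			       int cpu, int group_fd, unsigned long flags)
{
	/* Same shape as sys_perf_event_open() in perf.h. */
	attr->size = sizeof(*attr);
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long count = 0, t0, t1;
	struct timespec ts;
	volatile int i;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;		/* start stopped, enable explicitly */
	attr.exclude_kernel = 1;
	attr.exclude_hv = 1;

	/* pid = 0, cpu = -1: count the calling thread on any CPU. */
	fd = perf_event_open_fd(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	clock_gettime(CLOCK_MONOTONIC, &ts);
	t0 = ts.tv_sec * 1000000000ULL + ts.tv_nsec;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (i = 0; i < 1000000; i++)
		;			/* workload being measured */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	clock_gettime(CLOCK_MONOTONIC, &ts);
	t1 = ts.tv_sec * 1000000000ULL + ts.tv_nsec;

	if (read(fd, &count, sizeof(count)) != sizeof(count))
		perror("read");
	printf("%llu instructions in %llu ns\n", count, t1 - t0);
	close(fd);
	return 0;
}

Setting attr->size before the syscall, as the wrapper in perf.h does, tells the kernel which perf_event_attr layout user space was built against, so the ABI can grow without breaking older callers.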