Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf mmap: Move perf_mmap and methods to separate mmap.[ch] files

To better organize the sources, and because we may end up using it
directly, without evlists and evsels.

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/n/tip-oiqrm7grflurnnzo2ovfnslg@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

+349 -323
+1
tools/perf/util/Build
··· 13 13 libperf-y += kallsyms.o 14 14 libperf-y += levenshtein.o 15 15 libperf-y += llvm-utils.o 16 + libperf-y += mmap.o 16 17 libperf-y += memswap.o 17 18 libperf-y += parse-events.o 18 19 libperf-y += perf_regs.o
-248
tools/perf/util/evlist.c
··· 33 33 #include <linux/log2.h> 34 34 #include <linux/err.h> 35 35 36 - static void perf_mmap__munmap(struct perf_mmap *map); 37 - static void perf_mmap__put(struct perf_mmap *map); 38 - 39 36 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) 40 37 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y) 41 38 ··· 701 704 return perf_evlist__set_paused(evlist, false); 702 705 } 703 706 704 - /* When check_messup is true, 'end' must points to a good entry */ 705 - static union perf_event * 706 - perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start, 707 - u64 end, u64 *prev) 708 - { 709 - unsigned char *data = md->base + page_size; 710 - union perf_event *event = NULL; 711 - int diff = end - start; 712 - 713 - if (check_messup) { 714 - /* 715 - * If we're further behind than half the buffer, there's a chance 716 - * the writer will bite our tail and mess up the samples under us. 717 - * 718 - * If we somehow ended up ahead of the 'end', we got messed up. 719 - * 720 - * In either case, truncate and restart at 'end'. 721 - */ 722 - if (diff > md->mask / 2 || diff < 0) { 723 - fprintf(stderr, "WARNING: failed to keep up with mmap data.\n"); 724 - 725 - /* 726 - * 'end' points to a known good entry, start there. 727 - */ 728 - start = end; 729 - diff = 0; 730 - } 731 - } 732 - 733 - if (diff >= (int)sizeof(event->header)) { 734 - size_t size; 735 - 736 - event = (union perf_event *)&data[start & md->mask]; 737 - size = event->header.size; 738 - 739 - if (size < sizeof(event->header) || diff < (int)size) { 740 - event = NULL; 741 - goto broken_event; 742 - } 743 - 744 - /* 745 - * Event straddles the mmap boundary -- header should always 746 - * be inside due to u64 alignment of output. 
747 - */ 748 - if ((start & md->mask) + size != ((start + size) & md->mask)) { 749 - unsigned int offset = start; 750 - unsigned int len = min(sizeof(*event), size), cpy; 751 - void *dst = md->event_copy; 752 - 753 - do { 754 - cpy = min(md->mask + 1 - (offset & md->mask), len); 755 - memcpy(dst, &data[offset & md->mask], cpy); 756 - offset += cpy; 757 - dst += cpy; 758 - len -= cpy; 759 - } while (len); 760 - 761 - event = (union perf_event *) md->event_copy; 762 - } 763 - 764 - start += size; 765 - } 766 - 767 - broken_event: 768 - if (prev) 769 - *prev = start; 770 - 771 - return event; 772 - } 773 - 774 - union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messup) 775 - { 776 - u64 head; 777 - u64 old = md->prev; 778 - 779 - /* 780 - * Check if event was unmapped due to a POLLHUP/POLLERR. 781 - */ 782 - if (!refcount_read(&md->refcnt)) 783 - return NULL; 784 - 785 - head = perf_mmap__read_head(md); 786 - 787 - return perf_mmap__read(md, check_messup, old, head, &md->prev); 788 - } 789 - 790 - union perf_event * 791 - perf_mmap__read_backward(struct perf_mmap *md) 792 - { 793 - u64 head, end; 794 - u64 start = md->prev; 795 - 796 - /* 797 - * Check if event was unmapped due to a POLLHUP/POLLERR. 798 - */ 799 - if (!refcount_read(&md->refcnt)) 800 - return NULL; 801 - 802 - head = perf_mmap__read_head(md); 803 - if (!head) 804 - return NULL; 805 - 806 - /* 807 - * 'head' pointer starts from 0. Kernel minus sizeof(record) form 808 - * it each time when kernel writes to it, so in fact 'head' is 809 - * negative. 'end' pointer is made manually by adding the size of 810 - * the ring buffer to 'head' pointer, means the validate data can 811 - * read is the whole ring buffer. If 'end' is positive, the ring 812 - * buffer has not fully filled, so we must adjust 'end' to 0. 813 - * 814 - * However, since both 'head' and 'end' is unsigned, we can't 815 - * simply compare 'end' against 0. 
Here we compare '-head' and 816 - * the size of the ring buffer, where -head is the number of bytes 817 - * kernel write to the ring buffer. 818 - */ 819 - if (-head < (u64)(md->mask + 1)) 820 - end = 0; 821 - else 822 - end = head + md->mask + 1; 823 - 824 - return perf_mmap__read(md, false, start, end, &md->prev); 825 - } 826 - 827 707 union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx) 828 708 { 829 709 struct perf_mmap *md = &evlist->mmap[idx]; ··· 731 857 return perf_evlist__mmap_read_forward(evlist, idx); 732 858 } 733 859 734 - void perf_mmap__read_catchup(struct perf_mmap *md) 735 - { 736 - u64 head; 737 - 738 - if (!refcount_read(&md->refcnt)) 739 - return; 740 - 741 - head = perf_mmap__read_head(md); 742 - md->prev = head; 743 - } 744 - 745 860 void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx) 746 861 { 747 862 perf_mmap__read_catchup(&evlist->mmap[idx]); 748 863 } 749 864 750 - static bool perf_mmap__empty(struct perf_mmap *md) 751 - { 752 - return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base; 753 - } 754 - 755 - static void perf_mmap__get(struct perf_mmap *map) 756 - { 757 - refcount_inc(&map->refcnt); 758 - } 759 - 760 - static void perf_mmap__put(struct perf_mmap *md) 761 - { 762 - BUG_ON(md->base && refcount_read(&md->refcnt) == 0); 763 - 764 - if (refcount_dec_and_test(&md->refcnt)) 765 - perf_mmap__munmap(md); 766 - } 767 - 768 - void perf_mmap__consume(struct perf_mmap *md, bool overwrite) 769 - { 770 - if (!overwrite) { 771 - u64 old = md->prev; 772 - 773 - perf_mmap__write_tail(md, old); 774 - } 775 - 776 - if (refcount_read(&md->refcnt) == 1 && perf_mmap__empty(md)) 777 - perf_mmap__put(md); 778 - } 779 - 780 865 void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx) 781 866 { 782 867 perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite); 783 - } 784 - 785 - int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused, 786 - struct 
auxtrace_mmap_params *mp __maybe_unused, 787 - void *userpg __maybe_unused, 788 - int fd __maybe_unused) 789 - { 790 - return 0; 791 - } 792 - 793 - void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused) 794 - { 795 - } 796 - 797 - void __weak auxtrace_mmap_params__init( 798 - struct auxtrace_mmap_params *mp __maybe_unused, 799 - off_t auxtrace_offset __maybe_unused, 800 - unsigned int auxtrace_pages __maybe_unused, 801 - bool auxtrace_overwrite __maybe_unused) 802 - { 803 - } 804 - 805 - void __weak auxtrace_mmap_params__set_idx( 806 - struct auxtrace_mmap_params *mp __maybe_unused, 807 - struct perf_evlist *evlist __maybe_unused, 808 - int idx __maybe_unused, 809 - bool per_cpu __maybe_unused) 810 - { 811 - } 812 - 813 - static void perf_mmap__munmap(struct perf_mmap *map) 814 - { 815 - if (map->base != NULL) { 816 - munmap(map->base, perf_mmap__mmap_len(map)); 817 - map->base = NULL; 818 - map->fd = -1; 819 - refcount_set(&map->refcnt, 0); 820 - } 821 - auxtrace_mmap__munmap(&map->auxtrace_mmap); 822 868 } 823 869 824 870 static void perf_evlist__munmap_nofree(struct perf_evlist *evlist) ··· 787 993 refcount_set(&map[i].refcnt, 0); 788 994 } 789 995 return map; 790 - } 791 - 792 - struct mmap_params { 793 - int prot; 794 - int mask; 795 - struct auxtrace_mmap_params auxtrace_mp; 796 - }; 797 - 798 - static int perf_mmap__mmap(struct perf_mmap *map, 799 - struct mmap_params *mp, int fd) 800 - { 801 - /* 802 - * The last one will be done at perf_evlist__mmap_consume(), so that we 803 - * make sure we don't prevent tools from consuming every last event in 804 - * the ring buffer. 805 - * 806 - * I.e. we can get the POLLHUP meaning that the fd doesn't exist 807 - * anymore, but the last events for it are still in the ring buffer, 808 - * waiting to be consumed. 809 - * 810 - * Tools can chose to ignore this at their own discretion, but the 811 - * evlist layer can't just drop it when filtering events in 812 - * perf_evlist__filter_pollfd(). 
813 - */ 814 - refcount_set(&map->refcnt, 2); 815 - map->prev = 0; 816 - map->mask = mp->mask; 817 - map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot, 818 - MAP_SHARED, fd, 0); 819 - if (map->base == MAP_FAILED) { 820 - pr_debug2("failed to mmap perf event ring buffer, error %d\n", 821 - errno); 822 - map->base = NULL; 823 - return -1; 824 - } 825 - map->fd = fd; 826 - 827 - if (auxtrace_mmap__mmap(&map->auxtrace_mmap, 828 - &mp->auxtrace_mp, map->base, fd)) 829 - return -1; 830 - 831 - return 0; 832 996 } 833 997 834 998 static bool
+1 -75
tools/perf/util/evlist.h
··· 11 11 #include "../perf.h" 12 12 #include "event.h" 13 13 #include "evsel.h" 14 + #include "mmap.h" 14 15 #include "util.h" 15 - #include "auxtrace.h" 16 16 #include <signal.h> 17 17 #include <unistd.h> 18 18 ··· 23 23 24 24 #define PERF_EVLIST__HLIST_BITS 8 25 25 #define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS) 26 - 27 - /** 28 - * struct perf_mmap - perf's ring buffer mmap details 29 - * 30 - * @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this 31 - */ 32 - struct perf_mmap { 33 - void *base; 34 - int mask; 35 - int fd; 36 - refcount_t refcnt; 37 - u64 prev; 38 - struct auxtrace_mmap auxtrace_mmap; 39 - char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8); 40 - }; 41 - 42 - static inline size_t 43 - perf_mmap__mmap_len(struct perf_mmap *map) 44 - { 45 - return map->mask + 1 + page_size; 46 - } 47 - 48 - /* 49 - * State machine of bkw_mmap_state: 50 - * 51 - * .________________(forbid)_____________. 52 - * | V 53 - * NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY 54 - * ^ ^ | ^ | 55 - * | |__(forbid)____/ |___(forbid)___/| 56 - * | | 57 - * \_________________(3)_______________/ 58 - * 59 - * NOTREADY : Backward ring buffers are not ready 60 - * RUNNING : Backward ring buffers are recording 61 - * DATA_PENDING : We are required to collect data from backward ring buffers 62 - * EMPTY : We have collected data from backward ring buffers. 
63 - * 64 - * (0): Setup backward ring buffer 65 - * (1): Pause ring buffers for reading 66 - * (2): Read from ring buffers 67 - * (3): Resume ring buffers for recording 68 - */ 69 - enum bkw_mmap_state { 70 - BKW_MMAP_NOTREADY, 71 - BKW_MMAP_RUNNING, 72 - BKW_MMAP_DATA_PENDING, 73 - BKW_MMAP_EMPTY, 74 - }; 75 26 76 27 struct perf_evlist { 77 28 struct list_head entries; ··· 128 177 129 178 void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist, enum bkw_mmap_state state); 130 179 131 - union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup); 132 - union perf_event *perf_mmap__read_backward(struct perf_mmap *map); 133 - 134 - void perf_mmap__read_catchup(struct perf_mmap *md); 135 - void perf_mmap__consume(struct perf_mmap *md, bool overwrite); 136 - 137 180 union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx); 138 181 139 182 union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, ··· 230 285 231 286 int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size); 232 287 int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size); 233 - 234 - static inline u64 perf_mmap__read_head(struct perf_mmap *mm) 235 - { 236 - struct perf_event_mmap_page *pc = mm->base; 237 - u64 head = ACCESS_ONCE(pc->data_head); 238 - rmb(); 239 - return head; 240 - } 241 - 242 - static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail) 243 - { 244 - struct perf_event_mmap_page *pc = md->base; 245 - 246 - /* 247 - * ensure all reads are done before we write the tail out. 248 - */ 249 - mb(); 250 - pc->data_tail = tail; 251 - } 252 288 253 289 bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str); 254 290 void perf_evlist__to_front(struct perf_evlist *evlist,
+252
tools/perf/util/mmap.c
··· 1 + /* 2 + * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> 3 + * 4 + * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further 5 + * copyright notes. 6 + * 7 + * Released under the GPL v2. (and only v2, not any later version) 8 + */ 9 + 10 + #include <sys/mman.h> 11 + #include "event.h" 12 + #include "mmap.h" 13 + #include "util.h" /* page_size */ 14 + 15 + size_t perf_mmap__mmap_len(struct perf_mmap *map) 16 + { 17 + return map->mask + 1 + page_size; 18 + } 19 + 20 + /* When check_messup is true, 'end' must points to a good entry */ 21 + static union perf_event *perf_mmap__read(struct perf_mmap *map, bool check_messup, 22 + u64 start, u64 end, u64 *prev) 23 + { 24 + unsigned char *data = map->base + page_size; 25 + union perf_event *event = NULL; 26 + int diff = end - start; 27 + 28 + if (check_messup) { 29 + /* 30 + * If we're further behind than half the buffer, there's a chance 31 + * the writer will bite our tail and mess up the samples under us. 32 + * 33 + * If we somehow ended up ahead of the 'end', we got messed up. 34 + * 35 + * In either case, truncate and restart at 'end'. 36 + */ 37 + if (diff > map->mask / 2 || diff < 0) { 38 + fprintf(stderr, "WARNING: failed to keep up with mmap data.\n"); 39 + 40 + /* 41 + * 'end' points to a known good entry, start there. 42 + */ 43 + start = end; 44 + diff = 0; 45 + } 46 + } 47 + 48 + if (diff >= (int)sizeof(event->header)) { 49 + size_t size; 50 + 51 + event = (union perf_event *)&data[start & map->mask]; 52 + size = event->header.size; 53 + 54 + if (size < sizeof(event->header) || diff < (int)size) { 55 + event = NULL; 56 + goto broken_event; 57 + } 58 + 59 + /* 60 + * Event straddles the mmap boundary -- header should always 61 + * be inside due to u64 alignment of output. 
62 + */ 63 + if ((start & map->mask) + size != ((start + size) & map->mask)) { 64 + unsigned int offset = start; 65 + unsigned int len = min(sizeof(*event), size), cpy; 66 + void *dst = map->event_copy; 67 + 68 + do { 69 + cpy = min(map->mask + 1 - (offset & map->mask), len); 70 + memcpy(dst, &data[offset & map->mask], cpy); 71 + offset += cpy; 72 + dst += cpy; 73 + len -= cpy; 74 + } while (len); 75 + 76 + event = (union perf_event *)map->event_copy; 77 + } 78 + 79 + start += size; 80 + } 81 + 82 + broken_event: 83 + if (prev) 84 + *prev = start; 85 + 86 + return event; 87 + } 88 + 89 + union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup) 90 + { 91 + u64 head; 92 + u64 old = map->prev; 93 + 94 + /* 95 + * Check if event was unmapped due to a POLLHUP/POLLERR. 96 + */ 97 + if (!refcount_read(&map->refcnt)) 98 + return NULL; 99 + 100 + head = perf_mmap__read_head(map); 101 + 102 + return perf_mmap__read(map, check_messup, old, head, &map->prev); 103 + } 104 + 105 + union perf_event *perf_mmap__read_backward(struct perf_mmap *map) 106 + { 107 + u64 head, end; 108 + u64 start = map->prev; 109 + 110 + /* 111 + * Check if event was unmapped due to a POLLHUP/POLLERR. 112 + */ 113 + if (!refcount_read(&map->refcnt)) 114 + return NULL; 115 + 116 + head = perf_mmap__read_head(map); 117 + if (!head) 118 + return NULL; 119 + 120 + /* 121 + * 'head' pointer starts from 0. Kernel minus sizeof(record) form 122 + * it each time when kernel writes to it, so in fact 'head' is 123 + * negative. 'end' pointer is made manually by adding the size of 124 + * the ring buffer to 'head' pointer, means the validate data can 125 + * read is the whole ring buffer. If 'end' is positive, the ring 126 + * buffer has not fully filled, so we must adjust 'end' to 0. 127 + * 128 + * However, since both 'head' and 'end' is unsigned, we can't 129 + * simply compare 'end' against 0. 
Here we compare '-head' and 130 + * the size of the ring buffer, where -head is the number of bytes 131 + * kernel write to the ring buffer. 132 + */ 133 + if (-head < (u64)(map->mask + 1)) 134 + end = 0; 135 + else 136 + end = head + map->mask + 1; 137 + 138 + return perf_mmap__read(map, false, start, end, &map->prev); 139 + } 140 + 141 + void perf_mmap__read_catchup(struct perf_mmap *map) 142 + { 143 + u64 head; 144 + 145 + if (!refcount_read(&map->refcnt)) 146 + return; 147 + 148 + head = perf_mmap__read_head(map); 149 + map->prev = head; 150 + } 151 + 152 + static bool perf_mmap__empty(struct perf_mmap *map) 153 + { 154 + return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base; 155 + } 156 + 157 + void perf_mmap__get(struct perf_mmap *map) 158 + { 159 + refcount_inc(&map->refcnt); 160 + } 161 + 162 + void perf_mmap__put(struct perf_mmap *map) 163 + { 164 + BUG_ON(map->base && refcount_read(&map->refcnt) == 0); 165 + 166 + if (refcount_dec_and_test(&map->refcnt)) 167 + perf_mmap__munmap(map); 168 + } 169 + 170 + void perf_mmap__consume(struct perf_mmap *map, bool overwrite) 171 + { 172 + if (!overwrite) { 173 + u64 old = map->prev; 174 + 175 + perf_mmap__write_tail(map, old); 176 + } 177 + 178 + if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map)) 179 + perf_mmap__put(map); 180 + } 181 + 182 + int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused, 183 + struct auxtrace_mmap_params *mp __maybe_unused, 184 + void *userpg __maybe_unused, 185 + int fd __maybe_unused) 186 + { 187 + return 0; 188 + } 189 + 190 + void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused) 191 + { 192 + } 193 + 194 + void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused, 195 + off_t auxtrace_offset __maybe_unused, 196 + unsigned int auxtrace_pages __maybe_unused, 197 + bool auxtrace_overwrite __maybe_unused) 198 + { 199 + } 200 + 201 + void __weak auxtrace_mmap_params__set_idx(struct 
auxtrace_mmap_params *mp __maybe_unused, 202 + struct perf_evlist *evlist __maybe_unused, 203 + int idx __maybe_unused, 204 + bool per_cpu __maybe_unused) 205 + { 206 + } 207 + 208 + void perf_mmap__munmap(struct perf_mmap *map) 209 + { 210 + if (map->base != NULL) { 211 + munmap(map->base, perf_mmap__mmap_len(map)); 212 + map->base = NULL; 213 + map->fd = -1; 214 + refcount_set(&map->refcnt, 0); 215 + } 216 + auxtrace_mmap__munmap(&map->auxtrace_mmap); 217 + } 218 + 219 + int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd) 220 + { 221 + /* 222 + * The last one will be done at perf_evlist__mmap_consume(), so that we 223 + * make sure we don't prevent tools from consuming every last event in 224 + * the ring buffer. 225 + * 226 + * I.e. we can get the POLLHUP meaning that the fd doesn't exist 227 + * anymore, but the last events for it are still in the ring buffer, 228 + * waiting to be consumed. 229 + * 230 + * Tools can chose to ignore this at their own discretion, but the 231 + * evlist layer can't just drop it when filtering events in 232 + * perf_evlist__filter_pollfd(). 233 + */ 234 + refcount_set(&map->refcnt, 2); 235 + map->prev = 0; 236 + map->mask = mp->mask; 237 + map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot, 238 + MAP_SHARED, fd, 0); 239 + if (map->base == MAP_FAILED) { 240 + pr_debug2("failed to mmap perf event ring buffer, error %d\n", 241 + errno); 242 + map->base = NULL; 243 + return -1; 244 + } 245 + map->fd = fd; 246 + 247 + if (auxtrace_mmap__mmap(&map->auxtrace_mmap, 248 + &mp->auxtrace_mp, map->base, fd)) 249 + return -1; 250 + 251 + return 0; 252 + }
+94
tools/perf/util/mmap.h
··· 1 + #ifndef __PERF_MMAP_H 2 + #define __PERF_MMAP_H 1 3 + 4 + #include <linux/compiler.h> 5 + #include <linux/refcount.h> 6 + #include <linux/types.h> 7 + #include <asm/barrier.h> 8 + #include <stdbool.h> 9 + #include "auxtrace.h" 10 + #include "event.h" 11 + 12 + /** 13 + * struct perf_mmap - perf's ring buffer mmap details 14 + * 15 + * @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this 16 + */ 17 + struct perf_mmap { 18 + void *base; 19 + int mask; 20 + int fd; 21 + refcount_t refcnt; 22 + u64 prev; 23 + struct auxtrace_mmap auxtrace_mmap; 24 + char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8); 25 + }; 26 + 27 + /* 28 + * State machine of bkw_mmap_state: 29 + * 30 + * .________________(forbid)_____________. 31 + * | V 32 + * NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY 33 + * ^ ^ | ^ | 34 + * | |__(forbid)____/ |___(forbid)___/| 35 + * | | 36 + * \_________________(3)_______________/ 37 + * 38 + * NOTREADY : Backward ring buffers are not ready 39 + * RUNNING : Backward ring buffers are recording 40 + * DATA_PENDING : We are required to collect data from backward ring buffers 41 + * EMPTY : We have collected data from backward ring buffers. 
42 + * 43 + * (0): Setup backward ring buffer 44 + * (1): Pause ring buffers for reading 45 + * (2): Read from ring buffers 46 + * (3): Resume ring buffers for recording 47 + */ 48 + enum bkw_mmap_state { 49 + BKW_MMAP_NOTREADY, 50 + BKW_MMAP_RUNNING, 51 + BKW_MMAP_DATA_PENDING, 52 + BKW_MMAP_EMPTY, 53 + }; 54 + 55 + struct mmap_params { 56 + int prot, mask; 57 + struct auxtrace_mmap_params auxtrace_mp; 58 + }; 59 + 60 + int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd); 61 + void perf_mmap__munmap(struct perf_mmap *map); 62 + 63 + void perf_mmap__get(struct perf_mmap *map); 64 + void perf_mmap__put(struct perf_mmap *map); 65 + 66 + void perf_mmap__consume(struct perf_mmap *map, bool overwrite); 67 + 68 + void perf_mmap__read_catchup(struct perf_mmap *md); 69 + 70 + static inline u64 perf_mmap__read_head(struct perf_mmap *mm) 71 + { 72 + struct perf_event_mmap_page *pc = mm->base; 73 + u64 head = ACCESS_ONCE(pc->data_head); 74 + rmb(); 75 + return head; 76 + } 77 + 78 + static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail) 79 + { 80 + struct perf_event_mmap_page *pc = md->base; 81 + 82 + /* 83 + * ensure all reads are done before we write the tail out. 84 + */ 85 + mb(); 86 + pc->data_tail = tail; 87 + } 88 + 89 + union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup); 90 + union perf_event *perf_mmap__read_backward(struct perf_mmap *map); 91 + 92 + size_t perf_mmap__mmap_len(struct perf_mmap *map); 93 + 94 + #endif /*__PERF_MMAP_H */
+1
tools/perf/util/python-ext-sources
··· 10 10 util/evlist.c 11 11 util/evsel.c 12 12 util/cpumap.c 13 + util/mmap.c 13 14 util/namespaces.c 14 15 ../lib/bitmap.c 15 16 ../lib/find_bit.c