Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

libperf: Add perf_evlist__poll() function

Move perf_evlist__poll() from tools/perf to libperf, it will be used in
the following patches.

And rename perf's existing function to evlist__poll().

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lore.kernel.org/lkml/20190913132355.21634-39-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

Authored by Jiri Olsa and committed by Arnaldo Carvalho de Melo.
80ab2987 f4009e7b

+19 -12
+1 -1
tools/perf/builtin-record.c
··· 1615 1615 if (hits == rec->samples) { 1616 1616 if (done || draining) 1617 1617 break; 1618 - err = perf_evlist__poll(rec->evlist, -1); 1618 + err = evlist__poll(rec->evlist, -1); 1619 1619 /* 1620 1620 * Propagate error, only if there's any. Ignore positive 1621 1621 * number of returned events and interrupt error.
+2 -2
tools/perf/builtin-top.c
··· 1307 1307 } 1308 1308 1309 1309 /* Wait for a minimal set of events before starting the snapshot */ 1310 - perf_evlist__poll(top->evlist, 100); 1310 + evlist__poll(top->evlist, 100); 1311 1311 1312 1312 perf_top__mmap_read(top); 1313 1313 ··· 1317 1317 perf_top__mmap_read(top); 1318 1318 1319 1319 if (opts->overwrite || (hits == top->samples)) 1320 - ret = perf_evlist__poll(top->evlist, 100); 1320 + ret = evlist__poll(top->evlist, 100); 1321 1321 1322 1322 if (resize) { 1323 1323 perf_top__resize(top);
+1 -1
tools/perf/builtin-trace.c
··· 3474 3474 if (trace->nr_events == before) { 3475 3475 int timeout = done ? 100 : -1; 3476 3476 3477 - if (!draining && perf_evlist__poll(evlist, timeout) > 0) { 3477 + if (!draining && evlist__poll(evlist, timeout) > 0) { 3478 3478 if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0) 3479 3479 draining = true; 3480 3480
+5
tools/perf/lib/evlist.c
··· 276 276 277 277 return pos; 278 278 } 279 + 280 + int perf_evlist__poll(struct perf_evlist *evlist, int timeout) 281 + { 282 + return fdarray__poll(&evlist->pollfd, timeout); 283 + }
+1
tools/perf/lib/include/perf/evlist.h
··· 31 31 LIBPERF_API void perf_evlist__set_maps(struct perf_evlist *evlist, 32 32 struct perf_cpu_map *cpus, 33 33 struct perf_thread_map *threads); 34 + LIBPERF_API int perf_evlist__poll(struct perf_evlist *evlist, int timeout); 34 35 35 36 #endif /* __LIBPERF_EVLIST_H */
+1
tools/perf/lib/libperf.map
··· 39 39 perf_evlist__remove; 40 40 perf_evlist__next; 41 41 perf_evlist__set_maps; 42 + perf_evlist__poll; 42 43 local: 43 44 *; 44 45 };
+1 -1
tools/perf/tests/openat-syscall-tp-fields.c
··· 127 127 } 128 128 129 129 if (nr_events == before) 130 - perf_evlist__poll(evlist, 10); 130 + evlist__poll(evlist, 10); 131 131 132 132 if (++nr_polls > 5) { 133 133 pr_debug("%s: no events!\n", __func__);
+1 -1
tools/perf/tests/perf-record.c
··· 287 287 * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does. 288 288 */ 289 289 if (total_events == before && false) 290 - perf_evlist__poll(evlist, -1); 290 + evlist__poll(evlist, -1); 291 291 292 292 sleep(1); 293 293 if (++wakeups > 5) {
+1 -1
tools/perf/tests/task-exit.c
··· 130 130 131 131 out_init: 132 132 if (!exited || !nr_exit) { 133 - perf_evlist__poll(evlist, -1); 133 + evlist__poll(evlist, -1); 134 134 goto retry; 135 135 } 136 136
+3 -3
tools/perf/util/evlist.c
··· 418 418 perf_evlist__munmap_filtered, NULL); 419 419 } 420 420 421 - int perf_evlist__poll(struct evlist *evlist, int timeout) 421 + int evlist__poll(struct evlist *evlist, int timeout) 422 422 { 423 - return fdarray__poll(&evlist->core.pollfd, timeout); 423 + return perf_evlist__poll(&evlist->core, timeout); 424 424 } 425 425 426 426 static void perf_evlist__set_sid_idx(struct evlist *evlist, ··· 1736 1736 draining = true; 1737 1737 1738 1738 if (!draining) 1739 - perf_evlist__poll(evlist, 1000); 1739 + evlist__poll(evlist, 1000); 1740 1740 1741 1741 for (i = 0; i < evlist->core.nr_mmaps; i++) { 1742 1742 struct mmap *map = &evlist->mmap[i];
+1 -1
tools/perf/util/evlist.h
··· 144 144 int evlist__add_pollfd(struct evlist *evlist, int fd); 145 145 int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask); 146 146 147 - int perf_evlist__poll(struct evlist *evlist, int timeout); 147 + int evlist__poll(struct evlist *evlist, int timeout); 148 148 149 149 struct evsel *perf_evlist__id2evsel(struct evlist *evlist, u64 id); 150 150 struct evsel *perf_evlist__id2evsel_strict(struct evlist *evlist,
+1 -1
tools/perf/util/python.c
··· 918 918 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout)) 919 919 return NULL; 920 920 921 - n = perf_evlist__poll(evlist, timeout); 921 + n = evlist__poll(evlist, timeout); 922 922 if (n < 0) { 923 923 PyErr_SetFromErrno(PyExc_OSError); 924 924 return NULL;