libperf evsel: Make use of FD robust.

FD uses xyarray__entry that may return NULL if an index is out of
bounds. If NULL is returned then a segv happens as FD unconditionally
dereferences the pointer. This was happening in a case with perf
iostat as shown below. The fix is to make FD an "int*" rather than an
int and handle the NULL case as either invalid input or a closed fd.

$ sudo gdb --args perf stat --iostat list
...
Breakpoint 1, perf_evsel__alloc_fd (evsel=0x5555560951a0, ncpus=1, nthreads=1) at evsel.c:50
50 {
(gdb) bt
#0 perf_evsel__alloc_fd (evsel=0x5555560951a0, ncpus=1, nthreads=1) at evsel.c:50
#1 0x000055555585c188 in evsel__open_cpu (evsel=0x5555560951a0, cpus=0x555556093410,
threads=0x555556086fb0, start_cpu=0, end_cpu=1) at util/evsel.c:1792
#2 0x000055555585cfb2 in evsel__open (evsel=0x5555560951a0, cpus=0x0, threads=0x555556086fb0)
at util/evsel.c:2045
#3 0x000055555585d0db in evsel__open_per_thread (evsel=0x5555560951a0, threads=0x555556086fb0)
at util/evsel.c:2065
#4 0x00005555558ece64 in create_perf_stat_counter (evsel=0x5555560951a0,
config=0x555555c34700 <stat_config>, target=0x555555c2f1c0 <target>, cpu=0) at util/stat.c:590
#5 0x000055555578e927 in __run_perf_stat (argc=1, argv=0x7fffffffe4a0, run_idx=0)
at builtin-stat.c:833
#6 0x000055555578f3c6 in run_perf_stat (argc=1, argv=0x7fffffffe4a0, run_idx=0)
at builtin-stat.c:1048
#7 0x0000555555792ee5 in cmd_stat (argc=1, argv=0x7fffffffe4a0) at builtin-stat.c:2534
#8 0x0000555555835ed3 in run_builtin (p=0x555555c3f540 <commands+288>, argc=3,
argv=0x7fffffffe4a0) at perf.c:313
#9 0x0000555555836154 in handle_internal_command (argc=3, argv=0x7fffffffe4a0) at perf.c:365
#10 0x000055555583629f in run_argv (argcp=0x7fffffffe2ec, argv=0x7fffffffe2e0) at perf.c:409
#11 0x0000555555836692 in main (argc=3, argv=0x7fffffffe4a0) at perf.c:539
...
(gdb) c
Continuing.
Error:
The sys_perf_event_open() syscall returned with 22 (Invalid argument) for event (uncore_iio_0/event=0x83,umask=0x04,ch_mask=0xF,fc_mask=0x07/).
/bin/dmesg | grep -i perf may provide additional information.

Program received signal SIGSEGV, Segmentation fault.
0x00005555559b03ea in perf_evsel__close_fd_cpu (evsel=0x5555560951a0, cpu=1) at evsel.c:166
166 if (FD(evsel, cpu, thread) >= 0)

v3. fixes a bug in perf_evsel__run_ioctl where the sense of a branch was
backward.

Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Jiri Olsa <jolsa@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lore.kernel.org/lkml/20210918054440.2350466-1-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

authored by Ian Rogers and committed by Arnaldo Carvalho de Melo aba5daeb 57f0ff05

Changed files
+41 -23
tools
lib
perf
+41 -23
tools/lib/perf/evsel.c
··· 43 43 free(evsel); 44 44 } 45 45 46 - #define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y)) 46 + #define FD(e, x, y) ((int *) xyarray__entry(e->fd, x, y)) 47 47 #define MMAP(e, x, y) (e->mmap ? ((struct perf_mmap *) xyarray__entry(e->mmap, x, y)) : NULL) 48 48 49 49 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) ··· 54 54 int cpu, thread; 55 55 for (cpu = 0; cpu < ncpus; cpu++) { 56 56 for (thread = 0; thread < nthreads; thread++) { 57 - FD(evsel, cpu, thread) = -1; 57 + int *fd = FD(evsel, cpu, thread); 58 + 59 + if (fd) 60 + *fd = -1; 58 61 } 59 62 } 60 63 } ··· 83 80 static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *group_fd) 84 81 { 85 82 struct perf_evsel *leader = evsel->leader; 86 - int fd; 83 + int *fd; 87 84 88 85 if (evsel == leader) { 89 86 *group_fd = -1; ··· 98 95 return -ENOTCONN; 99 96 100 97 fd = FD(leader, cpu, thread); 101 - if (fd == -1) 98 + if (fd == NULL || *fd == -1) 102 99 return -EBADF; 103 100 104 - *group_fd = fd; 101 + *group_fd = *fd; 105 102 106 103 return 0; 107 104 } ··· 141 138 142 139 for (cpu = 0; cpu < cpus->nr; cpu++) { 143 140 for (thread = 0; thread < threads->nr; thread++) { 144 - int fd, group_fd; 141 + int fd, group_fd, *evsel_fd; 142 + 143 + evsel_fd = FD(evsel, cpu, thread); 144 + if (evsel_fd == NULL) 145 + return -EINVAL; 145 146 146 147 err = get_group_fd(evsel, cpu, thread, &group_fd); 147 148 if (err < 0) ··· 158 151 if (fd < 0) 159 152 return -errno; 160 153 161 - FD(evsel, cpu, thread) = fd; 154 + *evsel_fd = fd; 162 155 } 163 156 } 164 157 ··· 170 163 int thread; 171 164 172 165 for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) { 173 - if (FD(evsel, cpu, thread) >= 0) 174 - close(FD(evsel, cpu, thread)); 175 - FD(evsel, cpu, thread) = -1; 166 + int *fd = FD(evsel, cpu, thread); 167 + 168 + if (fd && *fd >= 0) { 169 + close(*fd); 170 + *fd = -1; 171 + } 176 172 } 177 173 } 178 174 ··· 219 209 220 210 for (cpu = 0; cpu < 
xyarray__max_x(evsel->fd); cpu++) { 221 211 for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { 222 - int fd = FD(evsel, cpu, thread); 223 - struct perf_mmap *map = MMAP(evsel, cpu, thread); 212 + int *fd = FD(evsel, cpu, thread); 224 213 225 - if (fd < 0) 214 + if (fd == NULL || *fd < 0) 226 215 continue; 227 216 228 - perf_mmap__munmap(map); 217 + perf_mmap__munmap(MMAP(evsel, cpu, thread)); 229 218 } 230 219 } 231 220 ··· 248 239 249 240 for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) { 250 241 for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { 251 - int fd = FD(evsel, cpu, thread); 252 - struct perf_mmap *map = MMAP(evsel, cpu, thread); 242 + int *fd = FD(evsel, cpu, thread); 243 + struct perf_mmap *map; 253 244 254 - if (fd < 0) 245 + if (fd == NULL || *fd < 0) 255 246 continue; 256 247 248 + map = MMAP(evsel, cpu, thread); 257 249 perf_mmap__init(map, NULL, false, NULL); 258 250 259 - ret = perf_mmap__mmap(map, &mp, fd, cpu); 251 + ret = perf_mmap__mmap(map, &mp, *fd, cpu); 260 252 if (ret) { 261 253 perf_evsel__munmap(evsel); 262 254 return ret; ··· 270 260 271 261 void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread) 272 262 { 273 - if (FD(evsel, cpu, thread) < 0 || MMAP(evsel, cpu, thread) == NULL) 263 + int *fd = FD(evsel, cpu, thread); 264 + 265 + if (fd == NULL || *fd < 0 || MMAP(evsel, cpu, thread) == NULL) 274 266 return NULL; 275 267 276 268 return MMAP(evsel, cpu, thread)->base; ··· 307 295 struct perf_counts_values *count) 308 296 { 309 297 size_t size = perf_evsel__read_size(evsel); 298 + int *fd = FD(evsel, cpu, thread); 310 299 311 300 memset(count, 0, sizeof(*count)); 312 301 313 - if (FD(evsel, cpu, thread) < 0) 302 + if (fd == NULL || *fd < 0) 314 303 return -EINVAL; 315 304 316 305 if (MMAP(evsel, cpu, thread) && 317 306 !perf_mmap__read_self(MMAP(evsel, cpu, thread), count)) 318 307 return 0; 319 308 320 - if (readn(FD(evsel, cpu, thread), count->values, size) <= 0) 309 + if (readn(*fd, 
count->values, size) <= 0) 321 310 return -errno; 322 311 323 312 return 0; ··· 331 318 int thread; 332 319 333 320 for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { 334 - int fd = FD(evsel, cpu, thread), 335 - err = ioctl(fd, ioc, arg); 321 + int err; 322 + int *fd = FD(evsel, cpu, thread); 323 + 324 + if (fd == NULL || *fd < 0) 325 + return -1; 326 + 327 + err = ioctl(*fd, ioc, arg); 336 328 337 329 if (err) 338 330 return err;