// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/freezer.h>
#include <net/busy_poll.h>
#include <linux/vmalloc.h>

#include <linux/uaccess.h>


/*
 * Estimate expected accuracy in ns from a timespec64.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */

#define MAX_SLACK	(100 * NSEC_PER_MSEC)

static long __estimate_accuracy(struct timespec64 *tv)
{
	long slack;
	int divfactor = 1000;

	if (tv->tv_sec < 0)
		return 0;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
		return MAX_SLACK;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > MAX_SLACK)
		return MAX_SLACK;

	return slack;
}

u64 select_estimate_accuracy(struct timespec64 *tv)
{
	u64 ret;
	struct timespec64 now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */

	if (rt_task(current))
		return 0;

	ktime_get_ts64(&now);
	now = timespec64_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}
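
/*
 * A quick sanity check of the arithmetic above: with the default
 * divfactor of 1000, a 2 s timeout gets 2e9 / 1000 = 2,000,000 ns
 * (2 ms) of slack.  A positively-niced task drops divfactor to 200,
 * so the same timeout gets 10 ms.  Timeouts beyond
 * MAX_SLACK / (NSEC_PER_SEC / divfactor) seconds (100 s at the
 * default divfactor) are simply capped at MAX_SLACK = 100 ms.
 */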


struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))

/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait() do all the
 * work. poll_wait() is an inline-function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
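
/*
 * For illustration, a minimal (hypothetical) driver ->poll method pairs
 * with the machinery below roughly like this; mydev and data_ready()
 * are made up for the sketch:
 *
 *	static __poll_t mydev_poll(struct file *file, poll_table *wait)
 *	{
 *		struct mydev *dev = file->private_data;
 *		__poll_t mask = 0;
 *
 *		poll_wait(file, &dev->read_wq, wait);
 *		if (data_ready(dev))
 *			mask |= EPOLLIN | EPOLLRDNORM;
 *		return mask;
 *	}
 *
 * poll_wait() only registers a waiter (via __pollwait() below) when
 * wait->_qproc is non-NULL; the readiness mask is always computed and
 * returned regardless.
 */
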
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}

static int __pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with smp_store_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync.  @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

static int pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_table_entry *entry;

	entry = container_of(wait, struct poll_table_entry, wait);
	if (key && !(key_to_poll(key) & entry->key))
		return 0;
	return __pollwake(wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
				poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	entry->filp = get_file(filp);
	entry->wait_address = wait_address;
	entry->key = p->_key;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}

int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
			  ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following smp_store_mb() serves two purposes.  First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration.  Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	smp_store_mb(pwq->triggered, 0);

	return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);
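
/*
 * Taken together, the helpers above get used in a fixed pattern by
 * do_select() and do_poll() further down; a rough sketch of that
 * lifecycle (pseudocode in the condition):
 *
 *	struct poll_wqueues table;
 *
 *	poll_initwait(&table);
 *	for (;;) {
 *		mask = file->f_op->poll(file, &table.pt);
 *		if (<mask has what we want> || signal || timeout)
 *			break;
 *		poll_schedule_timeout(&table, TASK_INTERRUPTIBLE, to, slack);
 *	}
 *	poll_freewait(&table);
 *
 * Only the first pass through ->poll() registers us on each file's
 * wait queues via __pollwait(); later passes run with _qproc cleared,
 * so no duplicate wait queue entries are ever added.
 */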

/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to:		pointer to timespec64 variable for the final timeout
 * @sec:	seconds (from user space)
 * @nsec:	nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec64 *to, time64_t sec, long nsec)
{
	struct timespec64 ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec64_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts64(to);
		*to = timespec64_add_safe(*to, ts);
	}
	return 0;
}
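
/*
 * Example: a userspace timeout of { .tv_sec = 1, .tv_usec = 500000 }
 * arrives here as sec=1, nsec=500000000.  If the monotonic clock
 * currently reads 1000.25 s, *to becomes 1001.75 s: an absolute expiry
 * that the wait loops below compare against, and from which
 * poll_select_copy_remaining() later derives the time left to report
 * back to userspace.
 */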

static int poll_select_copy_remaining(struct timespec64 *end_time,
				      void __user *p,
				      int timeval, int ret)
{
	struct timespec64 rts;
	struct timeval rtv;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts64(&rts);
	rts = timespec64_sub(*end_time, rts);
	if (rts.tv_sec < 0)
		rts.tv_sec = rts.tv_nsec = 0;


	if (timeval) {
		if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
			memset(&rtv, 0, sizeof(rtv));
		rtv.tv_sec = rts.tv_sec;
		rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;

	} else if (!put_timespec64(&rts, p))
		return ret;

	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

/*
 * Scalable version of the fd_set.
 */

typedef struct {
	unsigned long *in, *out, *ex;
	unsigned long *res_in, *res_out, *res_ex;
} fd_set_bits;

/*
 * How many longwords for "nr" bits?
 */
#define FDS_BITPERLONG	(8*sizeof(long))
#define FDS_LONGS(nr)	(((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
#define FDS_BYTES(nr)	(FDS_LONGS(nr)*sizeof(long))
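
/*
 * Checking the arithmetic: on a 64-bit box FDS_BITPERLONG is 64, so a
 * classic FD_SETSIZE of 1024 descriptors needs FDS_LONGS(1024) = 16
 * longwords, i.e. FDS_BYTES(1024) = 128 bytes per bitmap.  Any nr that
 * is not a multiple of the word size rounds up to a whole longword.
 */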

/*
 * We do a VERIFY_WRITE here even though we are only reading this time:
 * we'll write to it eventually..
 *
 * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
 */
static inline
int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	nr = FDS_BYTES(nr);
	if (ufdset)
		return copy_from_user(fdset, ufdset, nr) ?
			-EFAULT : 0;

	memset(fdset, 0, nr);
	return 0;
}

static inline unsigned long __must_check
set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	if (ufdset)
		return __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
	return 0;
}

static inline
void zero_fd_set(unsigned long nr, unsigned long *fdset)
{
	memset(fdset, 0, FDS_BYTES(nr));
}

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle last incomplete long-word first */
	set = ~(~0UL << (n & (BITS_PER_LONG-1)));
	n /= BITS_PER_LONG;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds + n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * BITS_PER_LONG;
	}

	return max;
}

#define POLLIN_SET (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN | EPOLLHUP | EPOLLERR)
#define POLLOUT_SET (EPOLLWRBAND | EPOLLWRNORM | EPOLLOUT | EPOLLERR)
#define POLLEX_SET (EPOLLPRI)

static inline void wait_key_set(poll_table *wait, unsigned long in,
				unsigned long out, unsigned long bit,
				__poll_t ll_flag)
{
	wait->_key = POLLEX_SET | ll_flag;
	if (in & bit)
		wait->_key |= POLLIN_SET;
	if (out & bit)
		wait->_key |= POLLOUT_SET;
}
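
/*
 * These sets encode select() semantics in poll() terms: a descriptor in
 * the read set is reported ready not only on EPOLLIN but also on
 * EPOLLHUP or EPOLLERR, since a read() at that point won't block (it
 * returns 0 or an error).  For example, the read end of a pipe whose
 * writer has exited raises EPOLLHUP, which POLLIN_SET turns into
 * "readable" for select() callers.  Only EPOLLPRI counts as an
 * "exceptional condition".
 */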

static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	u64 slack = 0;
	__poll_t busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_start = 0;

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
		bool can_busy_loop = false;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;
			__poll_t mask;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += BITS_PER_LONG;
				continue;
			}

			for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
				struct fd f;
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				f = fdget(i);
				if (f.file) {
					const struct file_operations *f_op;
					f_op = f.file->f_op;
					mask = DEFAULT_POLLMASK;
					if (f_op->poll) {
						wait_key_set(wait, in, out,
							     bit, busy_flag);
						mask = (*f_op->poll)(f.file, wait);
					}
					fdput(f);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
						wait->_qproc = NULL;
					}
					/* got something, stop busy polling */
					if (retval) {
						can_busy_loop = false;
						busy_flag = 0;

					/*
					 * only remember a returned
					 * POLL_BUSY_LOOP if we asked for it
					 */
					} else if (busy_flag & mask)
						can_busy_loop = true;

				}
			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait->_qproc = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_start) {
				busy_start = busy_loop_current_time();
				continue;
			}
			if (!busy_loop_timeout(busy_start))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec64 *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	size_t size, alloc_size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * since we used fdset we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		if (size > (SIZE_MAX / 6))
			goto out_nofds;

		alloc_size = 6 * size;
		bits = kvmalloc(alloc_size, GFP_KERNEL);
		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kvfree(bits);
out_nofds:
	return ret;
}
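
/*
 * From userspace the flow above is driven by the classic pattern,
 * e.g. waiting up to 5 s for a socket to become readable (sketch
 * only; sock is assumed to be an open socket fd):
 *
 *	fd_set rfds;
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *
 *	FD_ZERO(&rfds);
 *	FD_SET(sock, &rfds);
 *	ret = select(sock + 1, &rfds, NULL, NULL, &tv);
 *
 * On return the kernel has overwritten rfds with the result bitmap
 * (res_in above) and, unless STICKY_TIMEOUTS is set in the caller's
 * personality, rewritten tv with the time remaining.
 */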

SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	struct timespec64 end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}

static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timespec __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (get_timespec64(&ts, tsp))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's. */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timespec __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}
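
/*
 * A libc wrapper would pack that sixth argument along these lines
 * (a sketch; the struct name is arbitrary, but the layout -- pointer
 * first, size second -- must match the __get_user() calls above):
 *
 *	struct sigset_argpack {
 *		const sigset_t *ss;
 *		size_t ss_len;
 *	} pack = { &mask, sizeof(mask) };
 *
 *	syscall(__NR_pselect6, nfds, rfds, wfds, efds, ts, &pack);
 */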

#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif

struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
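
/*
 * Sanity check of that constant, assuming a typical 64-bit
 * configuration: with 4 KiB pages, an 8-byte struct pollfd and a
 * 16-byte poll_list header, each chained chunk holds
 * (4096 - 16) / 8 = 510 pollfds.  This chunking is what lifted the
 * old nfds < 16390 limit mentioned at the top of the file.
 */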

/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline __poll_t do_pollfd(struct pollfd *pollfd, poll_table *pwait,
				 bool *can_busy_poll,
				 __poll_t busy_flag)
{
	__poll_t mask;
	int fd;

	mask = 0;
	fd = pollfd->fd;
	if (fd >= 0) {
		struct fd f = fdget(fd);
		mask = EPOLLNVAL;
		if (f.file) {
			/* userland u16 ->events contains POLL... bitmap */
			__poll_t filter = demangle_poll(pollfd->events) |
						EPOLLERR | EPOLLHUP;
			mask = DEFAULT_POLLMASK;
			if (f.file->f_op->poll) {
				pwait->_key = filter;
				pwait->_key |= busy_flag;
				mask = f.file->f_op->poll(f.file, pwait);
				if (mask & busy_flag)
					*can_busy_poll = true;
			}
			/* Mask out unneeded events. */
			mask &= filter;
			fdput(f);
		}
	}
	/* ... and so does ->revents */
	pollfd->revents = mangle_poll(mask);

	return mask;
}

static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
		   struct timespec64 *end_time)
{
	poll_table* pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	u64 slack = 0;
	__poll_t busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_start = 0;

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		pt->_qproc = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;
		bool can_busy_loop = false;

		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd * pfd, * pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill poll_table->_qproc, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt, &can_busy_loop,
					      busy_flag)) {
					count++;
					pt->_qproc = NULL;
					/* found something, stop busy polling */
					busy_flag = 0;
					can_busy_loop = false;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table->_qproc to them on the next loop iteration.
		 */
		pt->_qproc = NULL;
		if (!count) {
			count = wait->error;
			if (signal_pending(current))
				count = -EINTR;
		}
		if (count || timed_out)
			break;

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_start) {
				busy_start = busy_loop_current_time();
				continue;
			}
			if (!busy_loop_timeout(busy_start))
				continue;
		}
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list))  / \
			sizeof(struct pollfd))

static int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
		struct timespec64 *end_time)
{
	struct poll_wqueues table;
	int err = -EFAULT, fdcount, len, size;
	/* Allocate small arguments on the stack to save memory and be
	   faster - use long to make sure the buffer is aligned properly
	   on 64 bit archs to avoid unaligned access */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *const head = (struct poll_list *)stack_pps;
	struct poll_list *walk = head;
	unsigned long todo = nfds;

	if (nfds > rlimit(RLIMIT_NOFILE))
		return -EINVAL;

	len = min_t(unsigned int, nfds, N_STACK_PPS);
	for (;;) {
		walk->next = NULL;
		walk->len = len;
		if (!len)
			break;

		if (copy_from_user(walk->entries, ufds + nfds-todo,
					sizeof(struct pollfd) * walk->len))
			goto out_fds;

		todo -= walk->len;
		if (!todo)
			break;

		len = min(todo, POLLFD_PER_PAGE);
		size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
		walk = walk->next = kmalloc(size, GFP_KERNEL);
		if (!walk) {
			err = -ENOMEM;
			goto out_fds;
		}
	}

	poll_initwait(&table);
	fdcount = do_poll(head, &table, end_time);
	poll_freewait(&table);

	for (walk = head; walk; walk = walk->next) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++)
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
	}

	err = fdcount;
out_fds:
	walk = head->next;
	while (walk) {
		struct poll_list *pos = walk;
		walk = walk->next;
		kfree(pos);
	}

	return err;
}

static long do_restart_poll(struct restart_block *restart_block)
{
	struct pollfd __user *ufds = restart_block->poll.ufds;
	int nfds = restart_block->poll.nfds;
	struct timespec64 *to = NULL, end_time;
	int ret;

	if (restart_block->poll.has_timeout) {
		end_time.tv_sec = restart_block->poll.tv_sec;
		end_time.tv_nsec = restart_block->poll.tv_nsec;
		to = &end_time;
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		restart_block->fn = do_restart_poll;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
		int, timeout_msecs)
{
	struct timespec64 end_time, *to = NULL;
	int ret;

	if (timeout_msecs >= 0) {
		to = &end_time;
		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		struct restart_block *restart_block;

		restart_block = &current->restart_block;
		restart_block->fn = do_restart_poll;
		restart_block->poll.ufds = ufds;
		restart_block->poll.nfds = nfds;

		if (timeout_msecs >= 0) {
			restart_block->poll.tv_sec = end_time.tv_sec;
			restart_block->poll.tv_nsec = end_time.tv_nsec;
			restart_block->poll.has_timeout = 1;
		} else
			restart_block->poll.has_timeout = 0;

		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}
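
/*
 * The userspace counterpart of the above, waiting up to 10 s on two
 * descriptors (sketch only; sock and pipe_wr are assumed open fds):
 *
 *	struct pollfd pfd[2] = {
 *		{ .fd = sock,    .events = POLLIN  },
 *		{ .fd = pipe_wr, .events = POLLOUT },
 *	};
 *
 *	ret = poll(pfd, 2, 10000);
 *
 * do_sys_poll() copies the array in, waits in do_poll(), then writes
 * back only the ->revents fields; ->fd and ->events are never modified.
 */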

SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (get_timespec64(&ts, tsp))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's. */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}

#ifdef CONFIG_COMPAT
#define __COMPAT_NFDBITS       (8 * sizeof(compat_ulong_t))

static
int compat_poll_select_copy_remaining(struct timespec64 *end_time, void __user *p,
				      int timeval, int ret)
{
	struct timespec64 ts;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts64(&ts);
	ts = timespec64_sub(*end_time, ts);
	if (ts.tv_sec < 0)
		ts.tv_sec = ts.tv_nsec = 0;

	if (timeval) {
		struct compat_timeval rtv;

		rtv.tv_sec = ts.tv_sec;
		rtv.tv_usec = ts.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;
	} else {
		if (!compat_put_timespec64(&ts, p))
			return ret;
	}
	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

/*
 * Ooo, nasty. We need here to frob 32-bit unsigned longs to
 * 64-bit unsigned longs.
 */
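
/*
 * Concretely: a 32-bit task hands us an array of compat_ulong_t words,
 * so on a 64-bit kernel two consecutive 32-bit words have to be
 * spliced into each native unsigned long, the first word supplying the
 * low half, to preserve the fd-to-bit mapping.
 * compat_get_bitmap()/compat_put_bitmap() do that splicing for the
 * get/set helpers below.
 */
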
static
int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
		      unsigned long *fdset)
{
	if (ufdset) {
		return compat_get_bitmap(fdset, ufdset, nr);
	} else {
		zero_fd_set(nr, fdset);
		return 0;
	}
}

static
int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
		      unsigned long *fdset)
{
	if (!ufdset)
		return 0;
	return compat_put_bitmap(ufdset, fdset, nr);
}


/*
 * This is a virtual copy of sys_select from fs/select.c and probably
 * should be compared to it from time to time
 */

/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
static int compat_core_sys_select(int n, compat_ulong_t __user *inp,
	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
	struct timespec64 *end_time)
{
	fd_set_bits fds;
	void *bits;
	int size, max_fds, ret = -EINVAL;
	struct fdtable *fdt;
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * since we used fdset we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		bits = kmalloc(6 * size, GFP_KERNEL);
		ret = -ENOMEM;
		if (!bits)
			goto out_nofds;
	}
	fds.in      = (unsigned long *)  bits;
	fds.out     = (unsigned long *) (bits +   size);
	fds.ex      = (unsigned long *) (bits + 2*size);
	fds.res_in  = (unsigned long *) (bits + 3*size);
	fds.res_out = (unsigned long *) (bits + 4*size);
	fds.res_ex  = (unsigned long *) (bits + 5*size);

	if ((ret = compat_get_fd_set(n, inp, fds.in)) ||
	    (ret = compat_get_fd_set(n, outp, fds.out)) ||
	    (ret = compat_get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (compat_set_fd_set(n, inp, fds.res_in) ||
	    compat_set_fd_set(n, outp, fds.res_out) ||
	    compat_set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;
out:
	if (bits != stack_fds)
		kfree(bits);
out_nofds:
	return ret;
}

COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp,
	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
	struct compat_timeval __user *, tvp)
{
	struct timespec64 end_time, *to = NULL;
	struct compat_timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = compat_core_sys_select(n, inp, outp, exp, to);
	ret = compat_poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}

struct compat_sel_arg_struct {
	compat_ulong_t n;
	compat_uptr_t inp;
	compat_uptr_t outp;
	compat_uptr_t exp;
	compat_uptr_t tvp;
};

COMPAT_SYSCALL_DEFINE1(old_select, struct compat_sel_arg_struct __user *, arg)
{
	struct compat_sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	return compat_sys_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
				 compat_ptr(a.exp), compat_ptr(a.tvp));
}

static long do_compat_pselect(int n, compat_ulong_t __user *inp,
	compat_ulong_t __user *outp, compat_ulong_t __user *exp,
	struct compat_timespec __user *tsp, compat_sigset_t __user *sigmask,
	compat_size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (compat_get_timespec64(&ts, tsp))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		if (sigsetsize != sizeof(compat_sigset_t))
			return -EINVAL;
		if (get_compat_sigset(&ksigmask, sigmask))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = compat_core_sys_select(n, inp, outp, exp, to);
	ret = compat_poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}
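
/*
 * Why the sigmask dance exists at all: a plain userspace sequence like
 *
 *	sigprocmask(SIG_UNBLOCK, &sigs, NULL);
 *	select(...);
 *
 * has a window where a signal can be delivered after the mask change
 * but before select() sleeps, and is then effectively missed.
 * pselect/ppoll install the temporary mask and wait inside a single
 * syscall, and the saved_sigmask/set_restore_sigmask() path above
 * defers the mask restore until after any pending signal has been
 * delivered on the way back to userspace.
 */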

COMPAT_SYSCALL_DEFINE6(pselect6, int, n, compat_ulong_t __user *, inp,
	compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
	struct compat_timespec __user *, tsp, void __user *, sig)
{
	compat_size_t sigsetsize = 0;
	compat_uptr_t up = 0;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig,
				sizeof(compat_uptr_t)+sizeof(compat_size_t)) ||
		    __get_user(up, (compat_uptr_t __user *)sig) ||
		    __get_user(sigsetsize,
				(compat_size_t __user *)(sig+sizeof(up))))
			return -EFAULT;
	}
	return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(up),
				 sigsetsize);
}

COMPAT_SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds,
	unsigned int, nfds, struct compat_timespec __user *, tsp,
	const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec64 ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (compat_get_timespec64(&ts, tsp))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		if (sigsetsize != sizeof(compat_sigset_t))
			return -EINVAL;
		if (get_compat_sigset(&ksigmask, sigmask))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = compat_poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}
#endif