Merge branch 'work.poll2' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull more poll annotation updates from Al Viro:
"This is preparation to solving the problems you've mentioned in the
original poll series.

After this series, the kernel is ready for running

    for V in IN OUT PRI ERR RDNORM RDBAND WRNORM WRBAND HUP RDHUP NVAL MSG; do
        L=`git grep -l -w POLL$V | grep -v '^t' | grep -v /um/ | grep -v '^sa' | grep -v '/poll.h$'|grep -v '^D'`
        for f in $L; do sed -i "-es/^\([^\"]*\)\(\<POLL$V\>\)/\\1E\\2/" $f; done
    done

as a bulk search-and-replace.

After that, the kernel is ready to apply the patch to unify
{de,}mangle_poll(), and then get rid of kernel-side POLL... uses
entirely, and we should be all done with that stuff.

Basically, that's what you suggested wrt KPOLL..., except that we can
use EPOLL... instead - they already are arch-independent (and equal to
what is currently kernel-side POLL...).

After the preparations (in this series), the switch to returning EPOLL...
from ->poll() instances is completely mechanical and kernel-side
POLL... can go away. The last step (killing kernel-side POLL... and
unifying {de,}mangle_poll()) has to be done after the
search-and-replace job, since we need userland-side POLL... for
unified {de,}mangle_poll(), thus the cherry-pick at the last step.

After that we will have:

- POLL{IN,OUT,...} *not* in __poll_t, so any stray instances of
->poll() still using those will be caught by sparse.

- eventpoll.c and select.c warning-free wrt __poll_t

- no more kernel-side definitions of POLL... - userland ones are
visible through the entire kernel (and used pretty much only for
mangle/demangle)

- same behavior as after the first series (i.e. sparc et al. epoll(2)
working correctly)"

* 'work.poll2' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
annotate ep_scan_ready_list()
ep_send_events_proc(): return result via esed->res
preparation to switching ->poll() to returning EPOLL...
add EPOLLNVAL, annotate EPOLL... and event_poll->event
use linux/poll.h instead of asm/poll.h
xen: fix poll misannotation
smc: missing poll annotations
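
The {de,}mangle_poll() unification mentioned in the message above is not part
of this pull. As a hedged sketch of the idea: on architectures whose userland
POLL... values already equal the EPOLL... ones, the two helpers can reduce to
forced casts, roughly as below (generic case only; architectures with mangled
poll bits would keep their own conversion tables).

	/* sketch, assuming userland POLL* bits match EPOLL* on this arch */
	static inline __u16 mangle_poll(__poll_t val)
	{
		return (__force __u16)val;
	}

	static inline __poll_t demangle_poll(u16 val)
	{
		return (__force __poll_t)val;
	}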

Changed files (+47 -40):

 drivers/xen/pvcalls-front.h
 fs/coda/psdev.c
 fs/debugfs/file.c
 fs/eventpoll.c
 fs/fcntl.c
 include/linux/poll.h
 include/uapi/linux/eventpoll.h
 net/smc/af_smc.c
+1 -1
drivers/xen/pvcalls-front.h
···
 				    struct msghdr *msg,
 				    size_t len,
 				    int flags);
-unsigned int pvcalls_front_poll(struct file *file,
+__poll_t pvcalls_front_poll(struct file *file,
 			       struct socket *sock,
 			       poll_table *wait);
 int pvcalls_front_release(struct socket *sock);
+1 -1
fs/coda/psdev.c
···
 #include <linux/device.h>
 #include <linux/pid_namespace.h>
 #include <asm/io.h>
-#include <asm/poll.h>
+#include <linux/poll.h>
 #include <linux/uaccess.h>
 
 #include <linux/coda.h>
+1 -1
fs/debugfs/file.c
···
 #include <linux/slab.h>
 #include <linux/atomic.h>
 #include <linux/device.h>
-#include <asm/poll.h>
+#include <linux/poll.h>
 
 #include "internal.h"
 
+23 -18
fs/eventpoll.c
···
 struct ep_send_events_data {
 	int maxevents;
 	struct epoll_event __user *events;
+	int res;
 };
 
 /*
···
  *
  * Returns: The same integer error code returned by the @sproc callback.
  */
-static int ep_scan_ready_list(struct eventpoll *ep,
-			      int (*sproc)(struct eventpoll *,
+static __poll_t ep_scan_ready_list(struct eventpoll *ep,
+			      __poll_t (*sproc)(struct eventpoll *,
 					   struct list_head *, void *),
 			      void *priv, int depth, bool ep_locked)
 {
-	int error, pwake = 0;
+	__poll_t res;
+	int pwake = 0;
 	unsigned long flags;
 	struct epitem *epi, *nepi;
 	LIST_HEAD(txlist);
···
 	/*
 	 * Now call the callback function.
 	 */
-	error = (*sproc)(ep, &txlist, priv);
+	res = (*sproc)(ep, &txlist, priv);
 
 	spin_lock_irqsave(&ep->lock, flags);
 	/*
···
 	if (pwake)
 		ep_poll_safewake(&ep->poll_wait);
 
-	return error;
+	return res;
 }
 
 static void epi_rcu_free(struct rcu_head *head)
···
 	return 0;
 }
 
-static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
+static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
 			       void *priv);
 static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
 				 poll_table *pt);
···
  * the ep->mtx so we need to start from depth=1, such that mutex_lock_nested()
  * is correctly annotated.
  */
-static unsigned int ep_item_poll(const struct epitem *epi, poll_table *pt,
+static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt,
 				 int depth)
 {
 	struct eventpoll *ep;
···
 				  locked) & epi->event.events;
 }
 
-static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
+static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
 			       void *priv)
 {
 	struct epitem *epi, *tmp;
···
 static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
 		     struct file *tfile, int fd, int full_check)
 {
-	int error, revents, pwake = 0;
+	int error, pwake = 0;
+	__poll_t revents;
 	unsigned long flags;
 	long user_watches;
 	struct epitem *epi;
···
 	return 0;
 }
 
-static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
+static __poll_t ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
 			       void *priv)
 {
 	struct ep_send_events_data *esed = priv;
-	int eventcnt;
-	unsigned int revents;
+	__poll_t revents;
 	struct epitem *epi;
 	struct epoll_event __user *uevent;
 	struct wakeup_source *ws;
···
 	 * Items cannot vanish during the loop because ep_scan_ready_list() is
 	 * holding "mtx" during this call.
 	 */
-	for (eventcnt = 0, uevent = esed->events;
-	     !list_empty(head) && eventcnt < esed->maxevents;) {
+	for (esed->res = 0, uevent = esed->events;
+	     !list_empty(head) && esed->res < esed->maxevents;) {
 		epi = list_first_entry(head, struct epitem, rdllink);
 
 		/*
···
 		    __put_user(epi->event.data, &uevent->data)) {
 			list_add(&epi->rdllink, head);
 			ep_pm_stay_awake(epi);
-			return eventcnt ? eventcnt : -EFAULT;
+			if (!esed->res)
+				esed->res = -EFAULT;
+			return 0;
 		}
-		eventcnt++;
+		esed->res++;
 		uevent++;
 		if (epi->event.events & EPOLLONESHOT)
 			epi->event.events &= EP_PRIVATE_BITS;
···
 		}
 	}
 
-	return eventcnt;
+	return 0;
 }
 
 static int ep_send_events(struct eventpoll *ep,
···
 	esed.maxevents = maxevents;
 	esed.events = events;
 
-	return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0, false);
+	ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0, false);
+	return esed.res;
 }
 
 static inline struct timespec64 ep_set_mstimeout(long ms)
+1 -1
fs/fcntl.c
···
 #include <linux/shmem_fs.h>
 #include <linux/compat.h>
 
-#include <asm/poll.h>
+#include <linux/poll.h>
 #include <asm/siginfo.h>
 #include <linux/uaccess.h>
 
+2 -1
include/linux/poll.h
···
 #include <linux/sysctl.h>
 #include <linux/uaccess.h>
 #include <uapi/linux/poll.h>
+#include <uapi/linux/eventpoll.h>
 
 extern struct ctl_table epoll_table[]; /* for sysctl */
 /* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
···
 #define WQUEUES_STACK_ALLOC	(MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC)
 #define N_INLINE_POLL_ENTRIES	(WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry))
 
-#define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)
+#define DEFAULT_POLLMASK (EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM)
 
 struct poll_table_struct;
 
+17 -16
include/uapi/linux/eventpoll.h
···
 #define EPOLL_CTL_MOD 3
 
 /* Epoll event masks */
-#define EPOLLIN		0x00000001
-#define EPOLLPRI	0x00000002
-#define EPOLLOUT	0x00000004
-#define EPOLLERR	0x00000008
-#define EPOLLHUP	0x00000010
-#define EPOLLRDNORM	0x00000040
-#define EPOLLRDBAND	0x00000080
-#define EPOLLWRNORM	0x00000100
-#define EPOLLWRBAND	0x00000200
-#define EPOLLMSG	0x00000400
-#define EPOLLRDHUP	0x00002000
+#define EPOLLIN		(__force __poll_t)0x00000001
+#define EPOLLPRI	(__force __poll_t)0x00000002
+#define EPOLLOUT	(__force __poll_t)0x00000004
+#define EPOLLERR	(__force __poll_t)0x00000008
+#define EPOLLHUP	(__force __poll_t)0x00000010
+#define EPOLLNVAL	(__force __poll_t)0x00000020
+#define EPOLLRDNORM	(__force __poll_t)0x00000040
+#define EPOLLRDBAND	(__force __poll_t)0x00000080
+#define EPOLLWRNORM	(__force __poll_t)0x00000100
+#define EPOLLWRBAND	(__force __poll_t)0x00000200
+#define EPOLLMSG	(__force __poll_t)0x00000400
+#define EPOLLRDHUP	(__force __poll_t)0x00002000
 
 /* Set exclusive wakeup mode for the target file descriptor */
-#define EPOLLEXCLUSIVE (1U << 28)
+#define EPOLLEXCLUSIVE (__force __poll_t)(1U << 28)
 
 /*
  * Request the handling of system wakeup events so as to prevent system suspends
···
  *
  * Requires CAP_BLOCK_SUSPEND
  */
-#define EPOLLWAKEUP (1U << 29)
+#define EPOLLWAKEUP (__force __poll_t)(1U << 29)
 
 /* Set the One Shot behaviour for the target file descriptor */
-#define EPOLLONESHOT (1U << 30)
+#define EPOLLONESHOT (__force __poll_t)(1U << 30)
 
 /* Set the Edge Triggered behaviour for the target file descriptor */
-#define EPOLLET (1U << 31)
+#define EPOLLET (__force __poll_t)(1U << 31)
 
 /*
  * On x86-64 make the 64bit structure have the same alignment as the
···
 #endif
 
 struct epoll_event {
-	__u32 events;
+	__poll_t events;
 	__u64 data;
 } EPOLL_PACKED;
 
+1 -1
net/smc/af_smc.c
···
 static __poll_t smc_accept_poll(struct sock *parent)
 {
 	struct smc_sock *isk = smc_sk(parent);
-	int mask = 0;
+	__poll_t mask = 0;
 
 	spin_lock(&isk->accept_q_lock);
 	if (!list_empty(&isk->accept_q))