Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'work.aio' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull aio race fixes and cleanups from Al Viro.

The aio code had more issues with error handling, and with races between
the aio completing at just the right (wrong) time and the file descriptor
being freed when another thread closes the file.

Just a couple of these commits are the actual fixes; the others are
cleanups to either make the fixes simpler, or to make the code legible
and understandable enough that we hope there are no more fundamental
races hiding.

* 'work.aio' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
aio: move sanity checks and request allocation to io_submit_one()
deal with get_reqs_available() in aio_get_req() itself
aio: move dropping ->ki_eventfd into iocb_destroy()
make aio_read()/aio_write() return int
Fix aio_poll() races
aio: store event at final iocb_put()
aio: keep io_event in aio_kiocb
aio: fold lookup_kiocb() into its sole caller
pin iocb through aio.

+151 -189
+151 -189
fs/aio.c
··· 181 181 struct file *file; 182 182 struct wait_queue_head *head; 183 183 __poll_t events; 184 - bool woken; 184 + bool done; 185 185 bool cancelled; 186 186 struct wait_queue_entry wait; 187 187 struct work_struct work; ··· 204 204 struct kioctx *ki_ctx; 205 205 kiocb_cancel_fn *ki_cancel; 206 206 207 - struct iocb __user *ki_user_iocb; /* user's aiocb */ 208 - __u64 ki_user_data; /* user's data for completion */ 207 + struct io_event ki_res; 209 208 210 209 struct list_head ki_list; /* the aio core uses this 211 210 * for cancellation */ ··· 1021 1022 /* aio_get_req 1022 1023 * Allocate a slot for an aio request. 1023 1024 * Returns NULL if no requests are free. 1025 + * 1026 + * The refcount is initialized to 2 - one for the async op completion, 1027 + * one for the synchronous code that does this. 1024 1028 */ 1025 1029 static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx) 1026 1030 { ··· 1033 1031 if (unlikely(!req)) 1034 1032 return NULL; 1035 1033 1034 + if (unlikely(!get_reqs_available(ctx))) { 1035 + kfree(req); 1036 + return NULL; 1037 + } 1038 + 1036 1039 percpu_ref_get(&ctx->reqs); 1037 1040 req->ki_ctx = ctx; 1038 1041 INIT_LIST_HEAD(&req->ki_list); 1039 - refcount_set(&req->ki_refcnt, 0); 1042 + refcount_set(&req->ki_refcnt, 2); 1040 1043 req->ki_eventfd = NULL; 1041 1044 return req; 1042 1045 } ··· 1074 1067 return ret; 1075 1068 } 1076 1069 1077 - static inline void iocb_put(struct aio_kiocb *iocb) 1070 + static inline void iocb_destroy(struct aio_kiocb *iocb) 1078 1071 { 1079 - if (refcount_read(&iocb->ki_refcnt) == 0 || 1080 - refcount_dec_and_test(&iocb->ki_refcnt)) { 1081 - if (iocb->ki_filp) 1082 - fput(iocb->ki_filp); 1083 - percpu_ref_put(&iocb->ki_ctx->reqs); 1084 - kmem_cache_free(kiocb_cachep, iocb); 1085 - } 1086 - } 1087 - 1088 - static void aio_fill_event(struct io_event *ev, struct aio_kiocb *iocb, 1089 - long res, long res2) 1090 - { 1091 - ev->obj = (u64)(unsigned long)iocb->ki_user_iocb; 1092 - ev->data = 
iocb->ki_user_data; 1093 - ev->res = res; 1094 - ev->res2 = res2; 1072 + if (iocb->ki_eventfd) 1073 + eventfd_ctx_put(iocb->ki_eventfd); 1074 + if (iocb->ki_filp) 1075 + fput(iocb->ki_filp); 1076 + percpu_ref_put(&iocb->ki_ctx->reqs); 1077 + kmem_cache_free(kiocb_cachep, iocb); 1095 1078 } 1096 1079 1097 1080 /* aio_complete 1098 1081 * Called when the io request on the given iocb is complete. 1099 1082 */ 1100 - static void aio_complete(struct aio_kiocb *iocb, long res, long res2) 1083 + static void aio_complete(struct aio_kiocb *iocb) 1101 1084 { 1102 1085 struct kioctx *ctx = iocb->ki_ctx; 1103 1086 struct aio_ring *ring; ··· 1111 1114 ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); 1112 1115 event = ev_page + pos % AIO_EVENTS_PER_PAGE; 1113 1116 1114 - aio_fill_event(event, iocb, res, res2); 1117 + *event = iocb->ki_res; 1115 1118 1116 1119 kunmap_atomic(ev_page); 1117 1120 flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); 1118 1121 1119 - pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n", 1120 - ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data, 1121 - res, res2); 1122 + pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb, 1123 + (void __user *)(unsigned long)iocb->ki_res.obj, 1124 + iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2); 1122 1125 1123 1126 /* after flagging the request as done, we 1124 1127 * must never even look at it again ··· 1145 1148 * eventfd. The eventfd_signal() function is safe to be called 1146 1149 * from IRQ context. 
1147 1150 */ 1148 - if (iocb->ki_eventfd) { 1151 + if (iocb->ki_eventfd) 1149 1152 eventfd_signal(iocb->ki_eventfd, 1); 1150 - eventfd_ctx_put(iocb->ki_eventfd); 1151 - } 1152 1153 1153 1154 /* 1154 1155 * We have to order our ring_info tail store above and test ··· 1158 1163 1159 1164 if (waitqueue_active(&ctx->wait)) 1160 1165 wake_up(&ctx->wait); 1161 - iocb_put(iocb); 1166 + } 1167 + 1168 + static inline void iocb_put(struct aio_kiocb *iocb) 1169 + { 1170 + if (refcount_dec_and_test(&iocb->ki_refcnt)) { 1171 + aio_complete(iocb); 1172 + iocb_destroy(iocb); 1173 + } 1162 1174 } 1163 1175 1164 1176 /* aio_read_events_ring ··· 1439 1437 file_end_write(kiocb->ki_filp); 1440 1438 } 1441 1439 1442 - aio_complete(iocb, res, res2); 1440 + iocb->ki_res.res = res; 1441 + iocb->ki_res.res2 = res2; 1442 + iocb_put(iocb); 1443 1443 } 1444 1444 1445 1445 static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb) ··· 1518 1514 } 1519 1515 } 1520 1516 1521 - static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb, 1517 + static int aio_read(struct kiocb *req, const struct iocb *iocb, 1522 1518 bool vectored, bool compat) 1523 1519 { 1524 1520 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; 1525 1521 struct iov_iter iter; 1526 1522 struct file *file; 1527 - ssize_t ret; 1523 + int ret; 1528 1524 1529 1525 ret = aio_prep_rw(req, iocb); 1530 1526 if (ret) ··· 1546 1542 return ret; 1547 1543 } 1548 1544 1549 - static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb, 1545 + static int aio_write(struct kiocb *req, const struct iocb *iocb, 1550 1546 bool vectored, bool compat) 1551 1547 { 1552 1548 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; 1553 1549 struct iov_iter iter; 1554 1550 struct file *file; 1555 - ssize_t ret; 1551 + int ret; 1556 1552 1557 1553 ret = aio_prep_rw(req, iocb); 1558 1554 if (ret) ··· 1589 1585 1590 1586 static void aio_fsync_work(struct work_struct *work) 1591 1587 { 1592 - struct fsync_iocb *req = 
container_of(work, struct fsync_iocb, work); 1593 - int ret; 1588 + struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work); 1594 1589 1595 - ret = vfs_fsync(req->file, req->datasync); 1596 - aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0); 1590 + iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync); 1591 + iocb_put(iocb); 1597 1592 } 1598 1593 1599 1594 static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb, ··· 1609 1606 INIT_WORK(&req->work, aio_fsync_work); 1610 1607 schedule_work(&req->work); 1611 1608 return 0; 1612 - } 1613 - 1614 - static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask) 1615 - { 1616 - aio_complete(iocb, mangle_poll(mask), 0); 1617 1609 } 1618 1610 1619 1611 static void aio_poll_complete_work(struct work_struct *work) ··· 1636 1638 return; 1637 1639 } 1638 1640 list_del_init(&iocb->ki_list); 1641 + iocb->ki_res.res = mangle_poll(mask); 1642 + req->done = true; 1639 1643 spin_unlock_irq(&ctx->ctx_lock); 1640 1644 1641 - aio_poll_complete(iocb, mask); 1645 + iocb_put(iocb); 1642 1646 } 1643 1647 1644 1648 /* assumes we are called with irqs disabled */ ··· 1668 1668 __poll_t mask = key_to_poll(key); 1669 1669 unsigned long flags; 1670 1670 1671 - req->woken = true; 1672 - 1673 1671 /* for instances that support it check for an event match first: */ 1674 - if (mask) { 1675 - if (!(mask & req->events)) 1676 - return 0; 1672 + if (mask && !(mask & req->events)) 1673 + return 0; 1677 1674 1675 + list_del_init(&req->wait.entry); 1676 + 1677 + if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) { 1678 1678 /* 1679 1679 * Try to complete the iocb inline if we can. Use 1680 1680 * irqsave/irqrestore because not all filesystems (e.g. fuse) 1681 1681 * call this function with IRQs disabled and because IRQs 1682 1682 * have to be disabled before ctx_lock is obtained. 
1683 1683 */ 1684 - if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) { 1685 - list_del(&iocb->ki_list); 1686 - spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags); 1687 - 1688 - list_del_init(&req->wait.entry); 1689 - aio_poll_complete(iocb, mask); 1690 - return 1; 1691 - } 1684 + list_del(&iocb->ki_list); 1685 + iocb->ki_res.res = mangle_poll(mask); 1686 + req->done = true; 1687 + spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags); 1688 + iocb_put(iocb); 1689 + } else { 1690 + schedule_work(&req->work); 1692 1691 } 1693 - 1694 - list_del_init(&req->wait.entry); 1695 - schedule_work(&req->work); 1696 1692 return 1; 1697 1693 } 1698 1694 ··· 1715 1719 add_wait_queue(head, &pt->iocb->poll.wait); 1716 1720 } 1717 1721 1718 - static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) 1722 + static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) 1719 1723 { 1720 1724 struct kioctx *ctx = aiocb->ki_ctx; 1721 1725 struct poll_iocb *req = &aiocb->poll; 1722 1726 struct aio_poll_table apt; 1727 + bool cancel = false; 1723 1728 __poll_t mask; 1724 1729 1725 1730 /* reject any unknown events outside the normal event mask. 
*/ ··· 1734 1737 req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP; 1735 1738 1736 1739 req->head = NULL; 1737 - req->woken = false; 1740 + req->done = false; 1738 1741 req->cancelled = false; 1739 1742 1740 1743 apt.pt._qproc = aio_poll_queue_proc; ··· 1746 1749 INIT_LIST_HEAD(&req->wait.entry); 1747 1750 init_waitqueue_func_entry(&req->wait, aio_poll_wake); 1748 1751 1749 - /* one for removal from waitqueue, one for this function */ 1750 - refcount_set(&aiocb->ki_refcnt, 2); 1751 - 1752 1752 mask = vfs_poll(req->file, &apt.pt) & req->events; 1753 - if (unlikely(!req->head)) { 1754 - /* we did not manage to set up a waitqueue, done */ 1755 - goto out; 1756 - } 1757 - 1758 1753 spin_lock_irq(&ctx->ctx_lock); 1759 - spin_lock(&req->head->lock); 1760 - if (req->woken) { 1761 - /* wake_up context handles the rest */ 1762 - mask = 0; 1763 - apt.error = 0; 1764 - } else if (mask || apt.error) { 1765 - /* if we get an error or a mask we are done */ 1766 - WARN_ON_ONCE(list_empty(&req->wait.entry)); 1767 - list_del_init(&req->wait.entry); 1768 - } else { 1769 - /* actually waiting for an event */ 1770 - list_add_tail(&aiocb->ki_list, &ctx->active_reqs); 1771 - aiocb->ki_cancel = aio_poll_cancel; 1754 + if (likely(req->head)) { 1755 + spin_lock(&req->head->lock); 1756 + if (unlikely(list_empty(&req->wait.entry))) { 1757 + if (apt.error) 1758 + cancel = true; 1759 + apt.error = 0; 1760 + mask = 0; 1761 + } 1762 + if (mask || apt.error) { 1763 + list_del_init(&req->wait.entry); 1764 + } else if (cancel) { 1765 + WRITE_ONCE(req->cancelled, true); 1766 + } else if (!req->done) { /* actually waiting for an event */ 1767 + list_add_tail(&aiocb->ki_list, &ctx->active_reqs); 1768 + aiocb->ki_cancel = aio_poll_cancel; 1769 + } 1770 + spin_unlock(&req->head->lock); 1772 1771 } 1773 - spin_unlock(&req->head->lock); 1772 + if (mask) { /* no async, we'd stolen it */ 1773 + aiocb->ki_res.res = mangle_poll(mask); 1774 + apt.error = 0; 1775 + } 1774 1776 
spin_unlock_irq(&ctx->ctx_lock); 1775 - 1776 - out: 1777 - if (unlikely(apt.error)) 1778 - return apt.error; 1779 - 1780 1777 if (mask) 1781 - aio_poll_complete(aiocb, mask); 1782 - iocb_put(aiocb); 1783 - return 0; 1778 + iocb_put(aiocb); 1779 + return apt.error; 1784 1780 } 1785 1781 1786 1782 static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb, 1787 - struct iocb __user *user_iocb, bool compat) 1783 + struct iocb __user *user_iocb, struct aio_kiocb *req, 1784 + bool compat) 1788 1785 { 1789 - struct aio_kiocb *req; 1790 - ssize_t ret; 1791 - 1792 - /* enforce forwards compatibility on users */ 1793 - if (unlikely(iocb->aio_reserved2)) { 1794 - pr_debug("EINVAL: reserve field set\n"); 1795 - return -EINVAL; 1796 - } 1797 - 1798 - /* prevent overflows */ 1799 - if (unlikely( 1800 - (iocb->aio_buf != (unsigned long)iocb->aio_buf) || 1801 - (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) || 1802 - ((ssize_t)iocb->aio_nbytes < 0) 1803 - )) { 1804 - pr_debug("EINVAL: overflow check\n"); 1805 - return -EINVAL; 1806 - } 1807 - 1808 - if (!get_reqs_available(ctx)) 1809 - return -EAGAIN; 1810 - 1811 - ret = -EAGAIN; 1812 - req = aio_get_req(ctx); 1813 - if (unlikely(!req)) 1814 - goto out_put_reqs_available; 1815 - 1816 1786 req->ki_filp = fget(iocb->aio_fildes); 1817 - ret = -EBADF; 1818 1787 if (unlikely(!req->ki_filp)) 1819 - goto out_put_req; 1788 + return -EBADF; 1820 1789 1821 1790 if (iocb->aio_flags & IOCB_FLAG_RESFD) { 1791 + struct eventfd_ctx *eventfd; 1822 1792 /* 1823 1793 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an 1824 1794 * instance of the file* now. The file descriptor must be 1825 1795 * an eventfd() fd, and will be signaled for each completed 1826 1796 * event using the eventfd_signal() function. 
1827 1797 */ 1828 - req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd); 1829 - if (IS_ERR(req->ki_eventfd)) { 1830 - ret = PTR_ERR(req->ki_eventfd); 1831 - req->ki_eventfd = NULL; 1832 - goto out_put_req; 1833 - } 1798 + eventfd = eventfd_ctx_fdget(iocb->aio_resfd); 1799 + if (IS_ERR(eventfd)) 1800 + return PTR_ERR(req->ki_eventfd); 1801 + 1802 + req->ki_eventfd = eventfd; 1834 1803 } 1835 1804 1836 - ret = put_user(KIOCB_KEY, &user_iocb->aio_key); 1837 - if (unlikely(ret)) { 1805 + if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) { 1838 1806 pr_debug("EFAULT: aio_key\n"); 1839 - goto out_put_req; 1807 + return -EFAULT; 1840 1808 } 1841 1809 1842 - req->ki_user_iocb = user_iocb; 1843 - req->ki_user_data = iocb->aio_data; 1810 + req->ki_res.obj = (u64)(unsigned long)user_iocb; 1811 + req->ki_res.data = iocb->aio_data; 1812 + req->ki_res.res = 0; 1813 + req->ki_res.res2 = 0; 1844 1814 1845 1815 switch (iocb->aio_lio_opcode) { 1846 1816 case IOCB_CMD_PREAD: 1847 - ret = aio_read(&req->rw, iocb, false, compat); 1848 - break; 1817 + return aio_read(&req->rw, iocb, false, compat); 1849 1818 case IOCB_CMD_PWRITE: 1850 - ret = aio_write(&req->rw, iocb, false, compat); 1851 - break; 1819 + return aio_write(&req->rw, iocb, false, compat); 1852 1820 case IOCB_CMD_PREADV: 1853 - ret = aio_read(&req->rw, iocb, true, compat); 1854 - break; 1821 + return aio_read(&req->rw, iocb, true, compat); 1855 1822 case IOCB_CMD_PWRITEV: 1856 - ret = aio_write(&req->rw, iocb, true, compat); 1857 - break; 1823 + return aio_write(&req->rw, iocb, true, compat); 1858 1824 case IOCB_CMD_FSYNC: 1859 - ret = aio_fsync(&req->fsync, iocb, false); 1860 - break; 1825 + return aio_fsync(&req->fsync, iocb, false); 1861 1826 case IOCB_CMD_FDSYNC: 1862 - ret = aio_fsync(&req->fsync, iocb, true); 1863 - break; 1827 + return aio_fsync(&req->fsync, iocb, true); 1864 1828 case IOCB_CMD_POLL: 1865 - ret = aio_poll(req, iocb); 1866 - break; 1829 + return aio_poll(req, iocb); 1867 1830 default: 
1868 1831 pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode); 1869 - ret = -EINVAL; 1870 - break; 1832 + return -EINVAL; 1871 1833 } 1872 - 1873 - /* 1874 - * If ret is 0, we'd either done aio_complete() ourselves or have 1875 - * arranged for that to be done asynchronously. Anything non-zero 1876 - * means that we need to destroy req ourselves. 1877 - */ 1878 - if (ret) 1879 - goto out_put_req; 1880 - return 0; 1881 - out_put_req: 1882 - if (req->ki_eventfd) 1883 - eventfd_ctx_put(req->ki_eventfd); 1884 - iocb_put(req); 1885 - out_put_reqs_available: 1886 - put_reqs_available(ctx, 1); 1887 - return ret; 1888 1834 } 1889 1835 1890 1836 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, 1891 1837 bool compat) 1892 1838 { 1839 + struct aio_kiocb *req; 1893 1840 struct iocb iocb; 1841 + int err; 1894 1842 1895 1843 if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb)))) 1896 1844 return -EFAULT; 1897 1845 1898 - return __io_submit_one(ctx, &iocb, user_iocb, compat); 1846 + /* enforce forwards compatibility on users */ 1847 + if (unlikely(iocb.aio_reserved2)) { 1848 + pr_debug("EINVAL: reserve field set\n"); 1849 + return -EINVAL; 1850 + } 1851 + 1852 + /* prevent overflows */ 1853 + if (unlikely( 1854 + (iocb.aio_buf != (unsigned long)iocb.aio_buf) || 1855 + (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) || 1856 + ((ssize_t)iocb.aio_nbytes < 0) 1857 + )) { 1858 + pr_debug("EINVAL: overflow check\n"); 1859 + return -EINVAL; 1860 + } 1861 + 1862 + req = aio_get_req(ctx); 1863 + if (unlikely(!req)) 1864 + return -EAGAIN; 1865 + 1866 + err = __io_submit_one(ctx, &iocb, user_iocb, req, compat); 1867 + 1868 + /* Done with the synchronous reference */ 1869 + iocb_put(req); 1870 + 1871 + /* 1872 + * If err is 0, we'd either done aio_complete() ourselves or have 1873 + * arranged for that to be done asynchronously. Anything non-zero 1874 + * means that we need to destroy req ourselves. 
1875 + */ 1876 + if (unlikely(err)) { 1877 + iocb_destroy(req); 1878 + put_reqs_available(ctx, 1); 1879 + } 1880 + return err; 1899 1881 } 1900 1882 1901 1883 /* sys_io_submit: ··· 1973 1997 } 1974 1998 #endif 1975 1999 1976 - /* lookup_kiocb 1977 - * Finds a given iocb for cancellation. 1978 - */ 1979 - static struct aio_kiocb * 1980 - lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb) 1981 - { 1982 - struct aio_kiocb *kiocb; 1983 - 1984 - assert_spin_locked(&ctx->ctx_lock); 1985 - 1986 - /* TODO: use a hash or array, this sucks. */ 1987 - list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) { 1988 - if (kiocb->ki_user_iocb == iocb) 1989 - return kiocb; 1990 - } 1991 - return NULL; 1992 - } 1993 - 1994 2000 /* sys_io_cancel: 1995 2001 * Attempts to cancel an iocb previously passed to io_submit. If 1996 2002 * the operation is successfully cancelled, the resulting event is ··· 1990 2032 struct aio_kiocb *kiocb; 1991 2033 int ret = -EINVAL; 1992 2034 u32 key; 2035 + u64 obj = (u64)(unsigned long)iocb; 1993 2036 1994 2037 if (unlikely(get_user(key, &iocb->aio_key))) 1995 2038 return -EFAULT; ··· 2002 2043 return -EINVAL; 2003 2044 2004 2045 spin_lock_irq(&ctx->ctx_lock); 2005 - kiocb = lookup_kiocb(ctx, iocb); 2006 - if (kiocb) { 2007 - ret = kiocb->ki_cancel(&kiocb->rw); 2008 - list_del_init(&kiocb->ki_list); 2046 + /* TODO: use a hash or array, this sucks. */ 2047 + list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) { 2048 + if (kiocb->ki_res.obj == obj) { 2049 + ret = kiocb->ki_cancel(&kiocb->rw); 2050 + list_del_init(&kiocb->ki_list); 2051 + break; 2052 + } 2009 2053 } 2010 2054 spin_unlock_irq(&ctx->ctx_lock); 2011 2055