usb gadget: g_ether spinlock recursion fix

The new spinlock debug code turned up a spinlock recursion bug in the
Ethernet gadget driver on a disconnect path; it would show up with any
UDC driver where the cancellation of active requests was synchronous,
rather than e.g. delayed until a controller's completion IRQ.

That recursion is fixed here by creating and using a new spinlock to
protect the relevant lists.

Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>


authored by David Brownell and committed by Greg Kroah-Hartman 789851cf ea186651

+31 -14
drivers/usb/gadget/ether.c
··· 117 117 struct usb_ep *in_ep, *out_ep, *status_ep; 118 118 const struct usb_endpoint_descriptor 119 119 *in, *out, *status; 120 + 121 + spinlock_t req_lock; 120 122 struct list_head tx_reqs, rx_reqs; 121 123 122 124 struct net_device *net; ··· 1068 1066 */ 1069 1067 if (dev->in) { 1070 1068 usb_ep_disable (dev->in_ep); 1069 + spin_lock(&dev->req_lock); 1071 1070 while (likely (!list_empty (&dev->tx_reqs))) { 1072 1071 req = container_of (dev->tx_reqs.next, 1073 1072 struct usb_request, list); 1074 1073 list_del (&req->list); 1074 + 1075 + spin_unlock(&dev->req_lock); 1075 1076 usb_ep_free_request (dev->in_ep, req); 1077 + spin_lock(&dev->req_lock); 1076 1078 } 1079 + spin_unlock(&dev->req_lock); 1077 1080 } 1078 1081 if (dev->out) { 1079 1082 usb_ep_disable (dev->out_ep); 1083 + spin_lock(&dev->req_lock); 1080 1084 while (likely (!list_empty (&dev->rx_reqs))) { 1081 1085 req = container_of (dev->rx_reqs.next, 1082 1086 struct usb_request, list); 1083 1087 list_del (&req->list); 1088 + 1089 + spin_unlock(&dev->req_lock); 1084 1090 usb_ep_free_request (dev->out_ep, req); 1091 + spin_lock(&dev->req_lock); 1085 1092 } 1093 + spin_unlock(&dev->req_lock); 1086 1094 } 1087 1095 1088 1096 if (dev->status) { ··· 1671 1659 if (retval) { 1672 1660 DEBUG (dev, "rx submit --> %d\n", retval); 1673 1661 dev_kfree_skb_any (skb); 1674 - spin_lock (&dev->lock); 1662 + spin_lock(&dev->req_lock); 1675 1663 list_add (&req->list, &dev->rx_reqs); 1676 - spin_unlock (&dev->lock); 1664 + spin_unlock(&dev->req_lock); 1677 1665 } 1678 1666 return retval; 1679 1667 } ··· 1742 1730 dev_kfree_skb_any (skb); 1743 1731 if (!netif_running (dev->net)) { 1744 1732 clean: 1745 - /* nobody reading rx_reqs, so no dev->lock */ 1733 + spin_lock(&dev->req_lock); 1746 1734 list_add (&req->list, &dev->rx_reqs); 1735 + spin_unlock(&dev->req_lock); 1747 1736 req = NULL; 1748 1737 } 1749 1738 if (req) ··· 1795 1782 { 1796 1783 int status; 1797 1784 1785 + spin_lock(&dev->req_lock); 1798 1786 status = 
prealloc (&dev->tx_reqs, dev->in_ep, n, gfp_flags); 1799 1787 if (status < 0) 1800 1788 goto fail; 1801 1789 status = prealloc (&dev->rx_reqs, dev->out_ep, n, gfp_flags); 1802 1790 if (status < 0) 1803 1791 goto fail; 1804 - return 0; 1792 + goto done; 1805 1793 fail: 1806 1794 DEBUG (dev, "can't alloc requests\n"); 1795 + done: 1796 + spin_unlock(&dev->req_lock); 1807 1797 return status; 1808 1798 } 1809 1799 ··· 1816 1800 unsigned long flags; 1817 1801 1818 1802 /* fill unused rxq slots with some skb */ 1819 - spin_lock_irqsave (&dev->lock, flags); 1803 + spin_lock_irqsave(&dev->req_lock, flags); 1820 1804 while (!list_empty (&dev->rx_reqs)) { 1821 1805 req = container_of (dev->rx_reqs.next, 1822 1806 struct usb_request, list); 1823 1807 list_del_init (&req->list); 1824 - spin_unlock_irqrestore (&dev->lock, flags); 1808 + spin_unlock_irqrestore(&dev->req_lock, flags); 1825 1809 1826 1810 if (rx_submit (dev, req, gfp_flags) < 0) { 1827 1811 defer_kevent (dev, WORK_RX_MEMORY); 1828 1812 return; 1829 1813 } 1830 1814 1831 - spin_lock_irqsave (&dev->lock, flags); 1815 + spin_lock_irqsave(&dev->req_lock, flags); 1832 1816 } 1833 - spin_unlock_irqrestore (&dev->lock, flags); 1817 + spin_unlock_irqrestore(&dev->req_lock, flags); 1834 1818 } 1835 1819 1836 1820 static void eth_work (void *_dev) ··· 1864 1848 } 1865 1849 dev->stats.tx_packets++; 1866 1850 1867 - spin_lock (&dev->lock); 1851 + spin_lock(&dev->req_lock); 1868 1852 list_add (&req->list, &dev->tx_reqs); 1869 - spin_unlock (&dev->lock); 1853 + spin_unlock(&dev->req_lock); 1870 1854 dev_kfree_skb_any (skb); 1871 1855 1872 1856 atomic_dec (&dev->tx_qlen); ··· 1912 1896 /* ignores USB_CDC_PACKET_TYPE_DIRECTED */ 1913 1897 } 1914 1898 1915 - spin_lock_irqsave (&dev->lock, flags); 1899 + spin_lock_irqsave(&dev->req_lock, flags); 1916 1900 req = container_of (dev->tx_reqs.next, struct usb_request, list); 1917 1901 list_del (&req->list); 1918 1902 if (list_empty (&dev->tx_reqs)) 1919 1903 netif_stop_queue (net); 1920 
- spin_unlock_irqrestore (&dev->lock, flags); 1904 + spin_unlock_irqrestore(&dev->req_lock, flags); 1921 1905 1922 1906 /* no buffer copies needed, unless the network stack did it 1923 1907 * or the hardware can't use skb buffers. ··· 1971 1955 drop: 1972 1956 dev->stats.tx_dropped++; 1973 1957 dev_kfree_skb_any (skb); 1974 - spin_lock_irqsave (&dev->lock, flags); 1958 + spin_lock_irqsave(&dev->req_lock, flags); 1975 1959 if (list_empty (&dev->tx_reqs)) 1976 1960 netif_start_queue (net); 1977 1961 list_add (&req->list, &dev->tx_reqs); 1978 - spin_unlock_irqrestore (&dev->lock, flags); 1962 + spin_unlock_irqrestore(&dev->req_lock, flags); 1979 1963 } 1980 1964 return 0; 1981 1965 } ··· 2394 2378 return status; 2395 2379 dev = netdev_priv(net); 2396 2380 spin_lock_init (&dev->lock); 2381 + spin_lock_init (&dev->req_lock); 2397 2382 INIT_WORK (&dev->work, eth_work, dev); 2398 2383 INIT_LIST_HEAD (&dev->tx_reqs); 2399 2384 INIT_LIST_HEAD (&dev->rx_reqs);