usb gadget: g_ether spinlock recursion fix

The new spinlock debug code turned up a spinlock recursion bug in the
Ethernet gadget driver on a disconnect path; it would show up with any
UDC driver where the cancellation of active requests was synchronous,
rather than e.g. delayed until a controller's completion IRQ.

That recursion is fixed here by creating and using a new spinlock to
protect the relevant lists.

Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>


Authored by David Brownell; committed by Greg Kroah-Hartman (commits 789851cf, ea186651).

+31 -14
drivers/usb/gadget/ether.c
··· 117 struct usb_ep *in_ep, *out_ep, *status_ep; 118 const struct usb_endpoint_descriptor 119 *in, *out, *status; 120 struct list_head tx_reqs, rx_reqs; 121 122 struct net_device *net; ··· 1068 */ 1069 if (dev->in) { 1070 usb_ep_disable (dev->in_ep); 1071 while (likely (!list_empty (&dev->tx_reqs))) { 1072 req = container_of (dev->tx_reqs.next, 1073 struct usb_request, list); 1074 list_del (&req->list); 1075 usb_ep_free_request (dev->in_ep, req); 1076 } 1077 } 1078 if (dev->out) { 1079 usb_ep_disable (dev->out_ep); 1080 while (likely (!list_empty (&dev->rx_reqs))) { 1081 req = container_of (dev->rx_reqs.next, 1082 struct usb_request, list); 1083 list_del (&req->list); 1084 usb_ep_free_request (dev->out_ep, req); 1085 } 1086 } 1087 1088 if (dev->status) { ··· 1671 if (retval) { 1672 DEBUG (dev, "rx submit --> %d\n", retval); 1673 dev_kfree_skb_any (skb); 1674 - spin_lock (&dev->lock); 1675 list_add (&req->list, &dev->rx_reqs); 1676 - spin_unlock (&dev->lock); 1677 } 1678 return retval; 1679 } ··· 1742 dev_kfree_skb_any (skb); 1743 if (!netif_running (dev->net)) { 1744 clean: 1745 - /* nobody reading rx_reqs, so no dev->lock */ 1746 list_add (&req->list, &dev->rx_reqs); 1747 req = NULL; 1748 } 1749 if (req) ··· 1795 { 1796 int status; 1797 1798 status = prealloc (&dev->tx_reqs, dev->in_ep, n, gfp_flags); 1799 if (status < 0) 1800 goto fail; 1801 status = prealloc (&dev->rx_reqs, dev->out_ep, n, gfp_flags); 1802 if (status < 0) 1803 goto fail; 1804 - return 0; 1805 fail: 1806 DEBUG (dev, "can't alloc requests\n"); 1807 return status; 1808 } 1809 ··· 1816 unsigned long flags; 1817 1818 /* fill unused rxq slots with some skb */ 1819 - spin_lock_irqsave (&dev->lock, flags); 1820 while (!list_empty (&dev->rx_reqs)) { 1821 req = container_of (dev->rx_reqs.next, 1822 struct usb_request, list); 1823 list_del_init (&req->list); 1824 - spin_unlock_irqrestore (&dev->lock, flags); 1825 1826 if (rx_submit (dev, req, gfp_flags) < 0) { 1827 defer_kevent (dev, WORK_RX_MEMORY); 
1828 return; 1829 } 1830 1831 - spin_lock_irqsave (&dev->lock, flags); 1832 } 1833 - spin_unlock_irqrestore (&dev->lock, flags); 1834 } 1835 1836 static void eth_work (void *_dev) ··· 1864 } 1865 dev->stats.tx_packets++; 1866 1867 - spin_lock (&dev->lock); 1868 list_add (&req->list, &dev->tx_reqs); 1869 - spin_unlock (&dev->lock); 1870 dev_kfree_skb_any (skb); 1871 1872 atomic_dec (&dev->tx_qlen); ··· 1912 /* ignores USB_CDC_PACKET_TYPE_DIRECTED */ 1913 } 1914 1915 - spin_lock_irqsave (&dev->lock, flags); 1916 req = container_of (dev->tx_reqs.next, struct usb_request, list); 1917 list_del (&req->list); 1918 if (list_empty (&dev->tx_reqs)) 1919 netif_stop_queue (net); 1920 - spin_unlock_irqrestore (&dev->lock, flags); 1921 1922 /* no buffer copies needed, unless the network stack did it 1923 * or the hardware can't use skb buffers. ··· 1971 drop: 1972 dev->stats.tx_dropped++; 1973 dev_kfree_skb_any (skb); 1974 - spin_lock_irqsave (&dev->lock, flags); 1975 if (list_empty (&dev->tx_reqs)) 1976 netif_start_queue (net); 1977 list_add (&req->list, &dev->tx_reqs); 1978 - spin_unlock_irqrestore (&dev->lock, flags); 1979 } 1980 return 0; 1981 } ··· 2394 return status; 2395 dev = netdev_priv(net); 2396 spin_lock_init (&dev->lock); 2397 INIT_WORK (&dev->work, eth_work, dev); 2398 INIT_LIST_HEAD (&dev->tx_reqs); 2399 INIT_LIST_HEAD (&dev->rx_reqs);
··· 117 struct usb_ep *in_ep, *out_ep, *status_ep; 118 const struct usb_endpoint_descriptor 119 *in, *out, *status; 120 + 121 + spinlock_t req_lock; 122 struct list_head tx_reqs, rx_reqs; 123 124 struct net_device *net; ··· 1066 */ 1067 if (dev->in) { 1068 usb_ep_disable (dev->in_ep); 1069 + spin_lock(&dev->req_lock); 1070 while (likely (!list_empty (&dev->tx_reqs))) { 1071 req = container_of (dev->tx_reqs.next, 1072 struct usb_request, list); 1073 list_del (&req->list); 1074 + 1075 + spin_unlock(&dev->req_lock); 1076 usb_ep_free_request (dev->in_ep, req); 1077 + spin_lock(&dev->req_lock); 1078 } 1079 + spin_unlock(&dev->req_lock); 1080 } 1081 if (dev->out) { 1082 usb_ep_disable (dev->out_ep); 1083 + spin_lock(&dev->req_lock); 1084 while (likely (!list_empty (&dev->rx_reqs))) { 1085 req = container_of (dev->rx_reqs.next, 1086 struct usb_request, list); 1087 list_del (&req->list); 1088 + 1089 + spin_unlock(&dev->req_lock); 1090 usb_ep_free_request (dev->out_ep, req); 1091 + spin_lock(&dev->req_lock); 1092 } 1093 + spin_unlock(&dev->req_lock); 1094 } 1095 1096 if (dev->status) { ··· 1659 if (retval) { 1660 DEBUG (dev, "rx submit --> %d\n", retval); 1661 dev_kfree_skb_any (skb); 1662 + spin_lock(&dev->req_lock); 1663 list_add (&req->list, &dev->rx_reqs); 1664 + spin_unlock(&dev->req_lock); 1665 } 1666 return retval; 1667 } ··· 1730 dev_kfree_skb_any (skb); 1731 if (!netif_running (dev->net)) { 1732 clean: 1733 + spin_lock(&dev->req_lock); 1734 list_add (&req->list, &dev->rx_reqs); 1735 + spin_unlock(&dev->req_lock); 1736 req = NULL; 1737 } 1738 if (req) ··· 1782 { 1783 int status; 1784 1785 + spin_lock(&dev->req_lock); 1786 status = prealloc (&dev->tx_reqs, dev->in_ep, n, gfp_flags); 1787 if (status < 0) 1788 goto fail; 1789 status = prealloc (&dev->rx_reqs, dev->out_ep, n, gfp_flags); 1790 if (status < 0) 1791 goto fail; 1792 + goto done; 1793 fail: 1794 DEBUG (dev, "can't alloc requests\n"); 1795 + done: 1796 + spin_unlock(&dev->req_lock); 1797 return status; 1798 } 
1799 ··· 1800 unsigned long flags; 1801 1802 /* fill unused rxq slots with some skb */ 1803 + spin_lock_irqsave(&dev->req_lock, flags); 1804 while (!list_empty (&dev->rx_reqs)) { 1805 req = container_of (dev->rx_reqs.next, 1806 struct usb_request, list); 1807 list_del_init (&req->list); 1808 + spin_unlock_irqrestore(&dev->req_lock, flags); 1809 1810 if (rx_submit (dev, req, gfp_flags) < 0) { 1811 defer_kevent (dev, WORK_RX_MEMORY); 1812 return; 1813 } 1814 1815 + spin_lock_irqsave(&dev->req_lock, flags); 1816 } 1817 + spin_unlock_irqrestore(&dev->req_lock, flags); 1818 } 1819 1820 static void eth_work (void *_dev) ··· 1848 } 1849 dev->stats.tx_packets++; 1850 1851 + spin_lock(&dev->req_lock); 1852 list_add (&req->list, &dev->tx_reqs); 1853 + spin_unlock(&dev->req_lock); 1854 dev_kfree_skb_any (skb); 1855 1856 atomic_dec (&dev->tx_qlen); ··· 1896 /* ignores USB_CDC_PACKET_TYPE_DIRECTED */ 1897 } 1898 1899 + spin_lock_irqsave(&dev->req_lock, flags); 1900 req = container_of (dev->tx_reqs.next, struct usb_request, list); 1901 list_del (&req->list); 1902 if (list_empty (&dev->tx_reqs)) 1903 netif_stop_queue (net); 1904 + spin_unlock_irqrestore(&dev->req_lock, flags); 1905 1906 /* no buffer copies needed, unless the network stack did it 1907 * or the hardware can't use skb buffers. ··· 1955 drop: 1956 dev->stats.tx_dropped++; 1957 dev_kfree_skb_any (skb); 1958 + spin_lock_irqsave(&dev->req_lock, flags); 1959 if (list_empty (&dev->tx_reqs)) 1960 netif_start_queue (net); 1961 list_add (&req->list, &dev->tx_reqs); 1962 + spin_unlock_irqrestore(&dev->req_lock, flags); 1963 } 1964 return 0; 1965 } ··· 2378 return status; 2379 dev = netdev_priv(net); 2380 spin_lock_init (&dev->lock); 2381 + spin_lock_init (&dev->req_lock); 2382 INIT_WORK (&dev->work, eth_work, dev); 2383 INIT_LIST_HEAD (&dev->tx_reqs); 2384 INIT_LIST_HEAD (&dev->rx_reqs);