Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (30 commits)
e1000: fix virtualization bug
bonding: fix alb mode locking regression
Bluetooth: Fix issue with sysfs handling for connections
usbnet: CDC EEM support (v5)
tcp: Fix tcp_prequeue() to get correct rto_min value
ehea: fix invalid pointer access
ne2k-pci: Do not register device until initialized.
br2684: restore net_dev initialization
net: Only store high 16 bits of kernel generated filter priorities
virtio_net: Fix function name typo
virtio_net: Cleanup command queue scatterlist usage
bonding: correct the cleanup in bond_create()
virtio: add missing include to virtio_net.h
smsc95xx: add support for LAN9512 and LAN9514
smsc95xx: configure LED outputs
netconsole: take care of NETDEV_UNREGISTER event
xt_socket: checks for the state of nf_conntrack
bonding: bond_slave_info_query() fix
cxgb3: fixing gcc 4.4 compiler warning: suggest parentheses around operand of ‘!’
netfilter: use likely() in xt_info_rdlock_bh()
...

+599 -158
+3 -9
drivers/net/bonding/bond_alb.c
··· 1706 * Called with RTNL 1707 */ 1708 int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) 1709 - __releases(&bond->curr_slave_lock) 1710 - __releases(&bond->lock) 1711 __acquires(&bond->lock) 1712 - __acquires(&bond->curr_slave_lock) 1713 { 1714 struct bonding *bond = netdev_priv(bond_dev); 1715 struct sockaddr *sa = addr; ··· 1743 } 1744 } 1745 1746 - write_unlock_bh(&bond->curr_slave_lock); 1747 - read_unlock(&bond->lock); 1748 - 1749 if (swap_slave) { 1750 alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave); 1751 alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave); ··· 1750 alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr, 1751 bond->alb_info.rlb_enabled); 1752 1753 alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr); 1754 if (bond->alb_info.rlb_enabled) { 1755 /* inform clients mac address has changed */ 1756 rlb_req_update_slave_clients(bond, bond->curr_active_slave); 1757 } 1758 } 1759 - 1760 - read_lock(&bond->lock); 1761 - write_lock_bh(&bond->curr_slave_lock); 1762 1763 return 0; 1764 }
··· 1706 * Called with RTNL 1707 */ 1708 int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) 1709 __acquires(&bond->lock) 1710 + __releases(&bond->lock) 1711 { 1712 struct bonding *bond = netdev_priv(bond_dev); 1713 struct sockaddr *sa = addr; ··· 1745 } 1746 } 1747 1748 if (swap_slave) { 1749 alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave); 1750 alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave); ··· 1755 alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr, 1756 bond->alb_info.rlb_enabled); 1757 1758 + read_lock(&bond->lock); 1759 alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr); 1760 if (bond->alb_info.rlb_enabled) { 1761 /* inform clients mac address has changed */ 1762 rlb_req_update_slave_clients(bond, bond->curr_active_slave); 1763 } 1764 + read_unlock(&bond->lock); 1765 } 1766 1767 return 0; 1768 }
+13 -23
drivers/net/bonding/bond_main.c
··· 2213 { 2214 struct bonding *bond = netdev_priv(bond_dev); 2215 struct slave *slave; 2216 - int i, found = 0; 2217 - 2218 - if (info->slave_id < 0) { 2219 - return -ENODEV; 2220 - } 2221 2222 read_lock(&bond->lock); 2223 2224 bond_for_each_slave(bond, slave, i) { 2225 if (i == (int)info->slave_id) { 2226 - found = 1; 2227 break; 2228 } 2229 } 2230 2231 read_unlock(&bond->lock); 2232 2233 - if (found) { 2234 - strcpy(info->slave_name, slave->dev->name); 2235 - info->link = slave->link; 2236 - info->state = slave->state; 2237 - info->link_failure_count = slave->link_failure_count; 2238 - } else { 2239 - return -ENODEV; 2240 - } 2241 - 2242 - return 0; 2243 } 2244 2245 /*-------------------------------- Monitoring -------------------------------*/ ··· 5158 up_write(&bonding_rwsem); 5159 rtnl_unlock(); /* allows sysfs registration of net device */ 5160 res = bond_create_sysfs_entry(netdev_priv(bond_dev)); 5161 - if (res < 0) { 5162 - rtnl_lock(); 5163 - down_write(&bonding_rwsem); 5164 - bond_deinit(bond_dev); 5165 - unregister_netdevice(bond_dev); 5166 - goto out_rtnl; 5167 - } 5168 5169 return 0; 5170 5171 out_bond: 5172 bond_deinit(bond_dev); 5173 out_netdev:
··· 2213 { 2214 struct bonding *bond = netdev_priv(bond_dev); 2215 struct slave *slave; 2216 + int i, res = -ENODEV; 2217 2218 read_lock(&bond->lock); 2219 2220 bond_for_each_slave(bond, slave, i) { 2221 if (i == (int)info->slave_id) { 2222 + res = 0; 2223 + strcpy(info->slave_name, slave->dev->name); 2224 + info->link = slave->link; 2225 + info->state = slave->state; 2226 + info->link_failure_count = slave->link_failure_count; 2227 break; 2228 } 2229 } 2230 2231 read_unlock(&bond->lock); 2232 2233 + return res; 2234 } 2235 2236 /*-------------------------------- Monitoring -------------------------------*/ ··· 5167 up_write(&bonding_rwsem); 5168 rtnl_unlock(); /* allows sysfs registration of net device */ 5169 res = bond_create_sysfs_entry(netdev_priv(bond_dev)); 5170 + if (res < 0) 5171 + goto out_unreg; 5172 5173 return 0; 5174 5175 + out_unreg: 5176 + rtnl_lock(); 5177 + down_write(&bonding_rwsem); 5178 + unregister_netdevice(bond_dev); 5179 out_bond: 5180 bond_deinit(bond_dev); 5181 out_netdev:
+1 -1
drivers/net/cxgb3/t3_hw.c
··· 3779 3780 adapter->params.info = ai; 3781 adapter->params.nports = ai->nports0 + ai->nports1; 3782 - adapter->params.chan_map = !!ai->nports0 | (!!ai->nports1 << 1); 3783 adapter->params.rev = t3_read_reg(adapter, A_PL_REV); 3784 /* 3785 * We used to only run the "adapter check task" once a second if
··· 3779 3780 adapter->params.info = ai; 3781 adapter->params.nports = ai->nports0 + ai->nports1; 3782 + adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1); 3783 adapter->params.rev = t3_read_reg(adapter, A_PL_REV); 3784 /* 3785 * We used to only run the "adapter check task" once a second if
+1 -1
drivers/net/e1000/e1000_main.c
··· 3738 struct e1000_hw *hw = &adapter->hw; 3739 u32 rctl, icr = er32(ICR); 3740 3741 - if (unlikely((!icr) || test_bit(__E1000_RESETTING, &adapter->flags))) 3742 return IRQ_NONE; /* Not our interrupt */ 3743 3744 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
··· 3738 struct e1000_hw *hw = &adapter->hw; 3739 u32 rctl, icr = er32(ICR); 3740 3741 + if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags))) 3742 return IRQ_NONE; /* Not our interrupt */ 3743 3744 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+1 -1
drivers/net/ehea/ehea.h
··· 40 #include <asm/io.h> 41 42 #define DRV_NAME "ehea" 43 - #define DRV_VERSION "EHEA_0100" 44 45 /* eHEA capability flags */ 46 #define DLPAR_PORT_ADD_REM 1
··· 40 #include <asm/io.h> 41 42 #define DRV_NAME "ehea" 43 + #define DRV_VERSION "EHEA_0101" 44 45 /* eHEA capability flags */ 46 #define DLPAR_PORT_ADD_REM 1
+17 -12
drivers/net/ehea/ehea_main.c
··· 545 x &= (arr_len - 1); 546 547 pref = skb_array[x]; 548 - prefetchw(pref); 549 - prefetchw(pref + EHEA_CACHE_LINE); 550 551 - pref = (skb_array[x]->data); 552 - prefetch(pref); 553 - prefetch(pref + EHEA_CACHE_LINE); 554 - prefetch(pref + EHEA_CACHE_LINE * 2); 555 - prefetch(pref + EHEA_CACHE_LINE * 3); 556 skb = skb_array[skb_index]; 557 skb_array[skb_index] = NULL; 558 return skb; ··· 572 x &= (arr_len - 1); 573 574 pref = skb_array[x]; 575 - prefetchw(pref); 576 - prefetchw(pref + EHEA_CACHE_LINE); 577 578 - pref = (skb_array[x]->data); 579 - prefetchw(pref); 580 - prefetchw(pref + EHEA_CACHE_LINE); 581 582 skb = skb_array[wqe_index]; 583 skb_array[wqe_index] = NULL;
··· 545 x &= (arr_len - 1); 546 547 pref = skb_array[x]; 548 + if (pref) { 549 + prefetchw(pref); 550 + prefetchw(pref + EHEA_CACHE_LINE); 551 552 + pref = (skb_array[x]->data); 553 + prefetch(pref); 554 + prefetch(pref + EHEA_CACHE_LINE); 555 + prefetch(pref + EHEA_CACHE_LINE * 2); 556 + prefetch(pref + EHEA_CACHE_LINE * 3); 557 + } 558 + 559 skb = skb_array[skb_index]; 560 skb_array[skb_index] = NULL; 561 return skb; ··· 569 x &= (arr_len - 1); 570 571 pref = skb_array[x]; 572 + if (pref) { 573 + prefetchw(pref); 574 + prefetchw(pref + EHEA_CACHE_LINE); 575 576 + pref = (skb_array[x]->data); 577 + prefetchw(pref); 578 + prefetchw(pref + EHEA_CACHE_LINE); 579 + } 580 581 skb = skb_array[wqe_index]; 582 skb_array[wqe_index] = NULL;
+13 -11
drivers/net/mv643xx_eth.c
··· 393 struct work_struct tx_timeout_task; 394 395 struct napi_struct napi; 396 u8 work_link; 397 u8 work_tx; 398 u8 work_tx_end; 399 u8 work_rx; 400 u8 work_rx_refill; 401 - u8 work_rx_oom; 402 403 int skb_size; 404 struct sk_buff_head rx_recycle; ··· 661 dma_get_cache_alignment() - 1); 662 663 if (skb == NULL) { 664 - mp->work_rx_oom |= 1 << rxq->index; 665 goto oom; 666 } 667 ··· 1255 1256 spin_lock_bh(&mp->mib_counters_lock); 1257 p->good_octets_received += mib_read(mp, 0x00); 1258 - p->good_octets_received += (u64)mib_read(mp, 0x04) << 32; 1259 p->bad_octets_received += mib_read(mp, 0x08); 1260 p->internal_mac_transmit_err += mib_read(mp, 0x0c); 1261 p->good_frames_received += mib_read(mp, 0x10); ··· 1268 p->frames_512_to_1023_octets += mib_read(mp, 0x30); 1269 p->frames_1024_to_max_octets += mib_read(mp, 0x34); 1270 p->good_octets_sent += mib_read(mp, 0x38); 1271 - p->good_octets_sent += (u64)mib_read(mp, 0x3c) << 32; 1272 p->good_frames_sent += mib_read(mp, 0x40); 1273 p->excessive_collision += mib_read(mp, 0x44); 1274 p->multicast_frames_sent += mib_read(mp, 0x48); ··· 2165 2166 mp = container_of(napi, struct mv643xx_eth_private, napi); 2167 2168 - mp->work_rx_refill |= mp->work_rx_oom; 2169 - mp->work_rx_oom = 0; 2170 2171 work_done = 0; 2172 while (work_done < budget) { ··· 2182 continue; 2183 } 2184 2185 - queue_mask = mp->work_tx | mp->work_tx_end | 2186 - mp->work_rx | mp->work_rx_refill; 2187 if (!queue_mask) { 2188 if (mv643xx_eth_collect_events(mp)) 2189 continue; ··· 2206 txq_maybe_wake(mp->txq + queue); 2207 } else if (mp->work_rx & queue_mask) { 2208 work_done += rxq_process(mp->rxq + queue, work_tbd); 2209 - } else if (mp->work_rx_refill & queue_mask) { 2210 work_done += rxq_refill(mp->rxq + queue, work_tbd); 2211 } else { 2212 BUG(); ··· 2214 } 2215 2216 if (work_done < budget) { 2217 - if (mp->work_rx_oom) 2218 mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); 2219 napi_complete(napi); 2220 wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT); ··· 
2374 rxq_refill(mp->rxq + i, INT_MAX); 2375 } 2376 2377 - if (mp->work_rx_oom) { 2378 mp->rx_oom.expires = jiffies + (HZ / 10); 2379 add_timer(&mp->rx_oom); 2380 }
··· 393 struct work_struct tx_timeout_task; 394 395 struct napi_struct napi; 396 + u8 oom; 397 u8 work_link; 398 u8 work_tx; 399 u8 work_tx_end; 400 u8 work_rx; 401 u8 work_rx_refill; 402 403 int skb_size; 404 struct sk_buff_head rx_recycle; ··· 661 dma_get_cache_alignment() - 1); 662 663 if (skb == NULL) { 664 + mp->oom = 1; 665 goto oom; 666 } 667 ··· 1255 1256 spin_lock_bh(&mp->mib_counters_lock); 1257 p->good_octets_received += mib_read(mp, 0x00); 1258 p->bad_octets_received += mib_read(mp, 0x08); 1259 p->internal_mac_transmit_err += mib_read(mp, 0x0c); 1260 p->good_frames_received += mib_read(mp, 0x10); ··· 1269 p->frames_512_to_1023_octets += mib_read(mp, 0x30); 1270 p->frames_1024_to_max_octets += mib_read(mp, 0x34); 1271 p->good_octets_sent += mib_read(mp, 0x38); 1272 p->good_frames_sent += mib_read(mp, 0x40); 1273 p->excessive_collision += mib_read(mp, 0x44); 1274 p->multicast_frames_sent += mib_read(mp, 0x48); ··· 2167 2168 mp = container_of(napi, struct mv643xx_eth_private, napi); 2169 2170 + if (unlikely(mp->oom)) { 2171 + mp->oom = 0; 2172 + del_timer(&mp->rx_oom); 2173 + } 2174 2175 work_done = 0; 2176 while (work_done < budget) { ··· 2182 continue; 2183 } 2184 2185 + queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx; 2186 + if (likely(!mp->oom)) 2187 + queue_mask |= mp->work_rx_refill; 2188 + 2189 if (!queue_mask) { 2190 if (mv643xx_eth_collect_events(mp)) 2191 continue; ··· 2204 txq_maybe_wake(mp->txq + queue); 2205 } else if (mp->work_rx & queue_mask) { 2206 work_done += rxq_process(mp->rxq + queue, work_tbd); 2207 + } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) { 2208 work_done += rxq_refill(mp->rxq + queue, work_tbd); 2209 } else { 2210 BUG(); ··· 2212 } 2213 2214 if (work_done < budget) { 2215 + if (mp->oom) 2216 mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); 2217 napi_complete(napi); 2218 wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT); ··· 2372 rxq_refill(mp->rxq + i, INT_MAX); 2373 } 2374 2375 + if (mp->oom) { 2376 
mp->rx_oom.expires = jiffies + (HZ / 10); 2377 add_timer(&mp->rx_oom); 2378 }
+3 -4
drivers/net/ne2k-pci.c
··· 374 dev->ethtool_ops = &ne2k_pci_ethtool_ops; 375 NS8390_init(dev, 0); 376 377 i = register_netdev(dev); 378 if (i) 379 goto err_out_free_netdev; 380 381 - for(i = 0; i < 6; i++) 382 - dev->dev_addr[i] = SA_prom[i]; 383 printk("%s: %s found at %#lx, IRQ %d, %pM.\n", 384 dev->name, pci_clone_list[chip_idx].name, ioaddr, dev->irq, 385 dev->dev_addr); 386 - 387 - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 388 389 return 0; 390
··· 374 dev->ethtool_ops = &ne2k_pci_ethtool_ops; 375 NS8390_init(dev, 0); 376 377 + memcpy(dev->dev_addr, SA_prom, 6); 378 + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 379 + 380 i = register_netdev(dev); 381 if (i) 382 goto err_out_free_netdev; 383 384 printk("%s: %s found at %#lx, IRQ %d, %pM.\n", 385 dev->name, pci_clone_list[chip_idx].name, ioaddr, dev->irq, 386 dev->dev_addr); 387 388 return 0; 389
+10 -1
drivers/net/netconsole.c
··· 664 struct netconsole_target *nt; 665 struct net_device *dev = ptr; 666 667 - if (!(event == NETDEV_CHANGENAME)) 668 goto done; 669 670 spin_lock_irqsave(&target_list_lock, flags); ··· 674 switch (event) { 675 case NETDEV_CHANGENAME: 676 strlcpy(nt->np.dev_name, dev->name, IFNAMSIZ); 677 break; 678 } 679 }
··· 664 struct netconsole_target *nt; 665 struct net_device *dev = ptr; 666 667 + if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER)) 668 goto done; 669 670 spin_lock_irqsave(&target_list_lock, flags); ··· 674 switch (event) { 675 case NETDEV_CHANGENAME: 676 strlcpy(nt->np.dev_name, dev->name, IFNAMSIZ); 677 + break; 678 + case NETDEV_UNREGISTER: 679 + if (!nt->enabled) 680 + break; 681 + netpoll_cleanup(&nt->np); 682 + nt->enabled = 0; 683 + printk(KERN_INFO "netconsole: network logging stopped" 684 + ", interface %s unregistered\n", 685 + dev->name); 686 break; 687 } 688 }
+14
drivers/net/usb/Kconfig
··· 180 IEEE 802 "local assignment" bit is set in the address, a "usbX" 181 name is used instead. 182 183 config USB_NET_DM9601 184 tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices" 185 depends on USB_USBNET
··· 180 IEEE 802 "local assignment" bit is set in the address, a "usbX" 181 name is used instead. 182 183 + config USB_NET_CDC_EEM 184 + tristate "CDC EEM support" 185 + depends on USB_USBNET && EXPERIMENTAL 186 + help 187 + This option supports devices conforming to the Communication Device 188 + Class (CDC) Ethernet Emulation Model, a specification that's easy to 189 + implement in device firmware. The CDC EEM specifications are available 190 + from <http://www.usb.org/>. 191 + 192 + This driver creates an interface named "ethX", where X depends on 193 + what other networking devices you have in use. However, if the 194 + IEEE 802 "local assignment" bit is set in the address, a "usbX" 195 + name is used instead. 196 + 197 config USB_NET_DM9601 198 tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices" 199 depends on USB_USBNET
+1
drivers/net/usb/Makefile
··· 9 obj-$(CONFIG_USB_HSO) += hso.o 10 obj-$(CONFIG_USB_NET_AX8817X) += asix.o 11 obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o 12 obj-$(CONFIG_USB_NET_DM9601) += dm9601.o 13 obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o 14 obj-$(CONFIG_USB_NET_GL620A) += gl620a.o
··· 9 obj-$(CONFIG_USB_HSO) += hso.o 10 obj-$(CONFIG_USB_NET_AX8817X) += asix.o 11 obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o 12 + obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o 13 obj-$(CONFIG_USB_NET_DM9601) += dm9601.o 14 obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o 15 obj-$(CONFIG_USB_NET_GL620A) += gl620a.o
+381
drivers/net/usb/cdc_eem.c
···
··· 1 + /* 2 + * USB CDC EEM network interface driver 3 + * Copyright (C) 2009 Oberthur Technologies 4 + * by Omar Laazimani, Olivier Condemine 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License as published by 8 + * the Free Software Foundation; either version 2 of the License, or 9 + * (at your option) any later version. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + * 16 + * You should have received a copy of the GNU General Public License 17 + * along with this program; if not, write to the Free Software 18 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 + */ 20 + 21 + #include <linux/module.h> 22 + #include <linux/init.h> 23 + #include <linux/netdevice.h> 24 + #include <linux/etherdevice.h> 25 + #include <linux/ctype.h> 26 + #include <linux/ethtool.h> 27 + #include <linux/workqueue.h> 28 + #include <linux/mii.h> 29 + #include <linux/usb.h> 30 + #include <linux/crc32.h> 31 + #include <linux/usb/cdc.h> 32 + #include <linux/usb/usbnet.h> 33 + 34 + 35 + /* 36 + * This driver is an implementation of the CDC "Ethernet Emulation 37 + * Model" (EEM) specification, which encapsulates Ethernet frames 38 + * for transport over USB using a simpler USB device model than the 39 + * previous CDC "Ethernet Control Model" (ECM, or "CDC Ethernet"). 40 + * 41 + * For details, see www.usb.org/developers/devclass_docs/CDC_EEM10.pdf 42 + * 43 + * This version has been tested with GIGAntIC WuaoW SIM Smart Card on 2.6.24, 44 + * 2.6.27 and 2.6.30rc2 kernel. 45 + * It has also been validated on Openmoko Om 2008.12 (based on 2.6.24 kernel). 
46 + * build on 23-April-2009 47 + */ 48 + 49 + #define EEM_HEAD 2 /* 2 byte header */ 50 + 51 + /*-------------------------------------------------------------------------*/ 52 + 53 + static void eem_linkcmd_complete(struct urb *urb) 54 + { 55 + dev_kfree_skb(urb->context); 56 + usb_free_urb(urb); 57 + } 58 + 59 + static void eem_linkcmd(struct usbnet *dev, struct sk_buff *skb) 60 + { 61 + struct urb *urb; 62 + int status; 63 + 64 + urb = usb_alloc_urb(0, GFP_ATOMIC); 65 + if (!urb) 66 + goto fail; 67 + 68 + usb_fill_bulk_urb(urb, dev->udev, dev->out, 69 + skb->data, skb->len, eem_linkcmd_complete, skb); 70 + 71 + status = usb_submit_urb(urb, GFP_ATOMIC); 72 + if (status) { 73 + usb_free_urb(urb); 74 + fail: 75 + dev_kfree_skb(skb); 76 + devwarn(dev, "link cmd failure\n"); 77 + return; 78 + } 79 + } 80 + 81 + static int eem_bind(struct usbnet *dev, struct usb_interface *intf) 82 + { 83 + int status = 0; 84 + 85 + status = usbnet_get_endpoints(dev, intf); 86 + if (status < 0) { 87 + usb_set_intfdata(intf, NULL); 88 + usb_driver_release_interface(driver_of(intf), intf); 89 + return status; 90 + } 91 + 92 + /* no jumbogram (16K) support for now */ 93 + 94 + dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN; 95 + 96 + return 0; 97 + } 98 + 99 + /* 100 + * EEM permits packing multiple Ethernet frames into USB transfers 101 + * (a "bundle"), but for TX we don't try to do that. 102 + */ 103 + static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb, 104 + gfp_t flags) 105 + { 106 + struct sk_buff *skb2 = NULL; 107 + u16 len = skb->len; 108 + u32 crc = 0; 109 + int padlen = 0; 110 + 111 + /* When ((len + EEM_HEAD + ETH_FCS_LEN) % dev->maxpacket) is 112 + * zero, stick two bytes of zero length EEM packet on the end. 113 + * Else the framework would add invalid single byte padding, 114 + * since it can't know whether ZLPs will be handled right by 115 + * all the relevant hardware and software. 
116 + */ 117 + if (!((len + EEM_HEAD + ETH_FCS_LEN) % dev->maxpacket)) 118 + padlen += 2; 119 + 120 + if (!skb_cloned(skb)) { 121 + int headroom = skb_headroom(skb); 122 + int tailroom = skb_tailroom(skb); 123 + 124 + if ((tailroom >= ETH_FCS_LEN + padlen) 125 + && (headroom >= EEM_HEAD)) 126 + goto done; 127 + 128 + if ((headroom + tailroom) 129 + > (EEM_HEAD + ETH_FCS_LEN + padlen)) { 130 + skb->data = memmove(skb->head + 131 + EEM_HEAD, 132 + skb->data, 133 + skb->len); 134 + skb_set_tail_pointer(skb, len); 135 + goto done; 136 + } 137 + } 138 + 139 + skb2 = skb_copy_expand(skb, EEM_HEAD, ETH_FCS_LEN + padlen, flags); 140 + if (!skb2) 141 + return NULL; 142 + 143 + dev_kfree_skb_any(skb); 144 + skb = skb2; 145 + 146 + done: 147 + /* we don't use the "no Ethernet CRC" option */ 148 + crc = crc32_le(~0, skb->data, skb->len); 149 + crc = ~crc; 150 + 151 + put_unaligned_le32(crc, skb_put(skb, 4)); 152 + 153 + /* EEM packet header format: 154 + * b0..13: length of ethernet frame 155 + * b14: bmCRC (1 == valid Ethernet CRC) 156 + * b15: bmType (0 == data) 157 + */ 158 + len = skb->len; 159 + put_unaligned_le16(BIT(14) | len, skb_push(skb, 2)); 160 + 161 + /* Bundle a zero length EEM packet if needed */ 162 + if (padlen) 163 + put_unaligned_le16(0, skb_put(skb, 2)); 164 + 165 + return skb; 166 + } 167 + 168 + static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb) 169 + { 170 + /* 171 + * Our task here is to strip off framing, leaving skb with one 172 + * data frame for the usbnet framework code to process. But we 173 + * may have received multiple EEM payloads, or command payloads. 174 + * So we must process _everything_ as if it's a header, except 175 + * maybe the last data payload 176 + * 177 + * REVISIT the framework needs updating so that when we consume 178 + * all payloads (the last or only message was a command, or a 179 + * zero length EEM packet) that is not accounted as an rx_error. 
180 + */ 181 + do { 182 + struct sk_buff *skb2 = NULL; 183 + u16 header; 184 + u16 len = 0; 185 + 186 + /* incomplete EEM header? */ 187 + if (skb->len < EEM_HEAD) 188 + return 0; 189 + 190 + /* 191 + * EEM packet header format: 192 + * b0..14: EEM type dependant (Data or Command) 193 + * b15: bmType 194 + */ 195 + header = get_unaligned_le16(skb->data); 196 + skb_pull(skb, EEM_HEAD); 197 + 198 + /* 199 + * The bmType bit helps to denote when EEM 200 + * packet is data or command : 201 + * bmType = 0 : EEM data payload 202 + * bmType = 1 : EEM (link) command 203 + */ 204 + if (header & BIT(15)) { 205 + u16 bmEEMCmd; 206 + 207 + /* 208 + * EEM (link) command packet: 209 + * b0..10: bmEEMCmdParam 210 + * b11..13: bmEEMCmd 211 + * b14: bmReserved (must be 0) 212 + * b15: 1 (EEM command) 213 + */ 214 + if (header & BIT(14)) { 215 + devdbg(dev, "reserved command %04x\n", header); 216 + continue; 217 + } 218 + 219 + bmEEMCmd = (header >> 11) & 0x7; 220 + switch (bmEEMCmd) { 221 + 222 + /* Responding to echo requests is mandatory. */ 223 + case 0: /* Echo command */ 224 + len = header & 0x7FF; 225 + 226 + /* bogus command? */ 227 + if (skb->len < len) 228 + return 0; 229 + 230 + skb2 = skb_clone(skb, GFP_ATOMIC); 231 + if (unlikely(!skb2)) 232 + goto next; 233 + skb_trim(skb2, len); 234 + put_unaligned_le16(BIT(15) | (1 << 11) | len, 235 + skb_push(skb2, 2)); 236 + eem_linkcmd(dev, skb2); 237 + break; 238 + 239 + /* 240 + * Host may choose to ignore hints. 241 + * - suspend: peripheral ready to suspend 242 + * - response: suggest N millisec polling 243 + * - response complete: suggest N sec polling 244 + */ 245 + case 2: /* Suspend hint */ 246 + case 3: /* Response hint */ 247 + case 4: /* Response complete hint */ 248 + continue; 249 + 250 + /* 251 + * Hosts should never receive host-to-peripheral 252 + * or reserved command codes; or responses to an 253 + * echo command we didn't send. 
254 + */ 255 + case 1: /* Echo response */ 256 + case 5: /* Tickle */ 257 + default: /* reserved */ 258 + devwarn(dev, "unexpected link command %d\n", 259 + bmEEMCmd); 260 + continue; 261 + } 262 + 263 + } else { 264 + u32 crc, crc2; 265 + int is_last; 266 + 267 + /* zero length EEM packet? */ 268 + if (header == 0) 269 + continue; 270 + 271 + /* 272 + * EEM data packet header : 273 + * b0..13: length of ethernet frame 274 + * b14: bmCRC 275 + * b15: 0 (EEM data) 276 + */ 277 + len = header & 0x3FFF; 278 + 279 + /* bogus EEM payload? */ 280 + if (skb->len < len) 281 + return 0; 282 + 283 + /* bogus ethernet frame? */ 284 + if (len < (ETH_HLEN + ETH_FCS_LEN)) 285 + goto next; 286 + 287 + /* 288 + * Treat the last payload differently: framework 289 + * code expects our "fixup" to have stripped off 290 + * headers, so "skb" is a data packet (or error). 291 + * Else if it's not the last payload, keep "skb" 292 + * for further processing. 293 + */ 294 + is_last = (len == skb->len); 295 + if (is_last) 296 + skb2 = skb; 297 + else { 298 + skb2 = skb_clone(skb, GFP_ATOMIC); 299 + if (unlikely(!skb2)) 300 + return 0; 301 + } 302 + 303 + crc = get_unaligned_le32(skb2->data 304 + + len - ETH_FCS_LEN); 305 + skb_trim(skb2, len - ETH_FCS_LEN); 306 + 307 + /* 308 + * The bmCRC helps to denote when the CRC field in 309 + * the Ethernet frame contains a calculated CRC: 310 + * bmCRC = 1 : CRC is calculated 311 + * bmCRC = 0 : CRC = 0xDEADBEEF 312 + */ 313 + if (header & BIT(14)) 314 + crc2 = ~crc32_le(~0, skb2->data, len); 315 + else 316 + crc2 = 0xdeadbeef; 317 + 318 + if (is_last) 319 + return crc == crc2; 320 + 321 + if (unlikely(crc != crc2)) { 322 + dev->stats.rx_errors++; 323 + dev_kfree_skb_any(skb2); 324 + } else 325 + usbnet_skb_return(dev, skb2); 326 + } 327 + 328 + next: 329 + skb_pull(skb, len); 330 + } while (skb->len); 331 + 332 + return 1; 333 + } 334 + 335 + static const struct driver_info eem_info = { 336 + .description = "CDC EEM Device", 337 + .flags = 
FLAG_ETHER, 338 + .bind = eem_bind, 339 + .rx_fixup = eem_rx_fixup, 340 + .tx_fixup = eem_tx_fixup, 341 + }; 342 + 343 + /*-------------------------------------------------------------------------*/ 344 + 345 + static const struct usb_device_id products[] = { 346 + { 347 + USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_EEM, 348 + USB_CDC_PROTO_EEM), 349 + .driver_info = (unsigned long) &eem_info, 350 + }, 351 + { 352 + /* EMPTY == end of list */ 353 + }, 354 + }; 355 + MODULE_DEVICE_TABLE(usb, products); 356 + 357 + static struct usb_driver eem_driver = { 358 + .name = "cdc_eem", 359 + .id_table = products, 360 + .probe = usbnet_probe, 361 + .disconnect = usbnet_disconnect, 362 + .suspend = usbnet_suspend, 363 + .resume = usbnet_resume, 364 + }; 365 + 366 + 367 + static int __init eem_init(void) 368 + { 369 + return usb_register(&eem_driver); 370 + } 371 + module_init(eem_init); 372 + 373 + static void __exit eem_exit(void) 374 + { 375 + usb_deregister(&eem_driver); 376 + } 377 + module_exit(eem_exit); 378 + 379 + MODULE_AUTHOR("Omar Laazimani <omar.oberthur@gmail.com>"); 380 + MODULE_DESCRIPTION("USB CDC EEM"); 381 + MODULE_LICENSE("GPL");
+15
drivers/net/usb/smsc95xx.c
··· 941 if (netif_msg_ifup(dev)) 942 devdbg(dev, "ID_REV = 0x%08x", read_buf); 943 944 /* Init Tx */ 945 write_buf = 0; 946 ret = smsc95xx_write_reg(dev, FLOW, write_buf); ··· 1239 { 1240 /* SMSC9500 USB Ethernet Device */ 1241 USB_DEVICE(0x0424, 0x9500), 1242 .driver_info = (unsigned long) &smsc95xx_info, 1243 }, 1244 { }, /* END */
··· 941 if (netif_msg_ifup(dev)) 942 devdbg(dev, "ID_REV = 0x%08x", read_buf); 943 944 + /* Configure GPIO pins as LED outputs */ 945 + write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED | 946 + LED_GPIO_CFG_FDX_LED; 947 + ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf); 948 + if (ret < 0) { 949 + devwarn(dev, "Failed to write LED_GPIO_CFG register, ret=%d", 950 + ret); 951 + return ret; 952 + } 953 + 954 /* Init Tx */ 955 write_buf = 0; 956 ret = smsc95xx_write_reg(dev, FLOW, write_buf); ··· 1229 { 1230 /* SMSC9500 USB Ethernet Device */ 1231 USB_DEVICE(0x0424, 0x9500), 1232 + .driver_info = (unsigned long) &smsc95xx_info, 1233 + }, 1234 + { 1235 + /* SMSC9512/9514 USB Hub & Ethernet Device */ 1236 + USB_DEVICE(0x0424, 0xec00), 1237 .driver_info = (unsigned long) &smsc95xx_info, 1238 }, 1239 { }, /* END */
+3
drivers/net/usb/smsc95xx.h
··· 99 #define PM_CTL_WUPS_MULTI_ (0x00000003) 100 101 #define LED_GPIO_CFG (0x24) 102 103 #define GPIO_CFG (0x28) 104
··· 99 #define PM_CTL_WUPS_MULTI_ (0x00000003) 100 101 #define LED_GPIO_CFG (0x24) 102 + #define LED_GPIO_CFG_SPD_LED (0x01000000) 103 + #define LED_GPIO_CFG_LNK_LED (0x00100000) 104 + #define LED_GPIO_CFG_FDX_LED (0x00010000) 105 106 #define GPIO_CFG (0x28) 107
+14 -10
drivers/net/virtio_net.c
··· 616 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, 617 struct scatterlist *data, int out, int in) 618 { 619 - struct scatterlist sg[VIRTNET_SEND_COMMAND_SG_MAX + 2]; 620 struct virtio_net_ctrl_hdr ctrl; 621 virtio_net_ctrl_ack status = ~0; 622 unsigned int tmp; 623 624 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) { 625 BUG(); /* Caller should know better */ ··· 638 sg_init_table(sg, out + in); 639 640 sg_set_buf(&sg[0], &ctrl, sizeof(ctrl)); 641 - memcpy(&sg[1], data, sizeof(struct scatterlist) * (out + in - 2)); 642 sg_set_buf(&sg[out + in - 1], &status, sizeof(status)); 643 644 if (vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) != 0) ··· 694 promisc = ((dev->flags & IFF_PROMISC) != 0); 695 allmulti = ((dev->flags & IFF_ALLMULTI) != 0); 696 697 - sg_set_buf(sg, &promisc, sizeof(promisc)); 698 699 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 700 VIRTIO_NET_CTRL_RX_PROMISC, ··· 702 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", 703 promisc ? 
"en" : "dis"); 704 705 - sg_set_buf(sg, &allmulti, sizeof(allmulti)); 706 707 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 708 VIRTIO_NET_CTRL_RX_ALLMULTI, ··· 717 dev_warn(&dev->dev, "No memory for MAC address buffer\n"); 718 return; 719 } 720 721 /* Store the unicast list and count in the front of the buffer */ 722 mac_data->entries = dev->uc_count; ··· 748 kfree(buf); 749 } 750 751 - static void virnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) 752 { 753 struct virtnet_info *vi = netdev_priv(dev); 754 struct scatterlist sg; 755 756 - sg_set_buf(&sg, &vid, sizeof(vid)); 757 758 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 759 VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0)) 760 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); 761 } 762 763 - static void virnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) 764 { 765 struct virtnet_info *vi = netdev_priv(dev); 766 struct scatterlist sg; 767 768 - sg_set_buf(&sg, &vid, sizeof(vid)); 769 770 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 771 VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0)) ··· 798 .ndo_set_mac_address = virtnet_set_mac_address, 799 .ndo_set_rx_mode = virtnet_set_rx_mode, 800 .ndo_change_mtu = virtnet_change_mtu, 801 - .ndo_vlan_rx_add_vid = virnet_vlan_rx_add_vid, 802 - .ndo_vlan_rx_kill_vid = virnet_vlan_rx_kill_vid, 803 #ifdef CONFIG_NET_POLL_CONTROLLER 804 .ndo_poll_controller = virtnet_netpoll, 805 #endif
··· 616 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, 617 struct scatterlist *data, int out, int in) 618 { 619 + struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2]; 620 struct virtio_net_ctrl_hdr ctrl; 621 virtio_net_ctrl_ack status = ~0; 622 unsigned int tmp; 623 + int i; 624 625 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) { 626 BUG(); /* Caller should know better */ ··· 637 sg_init_table(sg, out + in); 638 639 sg_set_buf(&sg[0], &ctrl, sizeof(ctrl)); 640 + for_each_sg(data, s, out + in - 2, i) 641 + sg_set_buf(&sg[i + 1], sg_virt(s), s->length); 642 sg_set_buf(&sg[out + in - 1], &status, sizeof(status)); 643 644 if (vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) != 0) ··· 692 promisc = ((dev->flags & IFF_PROMISC) != 0); 693 allmulti = ((dev->flags & IFF_ALLMULTI) != 0); 694 695 + sg_init_one(sg, &promisc, sizeof(promisc)); 696 697 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 698 VIRTIO_NET_CTRL_RX_PROMISC, ··· 700 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", 701 promisc ? 
"en" : "dis"); 702 703 + sg_init_one(sg, &allmulti, sizeof(allmulti)); 704 705 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 706 VIRTIO_NET_CTRL_RX_ALLMULTI, ··· 715 dev_warn(&dev->dev, "No memory for MAC address buffer\n"); 716 return; 717 } 718 + 719 + sg_init_table(sg, 2); 720 721 /* Store the unicast list and count in the front of the buffer */ 722 mac_data->entries = dev->uc_count; ··· 744 kfree(buf); 745 } 746 747 + static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) 748 { 749 struct virtnet_info *vi = netdev_priv(dev); 750 struct scatterlist sg; 751 752 + sg_init_one(&sg, &vid, sizeof(vid)); 753 754 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 755 VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0)) 756 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); 757 } 758 759 + static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) 760 { 761 struct virtnet_info *vi = netdev_priv(dev); 762 struct scatterlist sg; 763 764 + sg_init_one(&sg, &vid, sizeof(vid)); 765 766 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, 767 VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0)) ··· 794 .ndo_set_mac_address = virtnet_set_mac_address, 795 .ndo_set_rx_mode = virtnet_set_rx_mode, 796 .ndo_change_mtu = virtnet_change_mtu, 797 + .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, 798 + .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, 799 #ifdef CONFIG_NET_POLL_CONTROLLER 800 .ndo_poll_controller = virtnet_netpoll, 801 #endif
+1 -1
drivers/net/wireless/ath5k/debug.c
··· 424 425 for (b = 0; b < IEEE80211_NUM_BANDS; b++) { 426 struct ieee80211_supported_band *band = &sc->sbands[b]; 427 - char bname[5]; 428 switch (band->band) { 429 case IEEE80211_BAND_2GHZ: 430 strcpy(bname, "2 GHz");
··· 424 425 for (b = 0; b < IEEE80211_NUM_BANDS; b++) { 426 struct ieee80211_supported_band *band = &sc->sbands[b]; 427 + char bname[6]; 428 switch (band->band) { 429 case IEEE80211_BAND_2GHZ: 430 strcpy(bname, "2 GHz");
+2 -2
drivers/net/wireless/iwlwifi/iwl-scan.c
··· 925 926 IWL_DEBUG_SCAN(priv, "SCAN complete scan\n"); 927 928 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 929 return; 930 - 931 - ieee80211_scan_completed(priv->hw, false); 932 933 /* Since setting the TXPOWER may have been deferred while 934 * performing the scan, fire one off */
··· 925 926 IWL_DEBUG_SCAN(priv, "SCAN complete scan\n"); 927 928 + ieee80211_scan_completed(priv->hw, false); 929 + 930 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 931 return; 932 933 /* Since setting the TXPOWER may have been deferred while 934 * performing the scan, fire one off */
-1
drivers/net/wireless/iwlwifi/iwl3945-base.c
··· 1694 rxq->free_count = 0; 1695 spin_unlock_irqrestore(&rxq->lock, flags); 1696 } 1697 - EXPORT_SYMBOL(iwl3945_rx_queue_reset); 1698 1699 /* 1700 * this should be called while priv->lock is locked
··· 1694 rxq->free_count = 0; 1695 spin_unlock_irqrestore(&rxq->lock, flags); 1696 } 1697 1698 /* 1699 * this should be called while priv->lock is locked
+10 -4
drivers/net/wireless/rndis_wlan.c
··· 2558 mutex_init(&priv->command_lock); 2559 spin_lock_init(&priv->stats_lock); 2560 2561 /* try bind rndis_host */ 2562 retval = generic_rndis_bind(usbdev, intf, FLAG_RNDIS_PHYM_WIRELESS); 2563 if (retval < 0) ··· 2608 disassociate(usbdev, 1); 2609 netif_carrier_off(usbdev->net); 2610 2611 - /* because rndis_command() sleeps we need to use workqueue */ 2612 - priv->workqueue = create_singlethread_workqueue("rndis_wlan"); 2613 - INIT_DELAYED_WORK(&priv->stats_work, rndis_update_wireless_stats); 2614 queue_delayed_work(priv->workqueue, &priv->stats_work, 2615 round_jiffies_relative(STATS_UPDATE_JIFFIES)); 2616 - INIT_WORK(&priv->work, rndis_wext_worker); 2617 2618 return 0; 2619 2620 fail: 2621 kfree(priv); 2622 return retval; 2623 }
··· 2558 mutex_init(&priv->command_lock); 2559 spin_lock_init(&priv->stats_lock); 2560 2561 + /* because rndis_command() sleeps we need to use workqueue */ 2562 + priv->workqueue = create_singlethread_workqueue("rndis_wlan"); 2563 + INIT_WORK(&priv->work, rndis_wext_worker); 2564 + INIT_DELAYED_WORK(&priv->stats_work, rndis_update_wireless_stats); 2565 + 2566 /* try bind rndis_host */ 2567 retval = generic_rndis_bind(usbdev, intf, FLAG_RNDIS_PHYM_WIRELESS); 2568 if (retval < 0) ··· 2603 disassociate(usbdev, 1); 2604 netif_carrier_off(usbdev->net); 2605 2606 queue_delayed_work(priv->workqueue, &priv->stats_work, 2607 round_jiffies_relative(STATS_UPDATE_JIFFIES)); 2608 2609 return 0; 2610 2611 fail: 2612 + cancel_delayed_work_sync(&priv->stats_work); 2613 + cancel_work_sync(&priv->work); 2614 + flush_workqueue(priv->workqueue); 2615 + destroy_workqueue(priv->workqueue); 2616 + 2617 kfree(priv); 2618 return retval; 2619 }
+2 -2
include/linux/netfilter/x_tables.h
··· 472 473 local_bh_disable(); 474 lock = &__get_cpu_var(xt_info_locks); 475 - if (!lock->readers++) 476 spin_lock(&lock->lock); 477 } 478 ··· 480 { 481 struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks); 482 483 - if (!--lock->readers) 484 spin_unlock(&lock->lock); 485 local_bh_enable(); 486 }
··· 472 473 local_bh_disable(); 474 lock = &__get_cpu_var(xt_info_locks); 475 + if (likely(!lock->readers++)) 476 spin_lock(&lock->lock); 477 } 478 ··· 480 { 481 struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks); 482 483 + if (likely(!--lock->readers)) 484 spin_unlock(&lock->lock); 485 local_bh_enable(); 486 }
+3
include/linux/usb/cdc.h
··· 17 #define USB_CDC_SUBCLASS_DMM 0x09 18 #define USB_CDC_SUBCLASS_MDLM 0x0a 19 #define USB_CDC_SUBCLASS_OBEX 0x0b 20 21 #define USB_CDC_PROTO_NONE 0 22 ··· 28 #define USB_CDC_ACM_PROTO_AT_3G 5 29 #define USB_CDC_ACM_PROTO_AT_CDMA 6 30 #define USB_CDC_ACM_PROTO_VENDOR 0xff 31 32 /*-------------------------------------------------------------------------*/ 33
··· 17 #define USB_CDC_SUBCLASS_DMM 0x09 18 #define USB_CDC_SUBCLASS_MDLM 0x0a 19 #define USB_CDC_SUBCLASS_OBEX 0x0b 20 + #define USB_CDC_SUBCLASS_EEM 0x0c 21 22 #define USB_CDC_PROTO_NONE 0 23 ··· 27 #define USB_CDC_ACM_PROTO_AT_3G 5 28 #define USB_CDC_ACM_PROTO_AT_CDMA 6 29 #define USB_CDC_ACM_PROTO_VENDOR 0xff 30 + 31 + #define USB_CDC_PROTO_EEM 7 32 33 /*-------------------------------------------------------------------------*/ 34
+1
include/linux/virtio_net.h
··· 4 * compatible drivers/servers. */ 5 #include <linux/types.h> 6 #include <linux/virtio_config.h> 7 8 /* The ID for virtio_net */ 9 #define VIRTIO_ID_NET 1
··· 4 * compatible drivers/servers. */ 5 #include <linux/types.h> 6 #include <linux/virtio_config.h> 7 + #include <linux/if_ether.h> 8 9 /* The ID for virtio_net */ 10 #define VIRTIO_ID_NET 1
+1
include/net/bluetooth/hci_core.h
··· 457 458 int hci_register_sysfs(struct hci_dev *hdev); 459 void hci_unregister_sysfs(struct hci_dev *hdev); 460 void hci_conn_add_sysfs(struct hci_conn *conn); 461 void hci_conn_del_sysfs(struct hci_conn *conn); 462
··· 457 458 int hci_register_sysfs(struct hci_dev *hdev); 459 void hci_unregister_sysfs(struct hci_dev *hdev); 460 + void hci_conn_init_sysfs(struct hci_conn *conn); 461 void hci_conn_add_sysfs(struct hci_conn *conn); 462 void hci_conn_del_sysfs(struct hci_conn *conn); 463
+13 -1
include/net/tcp.h
··· 41 #include <net/ip.h> 42 #include <net/tcp_states.h> 43 #include <net/inet_ecn.h> 44 45 #include <linux/seq_file.h> 46 ··· 531 tcp_fast_path_on(tp); 532 } 533 534 /* Compute the actual receive window we are currently advertising. 535 * Rcv_nxt can be after the window if our peer push more data 536 * than the offered window. ··· 907 wake_up_interruptible(sk->sk_sleep); 908 if (!inet_csk_ack_scheduled(sk)) 909 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 910 - (3 * TCP_RTO_MIN) / 4, 911 TCP_RTO_MAX); 912 } 913 return 1;
··· 41 #include <net/ip.h> 42 #include <net/tcp_states.h> 43 #include <net/inet_ecn.h> 44 + #include <net/dst.h> 45 46 #include <linux/seq_file.h> 47 ··· 530 tcp_fast_path_on(tp); 531 } 532 533 + /* Compute the actual rto_min value */ 534 + static inline u32 tcp_rto_min(struct sock *sk) 535 + { 536 + struct dst_entry *dst = __sk_dst_get(sk); 537 + u32 rto_min = TCP_RTO_MIN; 538 + 539 + if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) 540 + rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN); 541 + return rto_min; 542 + } 543 + 544 /* Compute the actual receive window we are currently advertising. 545 * Rcv_nxt can be after the window if our peer push more data 546 * than the offered window. ··· 895 wake_up_interruptible(sk->sk_sleep); 896 if (!inet_csk_ack_scheduled(sk)) 897 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 898 + (3 * tcp_rto_min(sk)) / 4, 899 TCP_RTO_MAX); 900 } 901 return 1;
+1
net/atm/br2684.c
··· 549 struct br2684_dev *brdev = BRPRIV(netdev); 550 551 ether_setup(netdev); 552 553 netdev->netdev_ops = &br2684_netdev_ops; 554
··· 549 struct br2684_dev *brdev = BRPRIV(netdev); 550 551 ether_setup(netdev); 552 + brdev->net_dev = netdev; 553 554 netdev->netdev_ops = &br2684_netdev_ops; 555
+2
net/bluetooth/hci_conn.c
··· 248 if (hdev->notify) 249 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD); 250 251 tasklet_enable(&hdev->tx_task); 252 253 return conn;
··· 248 if (hdev->notify) 249 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD); 250 251 + hci_conn_init_sysfs(conn); 252 + 253 tasklet_enable(&hdev->tx_task); 254 255 return conn;
+42 -36
net/bluetooth/hci_sysfs.c
··· 9 struct class *bt_class = NULL; 10 EXPORT_SYMBOL_GPL(bt_class); 11 12 - static struct workqueue_struct *bluetooth; 13 14 static inline char *link_typetostr(int type) 15 { ··· 89 { 90 struct hci_conn *conn = container_of(work, struct hci_conn, work_add); 91 92 - /* ensure previous add/del is complete */ 93 - flush_workqueue(bluetooth); 94 95 if (device_add(&conn->dev) < 0) { 96 BT_ERR("Failed to register connection device"); 97 return; 98 } 99 - } 100 - 101 - void hci_conn_add_sysfs(struct hci_conn *conn) 102 - { 103 - struct hci_dev *hdev = conn->hdev; 104 - 105 - BT_DBG("conn %p", conn); 106 - 107 - conn->dev.type = &bt_link; 108 - conn->dev.class = bt_class; 109 - conn->dev.parent = &hdev->dev; 110 - 111 - dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); 112 - 113 - dev_set_drvdata(&conn->dev, conn); 114 - 115 - device_initialize(&conn->dev); 116 - 117 - INIT_WORK(&conn->work_add, add_conn); 118 - 119 - queue_work(bluetooth, &conn->work_add); 120 } 121 122 /* ··· 113 struct hci_conn *conn = container_of(work, struct hci_conn, work_del); 114 struct hci_dev *hdev = conn->hdev; 115 116 - /* ensure previous add/del is complete */ 117 - flush_workqueue(bluetooth); 118 119 while (1) { 120 struct device *dev; ··· 134 hci_dev_put(hdev); 135 } 136 137 void hci_conn_del_sysfs(struct hci_conn *conn) 138 { 139 BT_DBG("conn %p", conn); 140 141 - if (!device_is_registered(&conn->dev)) 142 - return; 143 - 144 - INIT_WORK(&conn->work_del, del_conn); 145 - 146 - queue_work(bluetooth, &conn->work_del); 147 } 148 149 static inline char *host_typetostr(int type) ··· 444 445 int __init bt_sysfs_init(void) 446 { 447 - bluetooth = create_singlethread_workqueue("bluetooth"); 448 - if (!bluetooth) 449 return -ENOMEM; 450 451 bt_class = class_create(THIS_MODULE, "bluetooth"); 452 if (IS_ERR(bt_class)) { 453 - destroy_workqueue(bluetooth); 454 return PTR_ERR(bt_class); 455 } 456 ··· 459 460 void bt_sysfs_cleanup(void) 461 { 462 - destroy_workqueue(bluetooth); 463 464 
class_destroy(bt_class); 465 }
··· 9 struct class *bt_class = NULL; 10 EXPORT_SYMBOL_GPL(bt_class); 11 12 + static struct workqueue_struct *bt_workq; 13 14 static inline char *link_typetostr(int type) 15 { ··· 89 { 90 struct hci_conn *conn = container_of(work, struct hci_conn, work_add); 91 92 + /* ensure previous del is complete */ 93 + flush_work(&conn->work_del); 94 95 if (device_add(&conn->dev) < 0) { 96 BT_ERR("Failed to register connection device"); 97 return; 98 } 99 } 100 101 /* ··· 134 struct hci_conn *conn = container_of(work, struct hci_conn, work_del); 135 struct hci_dev *hdev = conn->hdev; 136 137 + /* ensure previous add is complete */ 138 + flush_work(&conn->work_add); 139 + 140 + if (!device_is_registered(&conn->dev)) 141 + return; 142 143 while (1) { 144 struct device *dev; ··· 152 hci_dev_put(hdev); 153 } 154 155 + void hci_conn_init_sysfs(struct hci_conn *conn) 156 + { 157 + struct hci_dev *hdev = conn->hdev; 158 + 159 + BT_DBG("conn %p", conn); 160 + 161 + conn->dev.type = &bt_link; 162 + conn->dev.class = bt_class; 163 + conn->dev.parent = &hdev->dev; 164 + 165 + dev_set_drvdata(&conn->dev, conn); 166 + 167 + device_initialize(&conn->dev); 168 + 169 + INIT_WORK(&conn->work_add, add_conn); 170 + INIT_WORK(&conn->work_del, del_conn); 171 + } 172 + 173 + void hci_conn_add_sysfs(struct hci_conn *conn) 174 + { 175 + struct hci_dev *hdev = conn->hdev; 176 + 177 + BT_DBG("conn %p", conn); 178 + 179 + dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); 180 + 181 + queue_work(bt_workq, &conn->work_add); 182 + } 183 + 184 void hci_conn_del_sysfs(struct hci_conn *conn) 185 { 186 BT_DBG("conn %p", conn); 187 188 + queue_work(bt_workq, &conn->work_del); 189 } 190 191 static inline char *host_typetostr(int type) ··· 438 439 int __init bt_sysfs_init(void) 440 { 441 + bt_workq = create_singlethread_workqueue("bluetooth"); 442 + if (!bt_workq) 443 return -ENOMEM; 444 445 bt_class = class_create(THIS_MODULE, "bluetooth"); 446 if (IS_ERR(bt_class)) { 447 + destroy_workqueue(bt_workq); 
448 return PTR_ERR(bt_class); 449 } 450 ··· 453 454 void bt_sysfs_cleanup(void) 455 { 456 + destroy_workqueue(bt_workq); 457 458 class_destroy(bt_class); 459 }
+5 -4
net/core/dev.c
··· 1735 { 1736 u32 hash; 1737 1738 - if (skb_rx_queue_recorded(skb)) { 1739 - hash = skb_get_rx_queue(skb); 1740 - } else if (skb->sk && skb->sk->sk_hash) { 1741 hash = skb->sk->sk_hash; 1742 - } else 1743 hash = skb->protocol; 1744 1745 hash = jhash_1word(hash, skb_tx_hashrnd);
··· 1735 { 1736 u32 hash; 1737 1738 + if (skb_rx_queue_recorded(skb)) 1739 + return skb_get_rx_queue(skb) % dev->real_num_tx_queues; 1740 + 1741 + if (skb->sk && skb->sk->sk_hash) 1742 hash = skb->sk->sk_hash; 1743 + else 1744 hash = skb->protocol; 1745 1746 hash = jhash_1word(hash, skb_tx_hashrnd);
+14 -13
net/core/skbuff.c
··· 1365 1366 static inline struct page *linear_to_page(struct page *page, unsigned int *len, 1367 unsigned int *offset, 1368 - struct sk_buff *skb) 1369 { 1370 - struct sock *sk = skb->sk; 1371 struct page *p = sk->sk_sndmsg_page; 1372 unsigned int off; 1373 ··· 1404 */ 1405 static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page, 1406 unsigned int *len, unsigned int offset, 1407 - struct sk_buff *skb, int linear) 1408 { 1409 if (unlikely(spd->nr_pages == PIPE_BUFFERS)) 1410 return 1; 1411 1412 if (linear) { 1413 - page = linear_to_page(page, len, &offset, skb); 1414 if (!page) 1415 return 1; 1416 } else ··· 1442 static inline int __splice_segment(struct page *page, unsigned int poff, 1443 unsigned int plen, unsigned int *off, 1444 unsigned int *len, struct sk_buff *skb, 1445 - struct splice_pipe_desc *spd, int linear) 1446 { 1447 if (!*len) 1448 return 1; ··· 1466 /* the linear region may spread across several pages */ 1467 flen = min_t(unsigned int, flen, PAGE_SIZE - poff); 1468 1469 - if (spd_fill_page(spd, page, &flen, poff, skb, linear)) 1470 return 1; 1471 1472 __segment_seek(&page, &poff, &plen, flen); ··· 1482 * pipe is full or if we already spliced the requested length. 
1483 */ 1484 static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset, 1485 - unsigned int *len, 1486 - struct splice_pipe_desc *spd) 1487 { 1488 int seg; 1489 ··· 1493 if (__splice_segment(virt_to_page(skb->data), 1494 (unsigned long) skb->data & (PAGE_SIZE - 1), 1495 skb_headlen(skb), 1496 - offset, len, skb, spd, 1)) 1497 return 1; 1498 1499 /* ··· 1503 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 1504 1505 if (__splice_segment(f->page, f->page_offset, f->size, 1506 - offset, len, skb, spd, 0)) 1507 return 1; 1508 } 1509 ··· 1529 .ops = &sock_pipe_buf_ops, 1530 .spd_release = sock_spd_release, 1531 }; 1532 1533 /* 1534 * __skb_splice_bits() only fails if the output has no room left, 1535 * so no point in going over the frag_list for the error case. 1536 */ 1537 - if (__skb_splice_bits(skb, &offset, &tlen, &spd)) 1538 goto done; 1539 else if (!tlen) 1540 goto done; ··· 1547 struct sk_buff *list = skb_shinfo(skb)->frag_list; 1548 1549 for (; list && tlen; list = list->next) { 1550 - if (__skb_splice_bits(list, &offset, &tlen, &spd)) 1551 break; 1552 } 1553 } 1554 1555 done: 1556 if (spd.nr_pages) { 1557 - struct sock *sk = skb->sk; 1558 int ret; 1559 1560 /*
··· 1365 1366 static inline struct page *linear_to_page(struct page *page, unsigned int *len, 1367 unsigned int *offset, 1368 + struct sk_buff *skb, struct sock *sk) 1369 { 1370 struct page *p = sk->sk_sndmsg_page; 1371 unsigned int off; 1372 ··· 1405 */ 1406 static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page, 1407 unsigned int *len, unsigned int offset, 1408 + struct sk_buff *skb, int linear, 1409 + struct sock *sk) 1410 { 1411 if (unlikely(spd->nr_pages == PIPE_BUFFERS)) 1412 return 1; 1413 1414 if (linear) { 1415 + page = linear_to_page(page, len, &offset, skb, sk); 1416 if (!page) 1417 return 1; 1418 } else ··· 1442 static inline int __splice_segment(struct page *page, unsigned int poff, 1443 unsigned int plen, unsigned int *off, 1444 unsigned int *len, struct sk_buff *skb, 1445 + struct splice_pipe_desc *spd, int linear, 1446 + struct sock *sk) 1447 { 1448 if (!*len) 1449 return 1; ··· 1465 /* the linear region may spread across several pages */ 1466 flen = min_t(unsigned int, flen, PAGE_SIZE - poff); 1467 1468 + if (spd_fill_page(spd, page, &flen, poff, skb, linear, sk)) 1469 return 1; 1470 1471 __segment_seek(&page, &poff, &plen, flen); ··· 1481 * pipe is full or if we already spliced the requested length. 
1482 */ 1483 static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset, 1484 + unsigned int *len, struct splice_pipe_desc *spd, 1485 + struct sock *sk) 1486 { 1487 int seg; 1488 ··· 1492 if (__splice_segment(virt_to_page(skb->data), 1493 (unsigned long) skb->data & (PAGE_SIZE - 1), 1494 skb_headlen(skb), 1495 + offset, len, skb, spd, 1, sk)) 1496 return 1; 1497 1498 /* ··· 1502 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 1503 1504 if (__splice_segment(f->page, f->page_offset, f->size, 1505 + offset, len, skb, spd, 0, sk)) 1506 return 1; 1507 } 1508 ··· 1528 .ops = &sock_pipe_buf_ops, 1529 .spd_release = sock_spd_release, 1530 }; 1531 + struct sock *sk = skb->sk; 1532 1533 /* 1534 * __skb_splice_bits() only fails if the output has no room left, 1535 * so no point in going over the frag_list for the error case. 1536 */ 1537 + if (__skb_splice_bits(skb, &offset, &tlen, &spd, sk)) 1538 goto done; 1539 else if (!tlen) 1540 goto done; ··· 1545 struct sk_buff *list = skb_shinfo(skb)->frag_list; 1546 1547 for (; list && tlen; list = list->next) { 1548 + if (__skb_splice_bits(list, &offset, &tlen, &spd, sk)) 1549 break; 1550 } 1551 } 1552 1553 done: 1554 if (spd.nr_pages) { 1555 int ret; 1556 1557 /*
-10
net/ipv4/tcp_input.c
··· 597 tcp_grow_window(sk, skb); 598 } 599 600 - static u32 tcp_rto_min(struct sock *sk) 601 - { 602 - struct dst_entry *dst = __sk_dst_get(sk); 603 - u32 rto_min = TCP_RTO_MIN; 604 - 605 - if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) 606 - rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN); 607 - return rto_min; 608 - } 609 - 610 /* Called to compute a smoothed rtt estimate. The data fed to this 611 * routine either comes from timestamps, or from segments that were 612 * known _not_ to have been retransmitted [see Karn/Partridge
··· 597 tcp_grow_window(sk, skb); 598 } 599 600 /* Called to compute a smoothed rtt estimate. The data fed to this 601 * routine either comes from timestamps, or from segments that were 602 * known _not_ to have been retransmitted [see Karn/Partridge
+10 -10
net/mac80211/main.c
··· 757 local->hw.conf.long_frame_max_tx_count = 4; 758 local->hw.conf.short_frame_max_tx_count = 7; 759 local->hw.conf.radio_enabled = true; 760 761 INIT_LIST_HEAD(&local->interfaces); 762 mutex_init(&local->iflist_mtx); ··· 910 if (result < 0) 911 goto fail_sta_info; 912 913 rtnl_lock(); 914 result = dev_alloc_name(local->mdev, local->mdev->name); 915 if (result < 0) ··· 936 printk(KERN_DEBUG "%s: Failed to initialize rate control " 937 "algorithm\n", wiphy_name(local->hw.wiphy)); 938 goto fail_rate; 939 - } 940 - 941 - result = ieee80211_wep_init(local); 942 - 943 - if (result < 0) { 944 - printk(KERN_DEBUG "%s: Failed to initialize wep: %d\n", 945 - wiphy_name(local->hw.wiphy), result); 946 - goto fail_wep; 947 } 948 949 /* add one default STA interface if supported */ ··· 967 968 return 0; 969 970 - fail_wep: 971 - rate_control_deinitialize(local); 972 fail_rate: 973 unregister_netdevice(local->mdev); 974 local->mdev = NULL; 975 fail_dev: 976 rtnl_unlock(); 977 sta_info_stop(local); 978 fail_sta_info: 979 debugfs_hw_del(local);
··· 757 local->hw.conf.long_frame_max_tx_count = 4; 758 local->hw.conf.short_frame_max_tx_count = 7; 759 local->hw.conf.radio_enabled = true; 760 + local->user_power_level = -1; 761 762 INIT_LIST_HEAD(&local->interfaces); 763 mutex_init(&local->iflist_mtx); ··· 909 if (result < 0) 910 goto fail_sta_info; 911 912 + result = ieee80211_wep_init(local); 913 + if (result < 0) { 914 + printk(KERN_DEBUG "%s: Failed to initialize wep: %d\n", 915 + wiphy_name(local->hw.wiphy), result); 916 + goto fail_wep; 917 + } 918 + 919 rtnl_lock(); 920 result = dev_alloc_name(local->mdev, local->mdev->name); 921 if (result < 0) ··· 928 printk(KERN_DEBUG "%s: Failed to initialize rate control " 929 "algorithm\n", wiphy_name(local->hw.wiphy)); 930 goto fail_rate; 931 } 932 933 /* add one default STA interface if supported */ ··· 967 968 return 0; 969 970 fail_rate: 971 unregister_netdevice(local->mdev); 972 local->mdev = NULL; 973 fail_dev: 974 rtnl_unlock(); 975 + ieee80211_wep_free(local); 976 + fail_wep: 977 sta_info_stop(local); 978 fail_sta_info: 979 debugfs_hw_del(local);
+1
net/netfilter/Kconfig
··· 837 depends on NETFILTER_TPROXY 838 depends on NETFILTER_XTABLES 839 depends on NETFILTER_ADVANCED 840 select NF_DEFRAG_IPV4 841 help 842 This option adds a `socket' match, which can be used to match
··· 837 depends on NETFILTER_TPROXY 838 depends on NETFILTER_XTABLES 839 depends on NETFILTER_ADVANCED 840 + depends on !NF_CONNTRACK || NF_CONNTRACK 841 select NF_DEFRAG_IPV4 842 help 843 This option adds a `socket' match, which can be used to match
+1 -1
net/sched/cls_api.c
··· 254 } 255 tp->ops = tp_ops; 256 tp->protocol = protocol; 257 - tp->prio = nprio ? : tcf_auto_prio(*back); 258 tp->q = q; 259 tp->classify = tp_ops->classify; 260 tp->classid = parent;
··· 254 } 255 tp->ops = tp_ops; 256 tp->protocol = protocol; 257 + tp->prio = nprio ? : TC_H_MAJ(tcf_auto_prio(*back)); 258 tp->q = q; 259 tp->classify = tp_ops->classify; 260 tp->classid = parent;