Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (47 commits)
ehea: Fix napi list corruption on ifconfig down
igbvf: Allow VF driver to correctly recognize failure to set mac
3c59x: Fix build failure with gcc 3.2
sky2: Avoid transmits during sky2_down()
iwlagn: do not send key clear commands when rfkill enabled
libertas: Read buffer overflow
drivers/net/wireless: introduce missing kfree
drivers/net/wireless/iwlwifi: introduce missing kfree
zd1211rw: fix unaligned access in zd_mac_rx
cfg80211: fix regression on beacon world roaming feature
cfg80211: add two missing NULL pointer checks
ixgbe: Patch to modify 82598 PCIe completion timeout values
bluetooth: rfcomm_init bug fix
mlx4_en: Fix double pci unmapping.
mISDN: Fix handling of receive buffer size in L1oIP
pcnet32: VLB support fixes
pcnet32: remove superfluous NULL pointer check in pcnet32_probe1()
net: restore the original spinlock to protect unicast list
netxen: fix coherent dma mask setting
mISDN: Read buffer overflow
...

+364 -165
+2 -2
Documentation/DocBook/kernel-hacking.tmpl
··· 449 </para> 450 451 <programlisting> 452 - __u32 ipaddress; 453 - printk(KERN_INFO "my ip: %d.%d.%d.%d\n", NIPQUAD(ipaddress)); 454 </programlisting> 455 456 <para>
··· 449 </para> 450 451 <programlisting> 452 + __be32 ipaddress; 453 + printk(KERN_INFO "my ip: %pI4\n", &amp;ipaddress); 454 </programlisting> 455 456 <para>
+1 -1
drivers/isdn/mISDN/l1oip_core.c
··· 1480 return -ENOMEM; 1481 1482 l1oip_cnt = 0; 1483 - while (type[l1oip_cnt] && l1oip_cnt < MAX_CARDS) { 1484 switch (type[l1oip_cnt] & 0xff) { 1485 case 1: 1486 pri = 0;
··· 1480 return -ENOMEM; 1481 1482 l1oip_cnt = 0; 1483 + while (l1oip_cnt < MAX_CARDS && type[l1oip_cnt]) { 1484 switch (type[l1oip_cnt] & 0xff) { 1485 case 1: 1486 pri = 0;
+3 -1
drivers/net/3c515.c
··· 832 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 833 vp->rx_ring[i].addr = isa_virt_to_bus(skb->data); 834 } 835 - vp->rx_ring[i - 1].next = isa_virt_to_bus(&vp->rx_ring[0]); /* Wrap the ring. */ 836 outl(isa_virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr); 837 } 838 if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */
··· 832 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 833 vp->rx_ring[i].addr = isa_virt_to_bus(skb->data); 834 } 835 + if (i != 0) 836 + vp->rx_ring[i - 1].next = 837 + isa_virt_to_bus(&vp->rx_ring[0]); /* Wrap the ring. */ 838 outl(isa_virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr); 839 } 840 if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */
+6 -4
drivers/net/3c59x.c
··· 2721 &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]); 2722 issue_and_wait(dev, DownStall); 2723 for (i = 0; i < TX_RING_SIZE; i++) { 2724 - pr_err(" %d: @%p length %8.8x status %8.8x\n", i, 2725 - &vp->tx_ring[i], 2726 #if DO_ZEROCOPY 2727 - le32_to_cpu(vp->tx_ring[i].frag[0].length), 2728 #else 2729 - le32_to_cpu(vp->tx_ring[i].length), 2730 #endif 2731 le32_to_cpu(vp->tx_ring[i].status)); 2732 } 2733 if (!stalled)
··· 2721 &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]); 2722 issue_and_wait(dev, DownStall); 2723 for (i = 0; i < TX_RING_SIZE; i++) { 2724 + unsigned int length; 2725 + 2726 #if DO_ZEROCOPY 2727 + length = le32_to_cpu(vp->tx_ring[i].frag[0].length); 2728 #else 2729 + length = le32_to_cpu(vp->tx_ring[i].length); 2730 #endif 2731 + pr_err(" %d: @%p length %8.8x status %8.8x\n", 2732 + i, &vp->tx_ring[i], length, 2733 le32_to_cpu(vp->tx_ring[i].status)); 2734 } 2735 if (!stalled)
+3 -3
drivers/net/eexpress.c
··· 1474 outw(0x0000, ioaddr + 0x800c); 1475 outw(0x0000, ioaddr + 0x800e); 1476 1477 - for (i = 0; i < (sizeof(start_code)); i+=32) { 1478 int j; 1479 outw(i, ioaddr + SM_PTR); 1480 - for (j = 0; j < 16; j+=2) 1481 outw(start_code[(i+j)/2], 1482 ioaddr+0x4000+j); 1483 - for (j = 0; j < 16; j+=2) 1484 outw(start_code[(i+j+16)/2], 1485 ioaddr+0x8000+j); 1486 }
··· 1474 outw(0x0000, ioaddr + 0x800c); 1475 outw(0x0000, ioaddr + 0x800e); 1476 1477 + for (i = 0; i < ARRAY_SIZE(start_code) * 2; i+=32) { 1478 int j; 1479 outw(i, ioaddr + SM_PTR); 1480 + for (j = 0; j < 16 && (i+j)/2 < ARRAY_SIZE(start_code); j+=2) 1481 outw(start_code[(i+j)/2], 1482 ioaddr+0x4000+j); 1483 + for (j = 0; j < 16 && (i+j+16)/2 < ARRAY_SIZE(start_code); j+=2) 1484 outw(start_code[(i+j+16)/2], 1485 ioaddr+0x8000+j); 1486 }
+1 -1
drivers/net/ehea/ehea.h
··· 40 #include <asm/io.h> 41 42 #define DRV_NAME "ehea" 43 - #define DRV_VERSION "EHEA_0101" 44 45 /* eHEA capability flags */ 46 #define DLPAR_PORT_ADD_REM 1
··· 40 #include <asm/io.h> 41 42 #define DRV_NAME "ehea" 43 + #define DRV_VERSION "EHEA_0102" 44 45 /* eHEA capability flags */ 46 #define DLPAR_PORT_ADD_REM 1
+3
drivers/net/ehea/ehea_main.c
··· 1545 { 1546 int ret, i; 1547 1548 ret = ehea_destroy_qp(pr->qp); 1549 1550 if (!ret) {
··· 1545 { 1546 int ret, i; 1547 1548 + if (pr->qp) 1549 + netif_napi_del(&pr->napi); 1550 + 1551 ret = ehea_destroy_qp(pr->qp); 1552 1553 if (!ret) {
+4 -6
drivers/net/gianfar_ethtool.c
··· 366 return -EINVAL; 367 } 368 369 - priv->rxic = mk_ic_value( 370 - gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs), 371 - cvals->rx_max_coalesced_frames); 372 373 /* Set up tx coalescing */ 374 if ((cvals->tx_coalesce_usecs == 0) || ··· 389 return -EINVAL; 390 } 391 392 - priv->txic = mk_ic_value( 393 - gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs), 394 - cvals->tx_max_coalesced_frames); 395 396 gfar_write(&priv->regs->rxic, 0); 397 if (priv->rxcoalescing)
··· 366 return -EINVAL; 367 } 368 369 + priv->rxic = mk_ic_value(cvals->rx_max_coalesced_frames, 370 + gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs)); 371 372 /* Set up tx coalescing */ 373 if ((cvals->tx_coalesce_usecs == 0) || ··· 390 return -EINVAL; 391 } 392 393 + priv->txic = mk_ic_value(cvals->tx_max_coalesced_frames, 394 + gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs)); 395 396 gfar_write(&priv->regs->rxic, 0); 397 if (priv->rxcoalescing)
+4
drivers/net/igbvf/vf.c
··· 274 275 err = mbx->ops.read_posted(hw, msgbuf, 2); 276 277 /* if nacked the vlan was rejected */ 278 if (!err && (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))) 279 err = -E1000_ERR_MAC_INIT; ··· 318 319 if (!ret_val) 320 ret_val = mbx->ops.read_posted(hw, msgbuf, 3); 321 322 /* if nacked the address was rejected, use "perm_addr" */ 323 if (!ret_val &&
··· 274 275 err = mbx->ops.read_posted(hw, msgbuf, 2); 276 277 + msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; 278 + 279 /* if nacked the vlan was rejected */ 280 if (!err && (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))) 281 err = -E1000_ERR_MAC_INIT; ··· 316 317 if (!ret_val) 318 ret_val = mbx->ops.read_posted(hw, msgbuf, 3); 319 + 320 + msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; 321 322 /* if nacked the address was rejected, use "perm_addr" */ 323 if (!ret_val &&
+2
drivers/net/ixgbe/ixgbe.h
··· 96 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 97 #define IXGBE_TX_FLAGS_VLAN_SHIFT 16 98 99 /* wrapper around a pointer to a socket buffer, 100 * so a DMA handle can be stored along with the buffer */ 101 struct ixgbe_tx_buffer {
··· 96 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 97 #define IXGBE_TX_FLAGS_VLAN_SHIFT 16 98 99 + #define IXGBE_MAX_RSC_INT_RATE 162760 100 + 101 /* wrapper around a pointer to a socket buffer, 102 * so a DMA handle can be stored along with the buffer */ 103 struct ixgbe_tx_buffer {
+66 -1
drivers/net/ixgbe/ixgbe_82598.c
··· 50 u8 *eeprom_data); 51 52 /** 53 * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count 54 * @hw: pointer to hardware structure 55 * ··· 194 } 195 196 out: 197 return ret_val; 198 } 199 ··· 1150 static struct ixgbe_mac_operations mac_ops_82598 = { 1151 .init_hw = &ixgbe_init_hw_generic, 1152 .reset_hw = &ixgbe_reset_hw_82598, 1153 - .start_hw = &ixgbe_start_hw_generic, 1154 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, 1155 .get_media_type = &ixgbe_get_media_type_82598, 1156 .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598,
··· 50 u8 *eeprom_data); 51 52 /** 53 + * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout 54 + * @hw: pointer to the HW structure 55 + * 56 + * The defaults for 82598 should be in the range of 50us to 50ms, 57 + * however the hardware default for these parts is 500us to 1ms which is less 58 + * than the 10ms recommended by the pci-e spec. To address this we need to 59 + * increase the value to either 10ms to 250ms for capability version 1 config, 60 + * or 16ms to 55ms for version 2. 61 + **/ 62 + void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) 63 + { 64 + struct ixgbe_adapter *adapter = hw->back; 65 + u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR); 66 + u16 pcie_devctl2; 67 + 68 + /* only take action if timeout value is defaulted to 0 */ 69 + if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK) 70 + goto out; 71 + 72 + /* 73 + * if capababilities version is type 1 we can write the 74 + * timeout of 10ms to 250ms through the GCR register 75 + */ 76 + if (!(gcr & IXGBE_GCR_CAP_VER2)) { 77 + gcr |= IXGBE_GCR_CMPL_TMOUT_10ms; 78 + goto out; 79 + } 80 + 81 + /* 82 + * for version 2 capabilities we need to write the config space 83 + * directly in order to set the completion timeout value for 84 + * 16ms to 55ms 85 + */ 86 + pci_read_config_word(adapter->pdev, 87 + IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2); 88 + pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms; 89 + pci_write_config_word(adapter->pdev, 90 + IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); 91 + out: 92 + /* disable completion timeout resend */ 93 + gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND; 94 + IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr); 95 + } 96 + 97 + /** 98 * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count 99 * @hw: pointer to hardware structure 100 * ··· 149 } 150 151 out: 152 + return ret_val; 153 + } 154 + 155 + /** 156 + * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx 157 + * @hw: pointer to hardware structure 158 + * 159 + * Starts the hardware using the generic start_hw function. 
160 + * Then set pcie completion timeout 161 + **/ 162 + s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) 163 + { 164 + s32 ret_val = 0; 165 + 166 + ret_val = ixgbe_start_hw_generic(hw); 167 + 168 + /* set the completion timeout for interface */ 169 + if (ret_val == 0) 170 + ixgbe_set_pcie_completion_timeout(hw); 171 + 172 return ret_val; 173 } 174 ··· 1085 static struct ixgbe_mac_operations mac_ops_82598 = { 1086 .init_hw = &ixgbe_init_hw_generic, 1087 .reset_hw = &ixgbe_reset_hw_82598, 1088 + .start_hw = &ixgbe_start_hw_82598, 1089 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, 1090 .get_media_type = &ixgbe_get_media_type_82598, 1091 .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598,
+7 -4
drivers/net/ixgbe/ixgbe_ethtool.c
··· 1975 * any other value means disable eitr, which is best 1976 * served by setting the interrupt rate very high 1977 */ 1978 - adapter->eitr_param = IXGBE_MAX_INT_RATE; 1979 adapter->itr_setting = 0; 1980 } 1981 ··· 2002 2003 ethtool_op_set_flags(netdev, data); 2004 2005 - if (!(adapter->flags & IXGBE_FLAG2_RSC_CAPABLE)) 2006 return 0; 2007 2008 /* if state changes we need to update adapter->flags and reset */ 2009 if ((!!(data & ETH_FLAG_LRO)) != 2010 - (!!(adapter->flags & IXGBE_FLAG2_RSC_ENABLED))) { 2011 - adapter->flags ^= IXGBE_FLAG2_RSC_ENABLED; 2012 if (netif_running(netdev)) 2013 ixgbe_reinit_locked(adapter); 2014 else
··· 1975 * any other value means disable eitr, which is best 1976 * served by setting the interrupt rate very high 1977 */ 1978 + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) 1979 + adapter->eitr_param = IXGBE_MAX_RSC_INT_RATE; 1980 + else 1981 + adapter->eitr_param = IXGBE_MAX_INT_RATE; 1982 adapter->itr_setting = 0; 1983 } 1984 ··· 1999 2000 ethtool_op_set_flags(netdev, data); 2001 2002 + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) 2003 return 0; 2004 2005 /* if state changes we need to update adapter->flags and reset */ 2006 if ((!!(data & ETH_FLAG_LRO)) != 2007 + (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) { 2008 + adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED; 2009 if (netif_running(netdev)) 2010 ixgbe_reinit_locked(adapter); 2011 else
+16 -9
drivers/net/ixgbe/ixgbe_main.c
··· 780 prefetch(next_rxd); 781 cleaned_count++; 782 783 - if (adapter->flags & IXGBE_FLAG2_RSC_CAPABLE) 784 rsc_count = ixgbe_get_rsc_count(rx_desc); 785 786 if (rsc_count) { ··· 2036 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); 2037 } 2038 } else { 2039 - if (!(adapter->flags & IXGBE_FLAG2_RSC_ENABLED) && 2040 (netdev->mtu <= ETH_DATA_LEN)) 2041 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 2042 else ··· 2165 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 2166 } 2167 2168 - if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED) { 2169 /* Enable 82599 HW-RSC */ 2170 for (i = 0; i < adapter->num_rx_queues; i++) { 2171 j = adapter->rx_ring[i].reg_idx; ··· 3812 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; 3813 } else if (hw->mac.type == ixgbe_mac_82599EB) { 3814 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; 3815 - adapter->flags |= IXGBE_FLAG2_RSC_CAPABLE; 3816 - adapter->flags |= IXGBE_FLAG2_RSC_ENABLED; 3817 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; 3818 adapter->ring_feature[RING_F_FDIR].indices = 3819 IXGBE_MAX_FDIR_INDICES; ··· 5360 static void ixgbe_netpoll(struct net_device *netdev) 5361 { 5362 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5363 5364 - disable_irq(adapter->pdev->irq); 5365 adapter->flags |= IXGBE_FLAG_IN_NETPOLL; 5366 - ixgbe_intr(adapter->pdev->irq, netdev); 5367 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; 5368 - enable_irq(adapter->pdev->irq); 5369 } 5370 #endif 5371 ··· 5618 if (pci_using_dac) 5619 netdev->features |= NETIF_F_HIGHDMA; 5620 5621 - if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED) 5622 netdev->features |= NETIF_F_LRO; 5623 5624 /* make sure the EEPROM is good */
··· 780 prefetch(next_rxd); 781 cleaned_count++; 782 783 + if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) 784 rsc_count = ixgbe_get_rsc_count(rx_desc); 785 786 if (rsc_count) { ··· 2036 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); 2037 } 2038 } else { 2039 + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && 2040 (netdev->mtu <= ETH_DATA_LEN)) 2041 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 2042 else ··· 2165 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 2166 } 2167 2168 + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 2169 /* Enable 82599 HW-RSC */ 2170 for (i = 0; i < adapter->num_rx_queues; i++) { 2171 j = adapter->rx_ring[i].reg_idx; ··· 3812 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; 3813 } else if (hw->mac.type == ixgbe_mac_82599EB) { 3814 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; 3815 + adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; 3816 + adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 3817 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; 3818 adapter->ring_feature[RING_F_FDIR].indices = 3819 IXGBE_MAX_FDIR_INDICES; ··· 5360 static void ixgbe_netpoll(struct net_device *netdev) 5361 { 5362 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5363 + int i; 5364 5365 adapter->flags |= IXGBE_FLAG_IN_NETPOLL; 5366 + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 5367 + int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 5368 + for (i = 0; i < num_q_vectors; i++) { 5369 + struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; 5370 + ixgbe_msix_clean_many(0, q_vector); 5371 + } 5372 + } else { 5373 + ixgbe_intr(adapter->pdev->irq, netdev); 5374 + } 5375 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; 5376 } 5377 #endif 5378 ··· 5611 if (pci_using_dac) 5612 netdev->features |= NETIF_F_HIGHDMA; 5613 5614 + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) 5615 netdev->features |= NETIF_F_LRO; 5616 5617 /* make sure the EEPROM is good */
+8
drivers/net/ixgbe/ixgbe_type.h
··· 718 #define IXGBE_ECC_STATUS_82599 0x110E0 719 #define IXGBE_BAR_CTRL_82599 0x110F4 720 721 /* Time Sync Registers */ 722 #define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ 723 #define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ ··· 1527 1528 /* PCI Bus Info */ 1529 #define IXGBE_PCI_LINK_STATUS 0xB2 1530 #define IXGBE_PCI_LINK_WIDTH 0x3F0 1531 #define IXGBE_PCI_LINK_WIDTH_1 0x10 1532 #define IXGBE_PCI_LINK_WIDTH_2 0x20 ··· 1538 #define IXGBE_PCI_LINK_SPEED_5000 0x2 1539 #define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E 1540 #define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 1541 1542 /* Number of 100 microseconds we wait for PCI Express master disable */ 1543 #define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
··· 718 #define IXGBE_ECC_STATUS_82599 0x110E0 719 #define IXGBE_BAR_CTRL_82599 0x110F4 720 721 + /* PCI Express Control */ 722 + #define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000 723 + #define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000 724 + #define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000 725 + #define IXGBE_GCR_CAP_VER2 0x00040000 726 + 727 /* Time Sync Registers */ 728 #define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ 729 #define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ ··· 1521 1522 /* PCI Bus Info */ 1523 #define IXGBE_PCI_LINK_STATUS 0xB2 1524 + #define IXGBE_PCI_DEVICE_CONTROL2 0xC8 1525 #define IXGBE_PCI_LINK_WIDTH 0x3F0 1526 #define IXGBE_PCI_LINK_WIDTH_1 0x10 1527 #define IXGBE_PCI_LINK_WIDTH_2 0x20 ··· 1531 #define IXGBE_PCI_LINK_SPEED_5000 0x2 1532 #define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E 1533 #define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 1534 + #define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 1535 1536 /* Number of 100 microseconds we wait for PCI Express master disable */ 1537 #define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
+1
drivers/net/mlx4/en_tx.c
··· 249 pci_unmap_page(mdev->pdev, 250 (dma_addr_t) be64_to_cpu(data->addr), 251 frag->size, PCI_DMA_TODEVICE); 252 } 253 } 254 /* Stamp the freed descriptor */
··· 249 pci_unmap_page(mdev->pdev, 250 (dma_addr_t) be64_to_cpu(data->addr), 251 frag->size, PCI_DMA_TODEVICE); 252 + ++data; 253 } 254 } 255 /* Stamp the freed descriptor */
+25 -12
drivers/net/netxen/netxen_nic_main.c
··· 221 } 222 } 223 224 - static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id) 225 { 226 struct pci_dev *pdev = adapter->pdev; 227 uint64_t mask, cmask; ··· 229 adapter->pci_using_dac = 0; 230 231 mask = DMA_BIT_MASK(32); 232 - /* 233 - * Consistent DMA mask is set to 32 bit because it cannot be set to 234 - * 35 bits. For P3 also leave it at 32 bits for now. Only the rings 235 - * come off this pool. 236 - */ 237 cmask = DMA_BIT_MASK(32); 238 239 #ifndef CONFIG_IA64 240 - if (revision_id >= NX_P3_B0) 241 - mask = DMA_BIT_MASK(39); 242 - else if (revision_id == NX_P2_C1) 243 mask = DMA_BIT_MASK(35); 244 #endif 245 if (pci_set_dma_mask(pdev, mask) == 0 && 246 pci_set_consistent_dma_mask(pdev, cmask) == 0) { 247 adapter->pci_using_dac = 1; ··· 254 nx_update_dma_mask(struct netxen_adapter *adapter) 255 { 256 int change, shift, err; 257 - uint64_t mask, old_mask; 258 struct pci_dev *pdev = adapter->pdev; 259 260 change = 0; ··· 270 271 if (change) { 272 old_mask = pdev->dma_mask; 273 mask = (1ULL<<(32+shift)) - 1; 274 275 err = pci_set_dma_mask(pdev, mask); 276 if (err) 277 - return pci_set_dma_mask(pdev, old_mask); 278 } 279 280 return 0; 281 } 282 283 static void netxen_check_options(struct netxen_adapter *adapter) ··· 1019 revision_id = pdev->revision; 1020 adapter->ahw.revision_id = revision_id; 1021 1022 - err = nx_set_dma_mask(adapter, revision_id); 1023 if (err) 1024 goto err_out_free_netdev; 1025
··· 221 } 222 } 223 224 + static int nx_set_dma_mask(struct netxen_adapter *adapter) 225 { 226 struct pci_dev *pdev = adapter->pdev; 227 uint64_t mask, cmask; ··· 229 adapter->pci_using_dac = 0; 230 231 mask = DMA_BIT_MASK(32); 232 cmask = DMA_BIT_MASK(32); 233 234 + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { 235 #ifndef CONFIG_IA64 236 mask = DMA_BIT_MASK(35); 237 #endif 238 + } else { 239 + mask = DMA_BIT_MASK(39); 240 + cmask = mask; 241 + } 242 + 243 if (pci_set_dma_mask(pdev, mask) == 0 && 244 pci_set_consistent_dma_mask(pdev, cmask) == 0) { 245 adapter->pci_using_dac = 1; ··· 256 nx_update_dma_mask(struct netxen_adapter *adapter) 257 { 258 int change, shift, err; 259 + uint64_t mask, old_mask, old_cmask; 260 struct pci_dev *pdev = adapter->pdev; 261 262 change = 0; ··· 272 273 if (change) { 274 old_mask = pdev->dma_mask; 275 + old_cmask = pdev->dev.coherent_dma_mask; 276 + 277 mask = (1ULL<<(32+shift)) - 1; 278 279 err = pci_set_dma_mask(pdev, mask); 280 if (err) 281 + goto err_out; 282 + 283 + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 284 + 285 + err = pci_set_consistent_dma_mask(pdev, mask); 286 + if (err) 287 + goto err_out; 288 + } 289 + dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift); 290 } 291 292 return 0; 293 + 294 + err_out: 295 + pci_set_dma_mask(pdev, old_mask); 296 + pci_set_consistent_dma_mask(pdev, old_cmask); 297 + return err; 298 } 299 300 static void netxen_check_options(struct netxen_adapter *adapter) ··· 1006 revision_id = pdev->revision; 1007 adapter->ahw.revision_id = revision_id; 1008 1009 + err = nx_set_dma_mask(adapter); 1010 if (err) 1011 goto err_out_free_netdev; 1012
+14 -16
drivers/net/pcnet32.c
··· 1611 if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 1612 && pcnet32_dwio_check(ioaddr)) { 1613 a = &pcnet32_dwio; 1614 - } else 1615 goto err_release_region; 1616 } 1617 1618 chip_version = ··· 1722 ret = -ENOMEM; 1723 goto err_release_region; 1724 } 1725 - SET_NETDEV_DEV(dev, &pdev->dev); 1726 1727 if (pcnet32_debug & NETIF_MSG_PROBE) 1728 printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr); ··· 1823 1824 spin_lock_init(&lp->lock); 1825 1826 - SET_NETDEV_DEV(dev, &pdev->dev); 1827 lp->name = chipname; 1828 lp->shared_irq = shared; 1829 lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */ ··· 1856 ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) 1857 lp->options |= PCNET32_PORT_FD; 1858 1859 - if (!a) { 1860 - if (pcnet32_debug & NETIF_MSG_PROBE) 1861 - printk(KERN_ERR PFX "No access methods\n"); 1862 - ret = -ENODEV; 1863 - goto err_free_consistent; 1864 - } 1865 lp->a = *a; 1866 1867 /* prior to register_netdev, dev->name is not yet correct */ ··· 1971 1972 return 0; 1973 1974 - err_free_ring: 1975 pcnet32_free_ring(dev); 1976 - err_free_consistent: 1977 pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), 1978 lp->init_block, lp->init_dma_addr); 1979 - err_free_netdev: 1980 free_netdev(dev); 1981 - err_release_region: 1982 release_region(ioaddr, PCNET32_TOTAL_SIZE); 1983 return ret; 1984 } ··· 2086 static int pcnet32_open(struct net_device *dev) 2087 { 2088 struct pcnet32_private *lp = netdev_priv(dev); 2089 unsigned long ioaddr = dev->base_addr; 2090 u16 val; 2091 int i; ··· 2147 lp->a.write_csr(ioaddr, 124, val); 2148 2149 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */ 2150 - if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT && 2151 - (lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX || 2152 - lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) { 2153 if (lp->options & PCNET32_PORT_ASEL) { 2154 lp->options = PCNET32_PORT_FD | PCNET32_PORT_100; 2155 if (netif_msg_link(lp))
··· 1611 if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 1612 && pcnet32_dwio_check(ioaddr)) { 1613 a = &pcnet32_dwio; 1614 + } else { 1615 + if (pcnet32_debug & NETIF_MSG_PROBE) 1616 + printk(KERN_ERR PFX "No access methods\n"); 1617 goto err_release_region; 1618 + } 1619 } 1620 1621 chip_version = ··· 1719 ret = -ENOMEM; 1720 goto err_release_region; 1721 } 1722 + 1723 + if (pdev) 1724 + SET_NETDEV_DEV(dev, &pdev->dev); 1725 1726 if (pcnet32_debug & NETIF_MSG_PROBE) 1727 printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr); ··· 1818 1819 spin_lock_init(&lp->lock); 1820 1821 lp->name = chipname; 1822 lp->shared_irq = shared; 1823 lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */ ··· 1852 ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) 1853 lp->options |= PCNET32_PORT_FD; 1854 1855 lp->a = *a; 1856 1857 /* prior to register_netdev, dev->name is not yet correct */ ··· 1973 1974 return 0; 1975 1976 + err_free_ring: 1977 pcnet32_free_ring(dev); 1978 pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), 1979 lp->init_block, lp->init_dma_addr); 1980 + err_free_netdev: 1981 free_netdev(dev); 1982 + err_release_region: 1983 release_region(ioaddr, PCNET32_TOTAL_SIZE); 1984 return ret; 1985 } ··· 2089 static int pcnet32_open(struct net_device *dev) 2090 { 2091 struct pcnet32_private *lp = netdev_priv(dev); 2092 + struct pci_dev *pdev = lp->pci_dev; 2093 unsigned long ioaddr = dev->base_addr; 2094 u16 val; 2095 int i; ··· 2149 lp->a.write_csr(ioaddr, 124, val); 2150 2151 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */ 2152 + if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT && 2153 + (pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX || 2154 + pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) { 2155 if (lp->options & PCNET32_PORT_ASEL) { 2156 lp->options = PCNET32_PORT_FD | PCNET32_PORT_100; 2157 if (netif_msg_link(lp))
+18 -16
drivers/net/ppp_generic.c
··· 1384 1385 /* create a fragment for each channel */ 1386 bits = B; 1387 - while (nfree > 0 && len > 0) { 1388 list = list->next; 1389 if (list == &ppp->channels) { 1390 i = 0; ··· 1431 *otherwise divide it according to the speed 1432 *of the channel we are going to transmit on 1433 */ 1434 - if (pch->speed == 0) { 1435 - flen = totlen/nfree ; 1436 - if (nbigger > 0) { 1437 - flen++; 1438 - nbigger--; 1439 - } 1440 - } else { 1441 - flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) / 1442 - ((totspeed*totfree)/pch->speed)) - hdrlen; 1443 - if (nbigger > 0) { 1444 - flen += ((totfree - nzero)*pch->speed)/totspeed; 1445 - nbigger -= ((totfree - nzero)*pch->speed)/ 1446 totspeed; 1447 } 1448 } 1449 - nfree--; 1450 1451 /* 1452 *check if we are on the last channel or 1453 *we exceded the lenght of the data to 1454 *fragment 1455 */ 1456 - if ((nfree == 0) || (flen > len)) 1457 flen = len; 1458 /* 1459 *it is not worth to tx on slow channels: ··· 1469 continue; 1470 } 1471 1472 - mtu = pch->chan->mtu + 2 - hdrlen; 1473 if (mtu < 4) 1474 mtu = 4; 1475 if (flen > mtu)
··· 1384 1385 /* create a fragment for each channel */ 1386 bits = B; 1387 + while (len > 0) { 1388 list = list->next; 1389 if (list == &ppp->channels) { 1390 i = 0; ··· 1431 *otherwise divide it according to the speed 1432 *of the channel we are going to transmit on 1433 */ 1434 + if (nfree > 0) { 1435 + if (pch->speed == 0) { 1436 + flen = totlen/nfree ; 1437 + if (nbigger > 0) { 1438 + flen++; 1439 + nbigger--; 1440 + } 1441 + } else { 1442 + flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) / 1443 + ((totspeed*totfree)/pch->speed)) - hdrlen; 1444 + if (nbigger > 0) { 1445 + flen += ((totfree - nzero)*pch->speed)/totspeed; 1446 + nbigger -= ((totfree - nzero)*pch->speed)/ 1447 totspeed; 1448 + } 1449 } 1450 + nfree--; 1451 } 1452 1453 /* 1454 *check if we are on the last channel or 1455 *we exceded the lenght of the data to 1456 *fragment 1457 */ 1458 + if ((nfree <= 0) || (flen > len)) 1459 flen = len; 1460 /* 1461 *it is not worth to tx on slow channels: ··· 1467 continue; 1468 } 1469 1470 + mtu = pch->chan->mtu - hdrlen; 1471 if (mtu < 4) 1472 mtu = 4; 1473 if (flen > mtu)
+1
drivers/net/pppoe.c
··· 1063 else { 1064 int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote); 1065 1066 while (++hash < PPPOE_HASH_SIZE) { 1067 po = pn->hash_table[hash]; 1068 if (po)
··· 1063 else { 1064 int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote); 1065 1066 + po = NULL; 1067 while (++hash < PPPOE_HASH_SIZE) { 1068 po = pn->hash_table[hash]; 1069 if (po)
+1
drivers/net/pppol2tp.c
··· 2680 static void __exit pppol2tp_exit(void) 2681 { 2682 unregister_pppox_proto(PX_PROTO_OL2TP); 2683 proto_unregister(&pppol2tp_sk_proto); 2684 } 2685
··· 2680 static void __exit pppol2tp_exit(void) 2681 { 2682 unregister_pppox_proto(PX_PROTO_OL2TP); 2683 + unregister_pernet_gen_device(pppol2tp_net_id, &pppol2tp_net_ops); 2684 proto_unregister(&pppol2tp_sk_proto); 2685 } 2686
+1 -1
drivers/net/s6gmac.c
··· 793 struct s6gmac *pd = netdev_priv(dev); 794 int i = 0; 795 struct phy_device *p = NULL; 796 - while ((!(p = pd->mii.bus->phy_map[i])) && (i < PHY_MAX_ADDR)) 797 i++; 798 p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 0, 799 PHY_INTERFACE_MODE_RGMII);
··· 793 struct s6gmac *pd = netdev_priv(dev); 794 int i = 0; 795 struct phy_device *p = NULL; 796 + while ((i < PHY_MAX_ADDR) && (!(p = pd->mii.bus->phy_map[i]))) 797 i++; 798 p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 0, 799 PHY_INTERFACE_MODE_RGMII);
+13 -1
drivers/net/sky2.c
··· 1488 sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL); 1489 #endif 1490 1491 err = sky2_rx_start(sky2); 1492 if (err) 1493 goto err_out; ··· 1501 sky2_read32(hw, B0_IMSK); 1502 1503 sky2_set_multicast(dev); 1504 1505 if (netif_msg_ifup(sky2)) 1506 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); ··· 1538 /* Number of list elements available for next tx */ 1539 static inline int tx_avail(const struct sky2_port *sky2) 1540 { 1541 return sky2->tx_pending - tx_dist(sky2->tx_cons, sky2->tx_prod); 1542 } 1543 ··· 1824 1825 if (netif_msg_ifdown(sky2)) 1826 printk(KERN_INFO PFX "%s: disabling interface\n", dev->name); 1827 1828 /* Force flow control off */ 1829 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); ··· 2370 { 2371 struct sky2_port *sky2 = netdev_priv(dev); 2372 2373 - if (netif_running(dev)) { 2374 netif_tx_lock(dev); 2375 sky2_tx_complete(sky2, last); 2376 netif_tx_unlock(dev); ··· 4294 spin_lock_init(&sky2->phy_lock); 4295 sky2->tx_pending = TX_DEF_PENDING; 4296 sky2->rx_pending = RX_DEF_PENDING; 4297 4298 hw->dev[port] = dev; 4299
··· 1488 sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL); 1489 #endif 1490 1491 + sky2->restarting = 0; 1492 + 1493 err = sky2_rx_start(sky2); 1494 if (err) 1495 goto err_out; ··· 1499 sky2_read32(hw, B0_IMSK); 1500 1501 sky2_set_multicast(dev); 1502 + 1503 + /* wake queue incase we are restarting */ 1504 + netif_wake_queue(dev); 1505 1506 if (netif_msg_ifup(sky2)) 1507 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); ··· 1533 /* Number of list elements available for next tx */ 1534 static inline int tx_avail(const struct sky2_port *sky2) 1535 { 1536 + if (unlikely(sky2->restarting)) 1537 + return 0; 1538 return sky2->tx_pending - tx_dist(sky2->tx_cons, sky2->tx_prod); 1539 } 1540 ··· 1817 1818 if (netif_msg_ifdown(sky2)) 1819 printk(KERN_INFO PFX "%s: disabling interface\n", dev->name); 1820 + 1821 + /* explicitly shut off tx incase we're restarting */ 1822 + sky2->restarting = 1; 1823 + netif_tx_disable(dev); 1824 1825 /* Force flow control off */ 1826 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); ··· 2359 { 2360 struct sky2_port *sky2 = netdev_priv(dev); 2361 2362 + if (likely(netif_running(dev) && !sky2->restarting)) { 2363 netif_tx_lock(dev); 2364 sky2_tx_complete(sky2, last); 2365 netif_tx_unlock(dev); ··· 4283 spin_lock_init(&sky2->phy_lock); 4284 sky2->tx_pending = TX_DEF_PENDING; 4285 sky2->rx_pending = RX_DEF_PENDING; 4286 + sky2->restarting = 0; 4287 4288 hw->dev[port] = dev; 4289
+1
drivers/net/sky2.h
··· 2051 u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */ 2052 u8 rx_csum; 2053 u8 wol; 2054 enum flow_control flow_mode; 2055 enum flow_control flow_status; 2056
··· 2051 u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */ 2052 u8 rx_csum; 2053 u8 wol; 2054 + u8 restarting; 2055 enum flow_control flow_mode; 2056 enum flow_control flow_status; 2057
+3 -3
drivers/net/tulip/de4x5.c
··· 5059 if ((id == 0) || (id == 65535)) continue; /* Valid ID? */ 5060 for (j=0; j<limit; j++) { /* Search PHY table */ 5061 if (id != phy_info[j].id) continue; /* ID match? */ 5062 - for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++); 5063 if (k < DE4X5_MAX_PHY) { 5064 memcpy((char *)&lp->phy[k], 5065 (char *)&phy_info[j], sizeof(struct phy_table)); ··· 5072 break; 5073 } 5074 if ((j == limit) && (i < DE4X5_MAX_MII)) { 5075 - for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++); 5076 lp->phy[k].addr = i; 5077 lp->phy[k].id = id; 5078 lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */ ··· 5091 purgatory: 5092 lp->active = 0; 5093 if (lp->phy[0].id) { /* Reset the PHY devices */ 5094 - for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++) { /*For each PHY*/ 5095 mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII); 5096 while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST); 5097
··· 5059 if ((id == 0) || (id == 65535)) continue; /* Valid ID? */ 5060 for (j=0; j<limit; j++) { /* Search PHY table */ 5061 if (id != phy_info[j].id) continue; /* ID match? */ 5062 + for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++); 5063 if (k < DE4X5_MAX_PHY) { 5064 memcpy((char *)&lp->phy[k], 5065 (char *)&phy_info[j], sizeof(struct phy_table)); ··· 5072 break; 5073 } 5074 if ((j == limit) && (i < DE4X5_MAX_MII)) { 5075 + for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++); 5076 lp->phy[k].addr = i; 5077 lp->phy[k].id = id; 5078 lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */ ··· 5091 purgatory: 5092 lp->active = 0; 5093 if (lp->phy[0].id) { /* Reset the PHY devices */ 5094 + for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++) { /*For each PHY*/ 5095 mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII); 5096 while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST); 5097
+6 -7
drivers/net/wireless/airo.c
··· 5918 readSsidRid(local, &SSID_rid); 5919 5920 /* Check if we asked for `any' */ 5921 - if(dwrq->flags == 0) { 5922 /* Just send an empty SSID list */ 5923 memset(&SSID_rid, 0, sizeof(SSID_rid)); 5924 } else { 5925 - int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; 5926 5927 /* Check the size of the string */ 5928 - if(dwrq->length > IW_ESSID_MAX_SIZE) { 5929 return -E2BIG ; 5930 - } 5931 /* Check if index is valid */ 5932 - if((index < 0) || (index >= 4)) { 5933 return -EINVAL; 5934 - } 5935 5936 /* Set the SSID */ 5937 memset(SSID_rid.ssids[index].ssid, 0, ··· 6818 return -EINVAL; 6819 } 6820 clear_bit (FLAG_RADIO_OFF, &local->flags); 6821 - for (i = 0; cap_rid.txPowerLevels[i] && (i < 8); i++) 6822 if (v == cap_rid.txPowerLevels[i]) { 6823 readConfigRid(local, 1); 6824 local->config.txPower = v;
··· 5918 readSsidRid(local, &SSID_rid); 5919 5920 /* Check if we asked for `any' */ 5921 + if (dwrq->flags == 0) { 5922 /* Just send an empty SSID list */ 5923 memset(&SSID_rid, 0, sizeof(SSID_rid)); 5924 } else { 5925 + unsigned index = (dwrq->flags & IW_ENCODE_INDEX) - 1; 5926 5927 /* Check the size of the string */ 5928 + if (dwrq->length > IW_ESSID_MAX_SIZE) 5929 return -E2BIG ; 5930 + 5931 /* Check if index is valid */ 5932 + if (index >= ARRAY_SIZE(SSID_rid.ssids)) 5933 return -EINVAL; 5934 5935 /* Set the SSID */ 5936 memset(SSID_rid.ssids[index].ssid, 0, ··· 6819 return -EINVAL; 6820 } 6821 clear_bit (FLAG_RADIO_OFF, &local->flags); 6822 + for (i = 0; i < 8 && cap_rid.txPowerLevels[i]; i++) 6823 if (v == cap_rid.txPowerLevels[i]) { 6824 readConfigRid(local, 1); 6825 local->config.txPower = v;
+2 -2
drivers/net/wireless/ath/ath9k/eeprom.c
··· 460 integer = swab32(eep->modalHeader.antCtrlCommon); 461 eep->modalHeader.antCtrlCommon = integer; 462 463 - for (i = 0; i < AR5416_MAX_CHAINS; i++) { 464 integer = swab32(eep->modalHeader.antCtrlChain[i]); 465 eep->modalHeader.antCtrlChain[i] = integer; 466 } ··· 914 ctlMode, numCtlModes, isHt40CtlMode, 915 (pCtlMode[ctlMode] & EXT_ADDITIVE)); 916 917 - for (i = 0; (i < AR5416_NUM_CTLS) && 918 pEepData->ctlIndex[i]; i++) { 919 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 920 " LOOP-Ctlidx %d: cfgCtl 0x%2.2x "
··· 460 integer = swab32(eep->modalHeader.antCtrlCommon); 461 eep->modalHeader.antCtrlCommon = integer; 462 463 + for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) { 464 integer = swab32(eep->modalHeader.antCtrlChain[i]); 465 eep->modalHeader.antCtrlChain[i] = integer; 466 } ··· 914 ctlMode, numCtlModes, isHt40CtlMode, 915 (pCtlMode[ctlMode] & EXT_ADDITIVE)); 916 917 + for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) && 918 pEepData->ctlIndex[i]; i++) { 919 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 920 " LOOP-Ctlidx %d: cfgCtl 0x%2.2x "
+1 -1
drivers/net/wireless/iwlwifi/iwl-3945.h
··· 112 #define IWL_TX_FIFO_NONE 7 113 114 /* Minimum number of queues. MAX_NUM is defined in hw specific files */ 115 - #define IWL_MIN_NUM_QUEUES 4 116 117 #define IEEE80211_DATA_LEN 2304 118 #define IEEE80211_4ADDR_LEN 30
··· 112 #define IWL_TX_FIFO_NONE 7 113 114 /* Minimum number of queues. MAX_NUM is defined in hw specific files */ 115 + #define IWL39_MIN_NUM_QUEUES 4 116 117 #define IEEE80211_DATA_LEN 2304 118 #define IEEE80211_4ADDR_LEN 30
+3
drivers/net/wireless/iwlwifi/iwl-core.c
··· 1332 1333 hw->wiphy->custom_regulatory = true; 1334 1335 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; 1336 /* we create the 802.11 header and a zero-length SSID element */ 1337 hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
··· 1332 1333 hw->wiphy->custom_regulatory = true; 1334 1335 + /* Firmware does not support this */ 1336 + hw->wiphy->disable_beacon_hints = true; 1337 + 1338 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; 1339 /* we create the 802.11 header and a zero-length SSID element */ 1340 hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
+6 -6
drivers/net/wireless/iwlwifi/iwl-debugfs.c
··· 308 return -ENODATA; 309 } 310 311 /* 4 characters for byte 0xYY */ 312 buf = kzalloc(buf_size, GFP_KERNEL); 313 if (!buf) { 314 IWL_ERR(priv, "Can not allocate Buffer\n"); 315 - return -ENOMEM; 316 - } 317 - 318 - ptr = priv->eeprom; 319 - if (!ptr) { 320 - IWL_ERR(priv, "Invalid EEPROM/OTP memory\n"); 321 return -ENOMEM; 322 } 323 pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s\n",
··· 308 return -ENODATA; 309 } 310 311 + ptr = priv->eeprom; 312 + if (!ptr) { 313 + IWL_ERR(priv, "Invalid EEPROM/OTP memory\n"); 314 + return -ENOMEM; 315 + } 316 + 317 /* 4 characters for byte 0xYY */ 318 buf = kzalloc(buf_size, GFP_KERNEL); 319 if (!buf) { 320 IWL_ERR(priv, "Can not allocate Buffer\n"); 321 return -ENOMEM; 322 } 323 pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s\n",
+4 -2
drivers/net/wireless/iwlwifi/iwl-dev.h
··· 258 #define IWL_TX_FIFO_HCCA_2 6 259 #define IWL_TX_FIFO_NONE 7 260 261 - /* Minimum number of queues. MAX_NUM is defined in hw specific files */ 262 - #define IWL_MIN_NUM_QUEUES 4 263 264 /* Power management (not Tx power) structures */ 265
··· 258 #define IWL_TX_FIFO_HCCA_2 6 259 #define IWL_TX_FIFO_NONE 7 260 261 + /* Minimum number of queues. MAX_NUM is defined in hw specific files. 262 + * Set the minimum to accommodate the 4 standard TX queues, 1 command 263 + * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */ 264 + #define IWL_MIN_NUM_QUEUES 10 265 266 /* Power management (not Tx power) structures */ 267
+12
drivers/net/wireless/iwlwifi/iwl-sta.c
··· 566 unsigned long flags; 567 568 spin_lock_irqsave(&priv->sta_lock, flags); 569 570 if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table)) 571 IWL_ERR(priv, "index %d not used in uCode key table.\n", ··· 575 576 priv->default_wep_key--; 577 memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0])); 578 ret = iwl_send_static_wepkey_cmd(priv, 1); 579 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n", 580 keyconf->keyidx, ret); ··· 860 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 861 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 862 863 ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 864 spin_unlock_irqrestore(&priv->sta_lock, flags); 865 return ret;
··· 566 unsigned long flags; 567 568 spin_lock_irqsave(&priv->sta_lock, flags); 569 + IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n", 570 + keyconf->keyidx); 571 572 if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table)) 573 IWL_ERR(priv, "index %d not used in uCode key table.\n", ··· 573 574 priv->default_wep_key--; 575 memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0])); 576 + if (iwl_is_rfkill(priv)) { 577 + IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n"); 578 + spin_unlock_irqrestore(&priv->sta_lock, flags); 579 + return 0; 580 + } 581 ret = iwl_send_static_wepkey_cmd(priv, 1); 582 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n", 583 keyconf->keyidx, ret); ··· 853 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; 854 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 855 856 + if (iwl_is_rfkill(priv)) { 857 + IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled. \n"); 858 + spin_unlock_irqrestore(&priv->sta_lock, flags); 859 + return 0; 860 + } 861 ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 862 spin_unlock_irqrestore(&priv->sta_lock, flags); 863 return ret;
+8 -6
drivers/net/wireless/iwlwifi/iwl-tx.c
··· 720 goto drop_unlock; 721 } 722 723 - spin_unlock_irqrestore(&priv->lock, flags); 724 - 725 hdr_len = ieee80211_hdrlen(fc); 726 727 /* Find (or create) index into station table for destination station */ ··· 727 if (sta_id == IWL_INVALID_STATION) { 728 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", 729 hdr->addr1); 730 - goto drop; 731 } 732 733 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); ··· 748 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; 749 swq_id = iwl_virtual_agg_queue_num(swq_id, txq_id); 750 } 751 - priv->stations[sta_id].tid[tid].tfds_in_queue++; 752 } 753 754 txq = &priv->txq[txq_id]; 755 q = &txq->q; 756 txq->swq_id = swq_id; 757 758 - spin_lock_irqsave(&priv->lock, flags); 759 760 /* Set up driver data for this TFD */ 761 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); ··· 903 904 drop_unlock: 905 spin_unlock_irqrestore(&priv->lock, flags); 906 - drop: 907 return -1; 908 } 909 EXPORT_SYMBOL(iwl_tx_skb); ··· 1171 IWL_ERR(priv, "Start AGG on invalid station\n"); 1172 return -ENXIO; 1173 } 1174 1175 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { 1176 IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
··· 720 goto drop_unlock; 721 } 722 723 hdr_len = ieee80211_hdrlen(fc); 724 725 /* Find (or create) index into station table for destination station */ ··· 729 if (sta_id == IWL_INVALID_STATION) { 730 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", 731 hdr->addr1); 732 + goto drop_unlock; 733 } 734 735 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); ··· 750 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; 751 swq_id = iwl_virtual_agg_queue_num(swq_id, txq_id); 752 } 753 } 754 755 txq = &priv->txq[txq_id]; 756 q = &txq->q; 757 txq->swq_id = swq_id; 758 759 + if (unlikely(iwl_queue_space(q) < q->high_mark)) 760 + goto drop_unlock; 761 + 762 + if (ieee80211_is_data_qos(fc)) 763 + priv->stations[sta_id].tid[tid].tfds_in_queue++; 764 765 /* Set up driver data for this TFD */ 766 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); ··· 902 903 drop_unlock: 904 spin_unlock_irqrestore(&priv->lock, flags); 905 return -1; 906 } 907 EXPORT_SYMBOL(iwl_tx_skb); ··· 1171 IWL_ERR(priv, "Start AGG on invalid station\n"); 1172 return -ENXIO; 1173 } 1174 + if (unlikely(tid >= MAX_TID_COUNT)) 1175 + return -EINVAL; 1176 1177 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { 1178 IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
+5 -2
drivers/net/wireless/iwlwifi/iwl3945-base.c
··· 3968 3969 hw->wiphy->custom_regulatory = true; 3970 3971 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; 3972 /* we create the 802.11 header and a zero-length SSID element */ 3973 hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2; ··· 4021 SET_IEEE80211_DEV(hw, &pdev->dev); 4022 4023 if ((iwl3945_mod_params.num_of_queues > IWL39_MAX_NUM_QUEUES) || 4024 - (iwl3945_mod_params.num_of_queues < IWL_MIN_NUM_QUEUES)) { 4025 IWL_ERR(priv, 4026 "invalid queues_num, should be between %d and %d\n", 4027 - IWL_MIN_NUM_QUEUES, IWL39_MAX_NUM_QUEUES); 4028 err = -EINVAL; 4029 goto out_ieee80211_free_hw; 4030 }
··· 3968 3969 hw->wiphy->custom_regulatory = true; 3970 3971 + /* Firmware does not support this */ 3972 + hw->wiphy->disable_beacon_hints = true; 3973 + 3974 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; 3975 /* we create the 802.11 header and a zero-length SSID element */ 3976 hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2; ··· 4018 SET_IEEE80211_DEV(hw, &pdev->dev); 4019 4020 if ((iwl3945_mod_params.num_of_queues > IWL39_MAX_NUM_QUEUES) || 4021 + (iwl3945_mod_params.num_of_queues < IWL39_MIN_NUM_QUEUES)) { 4022 IWL_ERR(priv, 4023 "invalid queues_num, should be between %d and %d\n", 4024 + IWL39_MIN_NUM_QUEUES, IWL39_MAX_NUM_QUEUES); 4025 err = -EINVAL; 4026 goto out_ieee80211_free_hw; 4027 }
+1
drivers/net/wireless/iwmc3200wifi/commands.c
··· 220 eeprom_rxiq = iwm_eeprom_access(iwm, IWM_EEPROM_CALIB_RXIQ); 221 if (IS_ERR(eeprom_rxiq)) { 222 IWM_ERR(iwm, "Couldn't access EEPROM RX IQ entry\n"); 223 return PTR_ERR(eeprom_rxiq); 224 } 225
··· 220 eeprom_rxiq = iwm_eeprom_access(iwm, IWM_EEPROM_CALIB_RXIQ); 221 if (IS_ERR(eeprom_rxiq)) { 222 IWM_ERR(iwm, "Couldn't access EEPROM RX IQ entry\n"); 223 + kfree(rxiq); 224 return PTR_ERR(eeprom_rxiq); 225 } 226
+2 -4
drivers/net/wireless/iwmc3200wifi/netdev.c
··· 106 int ret = 0; 107 108 wdev = iwm_wdev_alloc(sizeof_bus, dev); 109 - if (!wdev) { 110 - dev_err(dev, "no memory for wireless device instance\n"); 111 - return ERR_PTR(-ENOMEM); 112 - } 113 114 iwm = wdev_to_iwm(wdev); 115 iwm->bus_ops = if_ops;
··· 106 int ret = 0; 107 108 wdev = iwm_wdev_alloc(sizeof_bus, dev); 109 + if (IS_ERR(wdev)) 110 + return wdev; 111 112 iwm = wdev_to_iwm(wdev); 113 iwm->bus_ops = if_ops;
+1 -1
drivers/net/wireless/libertas/11d.c
··· 47 { 48 u8 i; 49 50 - for (i = 0; region[i] && i < COUNTRY_CODE_LEN; i++) 51 region[i] = toupper(region[i]); 52 53 for (i = 0; i < ARRAY_SIZE(region_code_mapping); i++) {
··· 47 { 48 u8 i; 49 50 + for (i = 0; i < COUNTRY_CODE_LEN && region[i]; i++) 51 region[i] = toupper(region[i]); 52 53 for (i = 0; i < ARRAY_SIZE(region_code_mapping); i++) {
+8 -10
drivers/net/wireless/libertas/assoc.c
··· 1 /* Copyright (C) 2006, Red Hat, Inc. */ 2 3 #include <linux/types.h> 4 #include <linux/etherdevice.h> 5 #include <linux/ieee80211.h> 6 #include <linux/if_arp.h> ··· 44 u16 *rates_size) 45 { 46 u8 *card_rates = lbs_bg_rates; 47 - size_t num_card_rates = sizeof(lbs_bg_rates); 48 int ret = 0, i, j; 49 - u8 tmp[30]; 50 size_t tmp_size = 0; 51 52 /* For each rate in card_rates that exists in rate1, copy to tmp */ 53 - for (i = 0; card_rates[i] && (i < num_card_rates); i++) { 54 - for (j = 0; rates[j] && (j < *rates_size); j++) { 55 if (rates[j] == card_rates[i]) 56 tmp[tmp_size++] = card_rates[i]; 57 } 58 } 59 60 lbs_deb_hex(LBS_DEB_JOIN, "AP rates ", rates, *rates_size); 61 - lbs_deb_hex(LBS_DEB_JOIN, "card rates ", card_rates, num_card_rates); 62 lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size); 63 lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate); 64 ··· 70 lbs_pr_alert("Previously set fixed data rate %#x isn't " 71 "compatible with the network.\n", priv->cur_rate); 72 ret = -1; 73 - goto done; 74 } 75 - ret = 0; 76 - 77 done: 78 memset(rates, 0, *rates_size); 79 *rates_size = min_t(int, tmp_size, *rates_size); ··· 320 rates = (struct mrvl_ie_rates_param_set *) pos; 321 rates->header.type = cpu_to_le16(TLV_TYPE_RATES); 322 memcpy(&rates->rates, &bss->rates, MAX_RATES); 323 - tmplen = MAX_RATES; 324 if (get_common_rates(priv, rates->rates, &tmplen)) { 325 ret = -1; 326 goto done; ··· 596 597 /* Copy Data rates from the rates recorded in scan response */ 598 memset(cmd.bss.rates, 0, sizeof(cmd.bss.rates)); 599 - ratesize = min_t(u16, sizeof(cmd.bss.rates), MAX_RATES); 600 memcpy(cmd.bss.rates, bss->rates, ratesize); 601 if (get_common_rates(priv, cmd.bss.rates, &ratesize)) { 602 lbs_deb_join("ADHOC_JOIN: get_common_rates returned error.\n");
··· 1 /* Copyright (C) 2006, Red Hat, Inc. */ 2 3 #include <linux/types.h> 4 + #include <linux/kernel.h> 5 #include <linux/etherdevice.h> 6 #include <linux/ieee80211.h> 7 #include <linux/if_arp.h> ··· 43 u16 *rates_size) 44 { 45 u8 *card_rates = lbs_bg_rates; 46 int ret = 0, i, j; 47 + u8 tmp[(ARRAY_SIZE(lbs_bg_rates) - 1) * (*rates_size - 1)]; 48 size_t tmp_size = 0; 49 50 /* For each rate in card_rates that exists in rate1, copy to tmp */ 51 + for (i = 0; i < ARRAY_SIZE(lbs_bg_rates) && card_rates[i]; i++) { 52 + for (j = 0; j < *rates_size && rates[j]; j++) { 53 if (rates[j] == card_rates[i]) 54 tmp[tmp_size++] = card_rates[i]; 55 } 56 } 57 58 lbs_deb_hex(LBS_DEB_JOIN, "AP rates ", rates, *rates_size); 59 + lbs_deb_hex(LBS_DEB_JOIN, "card rates ", card_rates, 60 + ARRAY_SIZE(lbs_bg_rates)); 61 lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size); 62 lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate); 63 ··· 69 lbs_pr_alert("Previously set fixed data rate %#x isn't " 70 "compatible with the network.\n", priv->cur_rate); 71 ret = -1; 72 } 73 done: 74 memset(rates, 0, *rates_size); 75 *rates_size = min_t(int, tmp_size, *rates_size); ··· 322 rates = (struct mrvl_ie_rates_param_set *) pos; 323 rates->header.type = cpu_to_le16(TLV_TYPE_RATES); 324 memcpy(&rates->rates, &bss->rates, MAX_RATES); 325 + tmplen = min_t(u16, ARRAY_SIZE(rates->rates), MAX_RATES); 326 if (get_common_rates(priv, rates->rates, &tmplen)) { 327 ret = -1; 328 goto done; ··· 598 599 /* Copy Data rates from the rates recorded in scan response */ 600 memset(cmd.bss.rates, 0, sizeof(cmd.bss.rates)); 601 + ratesize = min_t(u16, ARRAY_SIZE(cmd.bss.rates), MAX_RATES); 602 memcpy(cmd.bss.rates, bss->rates, ratesize); 603 if (get_common_rates(priv, cmd.bss.rates, &ratesize)) { 604 lbs_deb_join("ADHOC_JOIN: get_common_rates returned error.\n");
+2 -1
drivers/net/wireless/libertas/scan.c
··· 5 * for sending scan commands to the firmware. 6 */ 7 #include <linux/types.h> 8 #include <linux/etherdevice.h> 9 #include <linux/if_arp.h> 10 #include <asm/unaligned.h> ··· 877 iwe.u.bitrate.disabled = 0; 878 iwe.u.bitrate.value = 0; 879 880 - for (j = 0; bss->rates[j] && (j < sizeof(bss->rates)); j++) { 881 /* Bit rate given in 500 kb/s units */ 882 iwe.u.bitrate.value = bss->rates[j] * 500000; 883 current_val = iwe_stream_add_value(info, start, current_val,
··· 5 * for sending scan commands to the firmware. 6 */ 7 #include <linux/types.h> 8 + #include <linux/kernel.h> 9 #include <linux/etherdevice.h> 10 #include <linux/if_arp.h> 11 #include <asm/unaligned.h> ··· 876 iwe.u.bitrate.disabled = 0; 877 iwe.u.bitrate.value = 0; 878 879 + for (j = 0; j < ARRAY_SIZE(bss->rates) && bss->rates[j]; j++) { 880 /* Bit rate given in 500 kb/s units */ 881 iwe.u.bitrate.value = bss->rates[j] * 500000; 882 current_val = iwe_stream_add_value(info, start, current_val,
+1 -1
drivers/net/wireless/zd1211rw/zd_mac.c
··· 698 && !mac->pass_ctrl) 699 return 0; 700 701 - fc = *(__le16 *)buffer; 702 need_padding = ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc); 703 704 skb = dev_alloc_skb(length + (need_padding ? 2 : 0));
··· 698 && !mac->pass_ctrl) 699 return 0; 700 701 + fc = get_unaligned((__le16*)buffer); 702 need_padding = ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc); 703 704 skb = dev_alloc_skb(length + (need_padding ? 2 : 0));
+2 -2
drivers/scsi/scsi_transport_iscsi.c
··· 990 struct iscsi_uevent *ev; 991 int len = NLMSG_SPACE(sizeof(*ev) + data_size); 992 993 - skb = alloc_skb(len, GFP_NOIO); 994 if (!skb) { 995 printk(KERN_ERR "can not deliver iscsi offload message:OOM\n"); 996 return -ENOMEM; ··· 1012 1013 memcpy((char *)ev + sizeof(*ev), data, data_size); 1014 1015 - return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_NOIO); 1016 } 1017 EXPORT_SYMBOL_GPL(iscsi_offload_mesg); 1018
··· 990 struct iscsi_uevent *ev; 991 int len = NLMSG_SPACE(sizeof(*ev) + data_size); 992 993 + skb = alloc_skb(len, GFP_ATOMIC); 994 if (!skb) { 995 printk(KERN_ERR "can not deliver iscsi offload message:OOM\n"); 996 return -ENOMEM; ··· 1012 1013 memcpy((char *)ev + sizeof(*ev), data, data_size); 1014 1015 + return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_ATOMIC); 1016 } 1017 EXPORT_SYMBOL_GPL(iscsi_offload_mesg); 1018
+1 -1
include/linux/inetdevice.h
··· 82 83 #define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING) 84 #define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING) 85 - #define IN_DEV_RPFILTER(in_dev) IN_DEV_ANDCONF((in_dev), RP_FILTER) 86 #define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \ 87 ACCEPT_SOURCE_ROUTE) 88 #define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY)
··· 82 83 #define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING) 84 #define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING) 85 + #define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER) 86 #define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \ 87 ACCEPT_SOURCE_ROUTE) 88 #define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY)
+11 -1
include/net/bluetooth/rfcomm.h
··· 355 }; 356 357 int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); 358 int rfcomm_init_ttys(void); 359 void rfcomm_cleanup_ttys(void); 360 - 361 #endif /* __RFCOMM_H */
··· 355 }; 356 357 int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); 358 + 359 + #ifdef CONFIG_BT_RFCOMM_TTY 360 int rfcomm_init_ttys(void); 361 void rfcomm_cleanup_ttys(void); 362 + #else 363 + static inline int rfcomm_init_ttys(void) 364 + { 365 + return 0; 366 + } 367 + static inline void rfcomm_cleanup_ttys(void) 368 + { 369 + } 370 + #endif 371 #endif /* __RFCOMM_H */
+5
include/net/cfg80211.h
··· 979 * channels at a later time. This can be used for devices which do not 980 * have calibration information gauranteed for frequencies or settings 981 * outside of its regulatory domain. 982 * @reg_notifier: the driver's regulatory notification callback 983 * @regd: the driver's regulatory domain, if one was requested via 984 * the regulatory_hint() API. This can be used by the driver ··· 1008 1009 bool custom_regulatory; 1010 bool strict_regulatory; 1011 1012 enum cfg80211_signal_type signal_type; 1013
··· 979 * channels at a later time. This can be used for devices which do not 980 * have calibration information gauranteed for frequencies or settings 981 * outside of its regulatory domain. 982 + * @disable_beacon_hints: enable this if your driver needs to ensure that 983 + * passive scan flags and beaconing flags may not be lifted by cfg80211 984 + * due to regulatory beacon hints. For more information on beacon 985 + * hints read the documentation for regulatory_hint_found_beacon() 986 * @reg_notifier: the driver's regulatory notification callback 987 * @regd: the driver's regulatory domain, if one was requested via 988 * the regulatory_hint() API. This can be used by the driver ··· 1004 1005 bool custom_regulatory; 1006 bool strict_regulatory; 1007 + bool disable_beacon_hints; 1008 1009 enum cfg80211_signal_type signal_type; 1010
+19 -8
net/bluetooth/rfcomm/core.c
··· 2080 /* ---- Initialization ---- */ 2081 static int __init rfcomm_init(void) 2082 { 2083 l2cap_load(); 2084 2085 hci_register_cb(&rfcomm_cb); 2086 2087 rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd"); 2088 if (IS_ERR(rfcomm_thread)) { 2089 - hci_unregister_cb(&rfcomm_cb); 2090 - return PTR_ERR(rfcomm_thread); 2091 } 2092 2093 if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0) 2094 BT_ERR("Failed to create RFCOMM info file"); 2095 2096 - rfcomm_init_sockets(); 2097 2098 - #ifdef CONFIG_BT_RFCOMM_TTY 2099 - rfcomm_init_ttys(); 2100 - #endif 2101 2102 BT_INFO("RFCOMM ver %s", VERSION); 2103 2104 return 0; 2105 } 2106 2107 static void __exit rfcomm_exit(void) ··· 2125 2126 kthread_stop(rfcomm_thread); 2127 2128 - #ifdef CONFIG_BT_RFCOMM_TTY 2129 rfcomm_cleanup_ttys(); 2130 - #endif 2131 2132 rfcomm_cleanup_sockets(); 2133 }
··· 2080 /* ---- Initialization ---- */ 2081 static int __init rfcomm_init(void) 2082 { 2083 + int ret; 2084 + 2085 l2cap_load(); 2086 2087 hci_register_cb(&rfcomm_cb); 2088 2089 rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd"); 2090 if (IS_ERR(rfcomm_thread)) { 2091 + ret = PTR_ERR(rfcomm_thread); 2092 + goto out_thread; 2093 } 2094 2095 if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0) 2096 BT_ERR("Failed to create RFCOMM info file"); 2097 2098 + ret = rfcomm_init_ttys(); 2099 + if (ret) 2100 + goto out_tty; 2101 2102 + ret = rfcomm_init_sockets(); 2103 + if (ret) 2104 + goto out_sock; 2105 2106 BT_INFO("RFCOMM ver %s", VERSION); 2107 2108 return 0; 2109 + 2110 + out_sock: 2111 + rfcomm_cleanup_ttys(); 2112 + out_tty: 2113 + kthread_stop(rfcomm_thread); 2114 + out_thread: 2115 + hci_unregister_cb(&rfcomm_cb); 2116 + 2117 + return ret; 2118 } 2119 2120 static void __exit rfcomm_exit(void) ··· 2112 2113 kthread_stop(rfcomm_thread); 2114 2115 rfcomm_cleanup_ttys(); 2116 2117 rfcomm_cleanup_sockets(); 2118 }
+1 -1
net/bluetooth/rfcomm/sock.c
··· 1132 return err; 1133 } 1134 1135 - void __exit rfcomm_cleanup_sockets(void) 1136 { 1137 class_remove_file(bt_class, &class_attr_rfcomm); 1138
··· 1132 return err; 1133 } 1134 1135 + void rfcomm_cleanup_sockets(void) 1136 { 1137 class_remove_file(bt_class, &class_attr_rfcomm); 1138
+16 -9
net/core/dev.c
··· 3865 3866 ASSERT_RTNL(); 3867 3868 err = __hw_addr_del(&dev->uc, addr, dev->addr_len, 3869 NETDEV_HW_ADDR_T_UNICAST); 3870 if (!err) 3871 __dev_set_rx_mode(dev); 3872 return err; 3873 } 3874 EXPORT_SYMBOL(dev_unicast_delete); ··· 3891 3892 ASSERT_RTNL(); 3893 3894 err = __hw_addr_add(&dev->uc, addr, dev->addr_len, 3895 NETDEV_HW_ADDR_T_UNICAST); 3896 if (!err) 3897 __dev_set_rx_mode(dev); 3898 return err; 3899 } 3900 EXPORT_SYMBOL(dev_unicast_add); ··· 3953 * @from: source device 3954 * 3955 * Add newly added addresses to the destination device and release 3956 - * addresses that have no users left. 3957 * 3958 * This function is intended to be called from the dev->set_rx_mode 3959 * function of layered software devices. ··· 3963 { 3964 int err = 0; 3965 3966 - ASSERT_RTNL(); 3967 - 3968 if (to->addr_len != from->addr_len) 3969 return -EINVAL; 3970 3971 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len); 3972 if (!err) 3973 __dev_set_rx_mode(to); 3974 return err; 3975 } 3976 EXPORT_SYMBOL(dev_unicast_sync); ··· 3986 */ 3987 void dev_unicast_unsync(struct net_device *to, struct net_device *from) 3988 { 3989 - ASSERT_RTNL(); 3990 - 3991 if (to->addr_len != from->addr_len) 3992 return; 3993 3994 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len); 3995 __dev_set_rx_mode(to); 3996 } 3997 EXPORT_SYMBOL(dev_unicast_unsync); 3998 3999 static void dev_unicast_flush(struct net_device *dev) 4000 { 4001 - /* rtnl_mutex must be held here */ 4002 - 4003 __hw_addr_flush(&dev->uc); 4004 } 4005 4006 static void dev_unicast_init(struct net_device *dev) 4007 { 4008 - /* rtnl_mutex must be held here */ 4009 - 4010 __hw_addr_init(&dev->uc); 4011 } 4012 4013
··· 3865 3866 ASSERT_RTNL(); 3867 3868 + netif_addr_lock_bh(dev); 3869 err = __hw_addr_del(&dev->uc, addr, dev->addr_len, 3870 NETDEV_HW_ADDR_T_UNICAST); 3871 if (!err) 3872 __dev_set_rx_mode(dev); 3873 + netif_addr_unlock_bh(dev); 3874 return err; 3875 } 3876 EXPORT_SYMBOL(dev_unicast_delete); ··· 3889 3890 ASSERT_RTNL(); 3891 3892 + netif_addr_lock_bh(dev); 3893 err = __hw_addr_add(&dev->uc, addr, dev->addr_len, 3894 NETDEV_HW_ADDR_T_UNICAST); 3895 if (!err) 3896 __dev_set_rx_mode(dev); 3897 + netif_addr_unlock_bh(dev); 3898 return err; 3899 } 3900 EXPORT_SYMBOL(dev_unicast_add); ··· 3949 * @from: source device 3950 * 3951 * Add newly added addresses to the destination device and release 3952 + * addresses that have no users left. The source device must be 3953 + * locked by netif_tx_lock_bh. 3954 * 3955 * This function is intended to be called from the dev->set_rx_mode 3956 * function of layered software devices. ··· 3958 { 3959 int err = 0; 3960 3961 if (to->addr_len != from->addr_len) 3962 return -EINVAL; 3963 3964 + netif_addr_lock_bh(to); 3965 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len); 3966 if (!err) 3967 __dev_set_rx_mode(to); 3968 + netif_addr_unlock_bh(to); 3969 return err; 3970 } 3971 EXPORT_SYMBOL(dev_unicast_sync); ··· 3981 */ 3982 void dev_unicast_unsync(struct net_device *to, struct net_device *from) 3983 { 3984 if (to->addr_len != from->addr_len) 3985 return; 3986 3987 + netif_addr_lock_bh(from); 3988 + netif_addr_lock(to); 3989 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len); 3990 __dev_set_rx_mode(to); 3991 + netif_addr_unlock(to); 3992 + netif_addr_unlock_bh(from); 3993 } 3994 EXPORT_SYMBOL(dev_unicast_unsync); 3995 3996 static void dev_unicast_flush(struct net_device *dev) 3997 { 3998 + netif_addr_lock_bh(dev); 3999 __hw_addr_flush(&dev->uc); 4000 + netif_addr_unlock_bh(dev); 4001 } 4002 4003 static void dev_unicast_init(struct net_device *dev) 4004 { 4005 + netif_addr_lock_bh(dev); 4006 __hw_addr_init(&dev->uc); 4007 + netif_addr_unlock_bh(dev); 4008 } 4009 4010
+1 -1
net/core/net_namespace.c
··· 488 */ 489 490 ng->len = id; 491 - memcpy(&ng->ptr, &old_ng->ptr, old_ng->len); 492 493 rcu_assign_pointer(net->gen, ng); 494 call_rcu(&old_ng->rcu, net_generic_release);
··· 488 */ 489 490 ng->len = id; 491 + memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*)); 492 493 rcu_assign_pointer(net->gen, ng); 494 call_rcu(&old_ng->rcu, net_generic_release);
+3 -1
net/ipv4/arp.c
··· 1304 hbuffer[k++] = hex_asc_lo(n->ha[j]); 1305 hbuffer[k++] = ':'; 1306 } 1307 - hbuffer[--k] = 0; 1308 #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) 1309 } 1310 #endif
··· 1304 hbuffer[k++] = hex_asc_lo(n->ha[j]); 1305 hbuffer[k++] = ':'; 1306 } 1307 + if (k != 0) 1308 + --k; 1309 + hbuffer[k] = 0; 1310 #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) 1311 } 1312 #endif
+1 -1
net/mac80211/mlme.c
··· 721 { 722 struct ieee80211_local *local = (void *) data; 723 724 - if (local->quiescing) 725 return; 726 727 queue_work(local->hw.workqueue, &local->dynamic_ps_enable_work);
··· 721 { 722 struct ieee80211_local *local = (void *) data; 723 724 + if (local->quiescing || local->suspended) 725 return; 726 727 queue_work(local->hw.workqueue, &local->dynamic_ps_enable_work);
+15 -9
net/mac80211/pm.c
··· 55 56 rcu_read_unlock(); 57 58 - /* flush again, in case driver queued work */ 59 - flush_workqueue(local->hw.workqueue); 60 - 61 - /* stop hardware - this must stop RX */ 62 - if (local->open_count) { 63 - ieee80211_led_radio(local, false); 64 - drv_stop(local); 65 - } 66 - 67 /* remove STAs */ 68 spin_lock_irqsave(&local->sta_lock, flags); 69 list_for_each_entry(sta, &local->sta_list, list) { ··· 102 drv_remove_interface(local, &conf); 103 } 104 105 local->suspended = true; 106 local->quiescing = false; 107 108 return 0;
··· 55 56 rcu_read_unlock(); 57 58 /* remove STAs */ 59 spin_lock_irqsave(&local->sta_lock, flags); 60 list_for_each_entry(sta, &local->sta_list, list) { ··· 111 drv_remove_interface(local, &conf); 112 } 113 114 + /* stop hardware - this must stop RX */ 115 + if (local->open_count) { 116 + ieee80211_led_radio(local, false); 117 + drv_stop(local); 118 + } 119 + 120 + /* 121 + * flush again, in case the driver queued work -- it 122 + * shouldn't be doing that (or should cancel everything 123 + * in the stop callback), but better safe than sorry. 124 + */ 125 + flush_workqueue(local->hw.workqueue); 126 + 127 local->suspended = true; 128 + /* need suspended to be visible before quiescing is false */ 129 + barrier(); 130 local->quiescing = false; 131 132 return 0;
+12
net/mac80211/rx.c
··· 2453 return; 2454 } 2455 2456 if (status->flag & RX_FLAG_HT) { 2457 /* rate_idx is MCS index */ 2458 if (WARN_ON(status->rate_idx < 0 ||
··· 2453 return; 2454 } 2455 2456 + /* 2457 + * If we're suspending, it is possible although not too likely 2458 + * that we'd be receiving frames after having already partially 2459 + * quiesced the stack. We can't process such frames then since 2460 + * that might, for example, cause stations to be added or other 2461 + * driver callbacks be invoked. 2462 + */ 2463 + if (unlikely(local->quiescing || local->suspended)) { 2464 + kfree_skb(skb); 2465 + return; 2466 + } 2467 + 2468 if (status->flag & RX_FLAG_HT) { 2469 /* rate_idx is MCS index */ 2470 if (WARN_ON(status->rate_idx < 0 ||
+1 -1
net/netlabel/netlabel_kapi.c
··· 151 addr6 = addr; 152 mask6 = mask; 153 map6 = kzalloc(sizeof(*map6), GFP_ATOMIC); 154 - if (map4 == NULL) 155 goto cfg_unlbl_map_add_failure; 156 map6->type = NETLBL_NLTYPE_UNLABELED; 157 ipv6_addr_copy(&map6->list.addr, addr6);
··· 151 addr6 = addr; 152 mask6 = mask; 153 map6 = kzalloc(sizeof(*map6), GFP_ATOMIC); 154 + if (map6 == NULL) 155 goto cfg_unlbl_map_add_failure; 156 map6->type = NETLBL_NLTYPE_UNLABELED; 157 ipv6_addr_copy(&map6->list.addr, addr6);
+5 -4
net/wireless/reg.c
··· 1089 1090 chan->beacon_found = true; 1091 1092 chan_before.center_freq = chan->center_freq; 1093 chan_before.flags = chan->flags; 1094 1095 - if ((chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) && 1096 - !(chan->orig_flags & IEEE80211_CHAN_PASSIVE_SCAN)) { 1097 chan->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; 1098 channel_changed = true; 1099 } 1100 1101 - if ((chan->flags & IEEE80211_CHAN_NO_IBSS) && 1102 - !(chan->orig_flags & IEEE80211_CHAN_NO_IBSS)) { 1103 chan->flags &= ~IEEE80211_CHAN_NO_IBSS; 1104 channel_changed = true; 1105 }
··· 1089 1090 chan->beacon_found = true; 1091 1092 + if (wiphy->disable_beacon_hints) 1093 + return; 1094 + 1095 chan_before.center_freq = chan->center_freq; 1096 chan_before.flags = chan->flags; 1097 1098 + if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) { 1099 chan->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN; 1100 channel_changed = true; 1101 } 1102 1103 + if (chan->flags & IEEE80211_CHAN_NO_IBSS) { 1104 chan->flags &= ~IEEE80211_CHAN_NO_IBSS; 1105 channel_changed = true; 1106 }
+2 -1
net/wireless/reg.h
··· 30 * non-radar 5 GHz channels. 31 * 32 * Drivers do not need to call this, cfg80211 will do it for after a scan 33 - * on a newly found BSS. 34 */ 35 int regulatory_hint_found_beacon(struct wiphy *wiphy, 36 struct ieee80211_channel *beacon_chan,
··· 30 * non-radar 5 GHz channels. 31 * 32 * Drivers do not need to call this, cfg80211 will do it for you after a 33 + * scan on a newly found BSS. If you cannot make use of this feature you 34 + * can set the wiphy->disable_beacon_hints to true. 35 */ 36 int regulatory_hint_found_beacon(struct wiphy *wiphy, 37 struct ieee80211_channel *beacon_chan,
+3 -1
net/wireless/scan.c
··· 118 119 if (!ie1 && !ie2) 120 return 0; 121 - if (!ie1) 122 return -1; 123 124 r = memcmp(ie1 + 2, ie2 + 2, min(ie1[1], ie2[1])); ··· 171 ie = find_ie(WLAN_EID_MESH_CONFIG, 172 a->information_elements, 173 a->len_information_elements); 174 if (ie[1] != IEEE80211_MESH_CONFIG_LEN) 175 return false; 176
··· 118 119 if (!ie1 && !ie2) 120 return 0; 121 + if (!ie1 || !ie2) 122 return -1; 123 124 r = memcmp(ie1 + 2, ie2 + 2, min(ie1[1], ie2[1])); ··· 171 ie = find_ie(WLAN_EID_MESH_CONFIG, 172 a->information_elements, 173 a->len_information_elements); 174 + if (!ie) 175 + return false; 176 if (ie[1] != IEEE80211_MESH_CONFIG_LEN) 177 return false; 178