Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (25 commits)
smc91c92_cs: define multicast_table as unsigned char
can: avoids a false warning
e1000e: stop cleaning when we reach tx_ring->next_to_use
igb: restrict WoL for 82576 ET2 Quad Port Server Adapter
virtio_net: missing sg_init_table
Revert "tcp: Set CHECKSUM_UNNECESSARY in tcp_init_nondata_skb"
iwlwifi: need check for valid qos packet before free
tcp: Set CHECKSUM_UNNECESSARY in tcp_init_nondata_skb
udp: fix for unicast RX path optimization
myri10ge: fix rx_pause in myri10ge_set_pauseparam
net: corrected documentation for hardware time stamping
stmmac: use resource_size()
x.25 attempts to negotiate invalid throughput
x25: Patch to fix bug 15678 - x25 accesses fields beyond end of packet.
bridge: Fix IGMP3 report parsing
cnic: Fix crash during bnx2x MTU change.
qlcnic: fix set mac addr
r6040: fix r6040_multicast_list
vhost-net: fix vq_memory_access_ok error checking
ath9k: fix double calls to ath_radio_enable
...

+348 -138
+46 -30
Documentation/networking/timestamping.txt
··· 41 SOF_TIMESTAMPING_TX/RX determine how time stamps are generated. 42 SOF_TIMESTAMPING_RAW/SYS determine how they are reported in the 43 following control message: 44 - struct scm_timestamping { 45 - struct timespec systime; 46 - struct timespec hwtimetrans; 47 - struct timespec hwtimeraw; 48 - }; 49 50 recvmsg() can be used to get this control message for regular incoming 51 packets. For send time stamps the outgoing packet is looped back to ··· 88 SIOCSHWTSTAMP: 89 90 Hardware time stamping must also be initialized for each device driver 91 - that is expected to do hardware time stamping. The parameter is: 92 93 struct hwtstamp_config { 94 - int flags; /* no flags defined right now, must be zero */ 95 - int tx_type; /* HWTSTAMP_TX_* */ 96 - int rx_filter; /* HWTSTAMP_FILTER_* */ 97 }; 98 99 Desired behavior is passed into the kernel and to a specific device by ··· 141 /* time stamp any incoming packet */ 142 HWTSTAMP_FILTER_ALL, 143 144 - /* return value: time stamp all packets requested plus some others */ 145 - HWTSTAMP_FILTER_SOME, 146 147 /* PTP v1, UDP, any kind of event packet */ 148 HWTSTAMP_FILTER_PTP_V1_L4_EVENT, 149 150 - ... 151 }; 152 153 154 DEVICE IMPLEMENTATION 155 156 A driver which supports hardware time stamping must support the 157 - SIOCSHWTSTAMP ioctl. Time stamps for received packets must be stored 158 - in the skb with skb_hwtstamp_set(). 159 160 Time stamps for outgoing packets are to be generated as follows: 161 - - In hard_start_xmit(), check if skb_hwtstamp_check_tx_hardware() 162 - returns non-zero. If yes, then the driver is expected 163 - to do hardware time stamping. 164 - If this is possible for the skb and requested, then declare 165 - that the driver is doing the time stamping by calling 166 - skb_hwtstamp_tx_in_progress(). A driver not supporting 167 - hardware time stamping doesn't do that. A driver must never 168 - touch sk_buff::tstamp! It is used to store how time stamping 169 - for an outgoing packets is to be done. 170 - As soon as the driver has sent the packet and/or obtained a 171 hardware time stamp for it, it passes the time stamp back by 172 calling skb_hwtstamp_tx() with the original skb, the raw 173 - hardware time stamp and a handle to the device (necessary 174 - to convert the hardware time stamp to system time). If obtaining 175 - the hardware time stamp somehow fails, then the driver should 176 - not fall back to software time stamping. The rationale is that 177 - this would occur at a later time in the processing pipeline 178 - than other software time stamping and therefore could lead 179 - to unexpected deltas between time stamps. 180 - - If the driver did not call skb_hwtstamp_tx_in_progress(), then 181 dev_hard_start_xmit() checks whether software time stamping 182 is wanted as fallback and potentially generates the time stamp.
··· 41 SOF_TIMESTAMPING_TX/RX determine how time stamps are generated. 42 SOF_TIMESTAMPING_RAW/SYS determine how they are reported in the 43 following control message: 44 + 45 + struct scm_timestamping { 46 + struct timespec systime; 47 + struct timespec hwtimetrans; 48 + struct timespec hwtimeraw; 49 + }; 50 51 recvmsg() can be used to get this control message for regular incoming 52 packets. For send time stamps the outgoing packet is looped back to ··· 87 SIOCSHWTSTAMP: 88 89 Hardware time stamping must also be initialized for each device driver 90 + that is expected to do hardware time stamping. The parameter is defined in 91 + /include/linux/net_tstamp.h as: 92 93 struct hwtstamp_config { 94 + int flags; /* no flags defined right now, must be zero */ 95 + int tx_type; /* HWTSTAMP_TX_* */ 96 + int rx_filter; /* HWTSTAMP_FILTER_* */ 97 }; 98 99 Desired behavior is passed into the kernel and to a specific device by ··· 139 /* time stamp any incoming packet */ 140 HWTSTAMP_FILTER_ALL, 141 142 + /* return value: time stamp all packets requested plus some others */ 143 + HWTSTAMP_FILTER_SOME, 144 145 /* PTP v1, UDP, any kind of event packet */ 146 HWTSTAMP_FILTER_PTP_V1_L4_EVENT, 147 148 + /* for the complete list of values, please check 149 + * the include file /include/linux/net_tstamp.h 150 + */ 151 }; 152 153 154 DEVICE IMPLEMENTATION 155 156 A driver which supports hardware time stamping must support the 157 + SIOCSHWTSTAMP ioctl and update the supplied struct hwtstamp_config with 158 + the actual values as described in the section on SIOCSHWTSTAMP. 159 + 160 + Time stamps for received packets must be stored in the skb. To get a pointer 161 + to the shared time stamp structure of the skb call skb_hwtstamps(). Then 162 + set the time stamps in the structure: 163 + 164 + struct skb_shared_hwtstamps { 165 + /* hardware time stamp transformed into duration 166 + * since arbitrary point in time 167 + */ 168 + ktime_t hwtstamp; 169 + ktime_t syststamp; /* hwtstamp transformed to system time base */ 170 + }; 171 172 Time stamps for outgoing packets are to be generated as follows: 173 + - In hard_start_xmit(), check if skb_tx(skb)->hardware is set no-zero. 174 + If yes, then the driver is expected to do hardware time stamping. 175 - If this is possible for the skb and requested, then declare 176 + that the driver is doing the time stamping by setting the field 177 + skb_tx(skb)->in_progress non-zero. You might want to keep a pointer 178 + to the associated skb for the next step and not free the skb. A driver 179 + not supporting hardware time stamping doesn't do that. A driver must 180 + never touch sk_buff::tstamp! It is used to store software generated 181 + time stamps by the network subsystem. 182 - As soon as the driver has sent the packet and/or obtained a 183 hardware time stamp for it, it passes the time stamp back by 184 calling skb_hwtstamp_tx() with the original skb, the raw 185 + hardware time stamp. skb_hwtstamp_tx() clones the original skb and 186 + adds the timestamps, therefore the original skb has to be freed now. 187 + If obtaining the hardware time stamp somehow fails, then the driver 188 + should not fall back to software time stamping. The rationale is that 189 + this would occur at a later time in the processing pipeline than other 190 + software time stamping and therefore could lead to unexpected deltas 191 + between time stamps. 
192 + - If the driver did not call set skb_tx(skb)->in_progress, then 193 dev_hard_start_xmit() checks whether software time stamping 194 is wanted as fallback and potentially generates the time stamp.
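The documentation change above describes two cooperating interfaces: the per-socket SO_TIMESTAMPING option and the per-device SIOCSHWTSTAMP ioctl. As a rough illustration of how the two fit together from user space (not part of the patch; "eth0" and the SO_TIMESTAMPING fallback define are assumptions, the other constants come from linux/net_tstamp.h and linux/sockios.h as described in the text):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <net/if.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>

    #ifndef SO_TIMESTAMPING
    #define SO_TIMESTAMPING 37      /* older libc headers may lack it */
    #endif

    int main(void)
    {
        struct hwtstamp_config cfg;
        struct ifreq ifr;
        int flags;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
            perror("socket");
            return 1;
        }

        /* 1. Per device: ask the driver to time stamp outgoing packets and
         *    all incoming packets in hardware. The driver may downgrade
         *    rx_filter to whatever it actually supports. */
        memset(&cfg, 0, sizeof(cfg));
        cfg.flags = 0;                          /* must be zero */
        cfg.tx_type = HWTSTAMP_TX_ON;
        cfg.rx_filter = HWTSTAMP_FILTER_ALL;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);   /* example name */
        ifr.ifr_data = (void *)&cfg;
        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
            perror("SIOCSHWTSTAMP");

        /* 2. Per socket: report the raw hardware receive time stamp in the
         *    scm_timestamping control message described above. */
        flags = SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
        if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
                       &flags, sizeof(flags)) < 0)
            perror("SO_TIMESTAMPING");

        close(fd);
        return 0;
    }

On return from the ioctl the driver writes the values it actually applied back into the struct, which is the behaviour the updated text now spells out.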
+5 -5
drivers/net/cnic.c
··· 2334 struct cnic_local *cp = dev->cnic_priv; 2335 u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX; 2336 2337 - prefetch(cp->status_blk.bnx2x); 2338 - prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); 2339 2340 - if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) 2341 tasklet_schedule(&cp->cnic_irq_task); 2342 - 2343 - cnic_chk_pkt_rings(cp); 2344 2345 return 0; 2346 }
··· 2334 struct cnic_local *cp = dev->cnic_priv; 2335 u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX; 2336 2337 + if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) { 2338 + prefetch(cp->status_blk.bnx2x); 2339 + prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); 2340 2341 tasklet_schedule(&cp->cnic_irq_task); 2342 + cnic_chk_pkt_rings(cp); 2343 + } 2344 2345 return 0; 2346 }
+2
drivers/net/e1000e/netdev.c
··· 661 i = 0; 662 } 663 664 eop = tx_ring->buffer_info[i].next_to_watch; 665 eop_desc = E1000_TX_DESC(*tx_ring, eop); 666 }
··· 661 i = 0; 662 } 663 664 + if (i == tx_ring->next_to_use) 665 + break; 666 eop = tx_ring->buffer_info[i].next_to_watch; 667 eop_desc = E1000_TX_DESC(*tx_ring, eop); 668 }
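The e1000e change adds a hard stop so the Tx cleanup loop can never walk past next_to_use, even if a descriptor's done bit looks set for an entry the hardware was never given. A stand-alone toy model of that guard (structure and field names are invented for illustration, not the e1000e ones):

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_SIZE 8

    struct ring {
        unsigned int next_to_clean;   /* consumer index */
        unsigned int next_to_use;     /* producer index */
        bool done[RING_SIZE];         /* per-descriptor completion flag */
    };

    static void clean_ring(struct ring *r)
    {
        unsigned int i = r->next_to_clean;

        while (r->done[i]) {
            /* release buffer i here */
            r->done[i] = false;
            i = (i + 1) % RING_SIZE;

            /* the added guard: never clean descriptors that have not
             * been handed to the hardware yet */
            if (i == r->next_to_use)
                break;
        }
        r->next_to_clean = i;
    }

    int main(void)
    {
        struct ring r = { .next_to_clean = 0, .next_to_use = 3,
                          .done = { true, true, true, true /* stale */ } };

        clean_ring(&r);
        printf("next_to_clean = %u\n", r.next_to_clean);  /* 3, not 4 */
        return 0;
    }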
+1
drivers/net/igb/igb_ethtool.c
··· 1814 retval = 0; 1815 break; 1816 case E1000_DEV_ID_82576_QUAD_COPPER: 1817 /* quad port adapters only support WoL on port A */ 1818 if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) { 1819 wol->supported = 0;
··· 1814 retval = 0; 1815 break; 1816 case E1000_DEV_ID_82576_QUAD_COPPER: 1817 + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: 1818 /* quad port adapters only support WoL on port A */ 1819 if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) { 1820 wol->supported = 0;
+1
drivers/net/igb/igb_main.c
··· 1612 adapter->eeprom_wol = 0; 1613 break; 1614 case E1000_DEV_ID_82576_QUAD_COPPER: 1615 /* if quad port adapter, disable WoL on all but port A */ 1616 if (global_quad_port_a != 0) 1617 adapter->eeprom_wol = 0;
··· 1612 adapter->eeprom_wol = 0; 1613 break; 1614 case E1000_DEV_ID_82576_QUAD_COPPER: 1615 + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: 1616 /* if quad port adapter, disable WoL on all but port A */ 1617 if (global_quad_port_a != 0) 1618 adapter->eeprom_wol = 0;
+1 -1
drivers/net/myri10ge/myri10ge.c
··· 1690 if (pause->tx_pause != mgp->pause) 1691 return myri10ge_change_pause(mgp, pause->tx_pause); 1692 if (pause->rx_pause != mgp->pause) 1693 - return myri10ge_change_pause(mgp, pause->tx_pause); 1694 if (pause->autoneg != 0) 1695 return -EINVAL; 1696 return 0;
··· 1690 if (pause->tx_pause != mgp->pause) 1691 return myri10ge_change_pause(mgp, pause->tx_pause); 1692 if (pause->rx_pause != mgp->pause) 1693 + return myri10ge_change_pause(mgp, pause->rx_pause); 1694 if (pause->autoneg != 0) 1695 return -EINVAL; 1696 return 0;
+6 -7
drivers/net/pcmcia/smc91c92_cs.c
··· 1608 { 1609 unsigned int ioaddr = dev->base_addr; 1610 struct smc_private *smc = netdev_priv(dev); 1611 - u_int multicast_table[ 2 ] = { 0, }; 1612 unsigned long flags; 1613 u_short rx_cfg_setting; 1614 1615 if (dev->flags & IFF_PROMISC) { 1616 rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti; ··· 1625 1626 netdev_for_each_mc_addr(mc_addr, dev) { 1627 u_int position = ether_crc(6, mc_addr->dmi_addr); 1628 - #ifndef final_version /* Verify multicast address. */ 1629 - if ((mc_addr->dmi_addr[0] & 1) == 0) 1630 - continue; 1631 - #endif 1632 multicast_table[position >> 29] |= 1 << ((position >> 26) & 7); 1633 } 1634 } ··· 1634 /* Load MC table and Rx setting into the chip without interrupts. */ 1635 spin_lock_irqsave(&smc->lock, flags); 1636 SMC_SELECT_BANK(3); 1637 - outl(multicast_table[0], ioaddr + MULTICAST0); 1638 - outl(multicast_table[1], ioaddr + MULTICAST4); 1639 SMC_SELECT_BANK(0); 1640 outw(rx_cfg_setting, ioaddr + RCR); 1641 SMC_SELECT_BANK(2);
··· 1608 { 1609 unsigned int ioaddr = dev->base_addr; 1610 struct smc_private *smc = netdev_priv(dev); 1611 + unsigned char multicast_table[8]; 1612 unsigned long flags; 1613 u_short rx_cfg_setting; 1614 + int i; 1615 + 1616 + memset(multicast_table, 0, sizeof(multicast_table)); 1617 1618 if (dev->flags & IFF_PROMISC) { 1619 rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti; ··· 1622 1623 netdev_for_each_mc_addr(mc_addr, dev) { 1624 u_int position = ether_crc(6, mc_addr->dmi_addr); 1625 multicast_table[position >> 29] |= 1 << ((position >> 26) & 7); 1626 } 1627 } ··· 1635 /* Load MC table and Rx setting into the chip without interrupts. */ 1636 spin_lock_irqsave(&smc->lock, flags); 1637 SMC_SELECT_BANK(3); 1638 + for (i = 0; i < 8; i++) 1639 + outb(multicast_table[i], ioaddr + MULTICAST0 + i); 1640 SMC_SELECT_BANK(0); 1641 outw(rx_cfg_setting, ioaddr + RCR); 1642 SMC_SELECT_BANK(2);
+3
drivers/net/qlcnic/qlcnic_hw.c
··· 431 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 432 u32 mode = VPORT_MISS_MODE_DROP; 433 434 qlcnic_nic_add_mac(adapter, adapter->mac_addr); 435 qlcnic_nic_add_mac(adapter, bcast_addr); 436
··· 431 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 432 u32 mode = VPORT_MISS_MODE_DROP; 433 434 + if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) 435 + return; 436 + 437 qlcnic_nic_add_mac(adapter, adapter->mac_addr); 438 qlcnic_nic_add_mac(adapter, bcast_addr); 439
+4 -7
drivers/net/r6040.c
··· 134 #define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor)) 135 #define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor)) 136 #define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */ 137 - #define MCAST_MAX 4 /* Max number multicast addresses to filter */ 138 139 /* Descriptor status */ 140 #define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */ ··· 982 crc >>= 26; 983 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); 984 } 985 - /* Write the index of the hash table */ 986 - for (i = 0; i < 4; i++) 987 - iowrite16(hash_table[i] << 14, ioaddr + MCR1); 988 /* Fill the MAC hash tables with their values */ 989 iowrite16(hash_table[0], ioaddr + MAR0); 990 iowrite16(hash_table[1], ioaddr + MAR1); ··· 997 iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); 998 iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); 999 } else { 1000 - iowrite16(0xffff, ioaddr + MID_0L + 8 * i); 1001 - iowrite16(0xffff, ioaddr + MID_0M + 8 * i); 1002 - iowrite16(0xffff, ioaddr + MID_0H + 8 * i); 1003 } 1004 i++; 1005 }
··· 134 #define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor)) 135 #define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor)) 136 #define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */ 137 + #define MCAST_MAX 3 /* Max number multicast addresses to filter */ 138 139 /* Descriptor status */ 140 #define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */ ··· 982 crc >>= 26; 983 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); 984 } 985 /* Fill the MAC hash tables with their values */ 986 iowrite16(hash_table[0], ioaddr + MAR0); 987 iowrite16(hash_table[1], ioaddr + MAR1); ··· 1000 iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); 1001 iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); 1002 } else { 1003 + iowrite16(0xffff, ioaddr + MID_1L + 8 * i); 1004 + iowrite16(0xffff, ioaddr + MID_1M + 8 * i); 1005 + iowrite16(0xffff, ioaddr + MID_1H + 8 * i); 1006 } 1007 i++; 1008 }
+5 -5
drivers/net/stmmac/stmmac_main.c
··· 1686 } 1687 pr_info("done!\n"); 1688 1689 - if (!request_mem_region(res->start, (res->end - res->start), 1690 pdev->name)) { 1691 pr_err("%s: ERROR: memory allocation failed" 1692 "cannot get the I/O addr 0x%x\n", ··· 1695 goto out; 1696 } 1697 1698 - addr = ioremap(res->start, (res->end - res->start)); 1699 if (!addr) { 1700 - pr_err("%s: ERROR: memory mapping failed \n", __func__); 1701 ret = -ENOMEM; 1702 goto out; 1703 } ··· 1775 out: 1776 if (ret < 0) { 1777 platform_set_drvdata(pdev, NULL); 1778 - release_mem_region(res->start, (res->end - res->start)); 1779 if (addr != NULL) 1780 iounmap(addr); 1781 } ··· 1813 1814 iounmap((void *)ndev->base_addr); 1815 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1816 - release_mem_region(res->start, (res->end - res->start)); 1817 1818 free_netdev(ndev); 1819
··· 1686 } 1687 pr_info("done!\n"); 1688 1689 + if (!request_mem_region(res->start, resource_size(res), 1690 pdev->name)) { 1691 pr_err("%s: ERROR: memory allocation failed" 1692 "cannot get the I/O addr 0x%x\n", ··· 1695 goto out; 1696 } 1697 1698 + addr = ioremap(res->start, resource_size(res)); 1699 if (!addr) { 1700 + pr_err("%s: ERROR: memory mapping failed\n", __func__); 1701 ret = -ENOMEM; 1702 goto out; 1703 } ··· 1775 out: 1776 if (ret < 0) { 1777 platform_set_drvdata(pdev, NULL); 1778 + release_mem_region(res->start, resource_size(res)); 1779 if (addr != NULL) 1780 iounmap(addr); 1781 } ··· 1813 1814 iounmap((void *)ndev->base_addr); 1815 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1816 + release_mem_region(res->start, resource_size(res)); 1817 1818 free_netdev(ndev); 1819
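For reference, resource_size() in include/linux/ioport.h evaluates to res->end - res->start + 1, because 'end' is the last valid address of the region; the replaced open-coded expression dropped the "+ 1" and under-sized every request_mem_region/ioremap/release by one byte. A stand-alone illustration with simplified types (the real helper uses struct resource and resource_size_t):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the kernel helper: 'end' is the last valid address of the
     * region, so the length is end - start + 1. */
    struct resource { uint64_t start, end; };

    static uint64_t resource_size(const struct resource *res)
    {
        return res->end - res->start + 1;
    }

    int main(void)
    {
        struct resource res = { .start = 0xd8100000, .end = 0xd8100fff };

        printf("resource_size: 0x%llx\n",
               (unsigned long long)resource_size(&res));     /* 0x1000 */
        printf("end - start:   0x%llx\n",
               (unsigned long long)(res.end - res.start));   /* 0xfff, old code */
        return 0;
    }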
+2
drivers/net/virtio_net.c
··· 327 struct scatterlist sg[2]; 328 int err; 329 330 skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN); 331 if (unlikely(!skb)) 332 return -ENOMEM; ··· 353 char *p; 354 int i, err, offset; 355 356 /* page in sg[MAX_SKB_FRAGS + 1] is list tail */ 357 for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { 358 first = get_a_page(vi, gfp);
··· 327 struct scatterlist sg[2]; 328 int err; 329 330 + sg_init_table(sg, 2); 331 skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN); 332 if (unlikely(!skb)) 333 return -ENOMEM; ··· 352 char *p; 353 int i, err, offset; 354 355 + sg_init_table(sg, MAX_SKB_FRAGS + 2); 356 /* page in sg[MAX_SKB_FRAGS + 1] is list tail */ 357 for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { 358 first = get_a_page(vi, gfp);
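The virtio_net fix matters because a scatterlist on the stack starts out as uninitialized memory; sg_init_table() zeroes the entries and marks the last one as the end of the list, which the DMA-mapping code (and the CONFIG_DEBUG_SG sanity checks) rely on. A minimal kernel-style sketch of the expected call order (hypothetical helper, not the driver code):

    #include <linux/scatterlist.h>

    /* Sketch only: fill a stack-allocated scatterlist the way the fixed
     * driver now does -- initialize first, then set the entries. */
    static void example_fill_sg(struct scatterlist *sg,
                                void *hdr, unsigned int hdr_len,
                                void *buf, unsigned int buf_len)
    {
        sg_init_table(sg, 2);               /* zero entries, mark sg[1] as last */
        sg_set_buf(&sg[0], hdr, hdr_len);   /* segment 0: the header */
        sg_set_buf(&sg[1], buf, buf_len);   /* segment 1: the packet buffer */
    }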
+1 -2
drivers/net/wireless/ath/ath9k/main.c
··· 1532 all_wiphys_idle = ath9k_all_wiphys_idle(sc); 1533 ath9k_set_wiphy_idle(aphy, idle); 1534 1535 - if (!idle && all_wiphys_idle) 1536 - enable_radio = true; 1537 1538 /* 1539 * After we unlock here its possible another wiphy
··· 1532 all_wiphys_idle = ath9k_all_wiphys_idle(sc); 1533 ath9k_set_wiphy_idle(aphy, idle); 1534 1535 + enable_radio = (!idle && all_wiphys_idle); 1536 1537 /* 1538 * After we unlock here its possible another wiphy
+9 -4
drivers/net/wireless/iwlwifi/iwl-4965.c
··· 2015 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " 2016 "%d index %d\n", scd_ssn , index); 2017 freed = iwl_tx_queue_reclaim(priv, txq_id, index); 2018 - iwl_free_tfds_in_queue(priv, sta_id, tid, freed); 2019 2020 if (priv->mac80211_registered && 2021 (iwl_queue_space(&txq->q) > txq->q.low_mark) && ··· 2043 tx_resp->failure_frame); 2044 2045 freed = iwl_tx_queue_reclaim(priv, txq_id, index); 2046 - iwl_free_tfds_in_queue(priv, sta_id, tid, freed); 2047 2048 if (priv->mac80211_registered && 2049 (iwl_queue_space(&txq->q) > txq->q.low_mark)) 2050 iwl_wake_queue(priv, txq_id); 2051 } 2052 - 2053 - iwl_txq_check_empty(priv, sta_id, tid, txq_id); 2054 2055 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) 2056 IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
··· 2015 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " 2016 "%d index %d\n", scd_ssn , index); 2017 freed = iwl_tx_queue_reclaim(priv, txq_id, index); 2018 + if (qc) 2019 + iwl_free_tfds_in_queue(priv, sta_id, 2020 + tid, freed); 2021 2022 if (priv->mac80211_registered && 2023 (iwl_queue_space(&txq->q) > txq->q.low_mark) && ··· 2041 tx_resp->failure_frame); 2042 2043 freed = iwl_tx_queue_reclaim(priv, txq_id, index); 2044 + if (qc && likely(sta_id != IWL_INVALID_STATION)) 2045 + iwl_free_tfds_in_queue(priv, sta_id, tid, freed); 2046 + else if (sta_id == IWL_INVALID_STATION) 2047 + IWL_DEBUG_TX_REPLY(priv, "Station not known\n"); 2048 2049 if (priv->mac80211_registered && 2050 (iwl_queue_space(&txq->q) > txq->q.low_mark)) 2051 iwl_wake_queue(priv, txq_id); 2052 } 2053 + if (qc && likely(sta_id != IWL_INVALID_STATION)) 2054 + iwl_txq_check_empty(priv, sta_id, tid, txq_id); 2055 2056 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) 2057 IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
+24 -31
drivers/net/wireless/iwlwifi/iwl-agn-rs.c
··· 346 !!(rate_n_flags & RATE_MCS_ANT_C_MSK); 347 } 348 349 /** 350 * rs_collect_tx_data - Update the success/failure sliding window 351 * ··· 364 * at this rate. window->data contains the bitmask of successful 365 * packets. 366 */ 367 - static int rs_collect_tx_data(struct iwl_rate_scale_data *windows, 368 - int scale_index, s32 tpt, int attempts, 369 - int successes) 370 { 371 struct iwl_rate_scale_data *window = NULL; 372 static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1)); 373 - s32 fail_count; 374 375 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) 376 return -EINVAL; 377 378 /* Select window for current tx bit rate */ 379 - window = &(windows[scale_index]); 380 381 /* 382 * Keep track of only the latest 62 tx frame attempts in this rate's ··· 752 return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) && 753 (a->is_SGI == b->is_SGI); 754 } 755 - /* 756 - * Static function to get the expected throughput from an iwl_scale_tbl_info 757 - * that wraps a NULL pointer check 758 - */ 759 - static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) 760 - { 761 - if (tbl->expected_tpt) 762 - return tbl->expected_tpt[rs_index]; 763 - return 0; 764 - } 765 766 /* 767 * mac80211 sends us Tx status ··· 768 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 769 struct iwl_priv *priv = (struct iwl_priv *)priv_r; 770 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 771 - struct iwl_rate_scale_data *window = NULL; 772 enum mac80211_rate_control_flags mac_flags; 773 u32 tx_rate; 774 struct iwl_scale_tbl_info tbl_type; 775 - struct iwl_scale_tbl_info *curr_tbl, *other_tbl; 776 - s32 tpt = 0; 777 778 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n"); 779 ··· 854 IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n"); 855 return; 856 } 857 - window = (struct iwl_rate_scale_data *)&(curr_tbl->win[0]); 858 859 /* 860 * Updating the frame history depends on whether packets were ··· 866 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); 867 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, 868 &rs_index); 869 - tpt = get_expected_tpt(curr_tbl, rs_index); 870 - rs_collect_tx_data(window, rs_index, tpt, 871 info->status.ampdu_ack_len, 872 info->status.ampdu_ack_map); 873 ··· 896 * table as active/search. 897 */ 898 if (table_type_matches(&tbl_type, curr_tbl)) 899 - tpt = get_expected_tpt(curr_tbl, rs_index); 900 else if (table_type_matches(&tbl_type, other_tbl)) 901 - tpt = get_expected_tpt(other_tbl, rs_index); 902 else 903 continue; 904 - 905 - /* Constants mean 1 transmission, 0 successes */ 906 - if (i < retries) 907 - rs_collect_tx_data(window, rs_index, tpt, 1, 908 - 0); 909 - else 910 - rs_collect_tx_data(window, rs_index, tpt, 1, 911 - legacy_success); 912 } 913 914 /* Update success/fail counts if not searching for new mode */
··· 346 !!(rate_n_flags & RATE_MCS_ANT_C_MSK); 347 } 348 349 + /* 350 + * Static function to get the expected throughput from an iwl_scale_tbl_info 351 + * that wraps a NULL pointer check 352 + */ 353 + static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) 354 + { 355 + if (tbl->expected_tpt) 356 + return tbl->expected_tpt[rs_index]; 357 + return 0; 358 + } 359 + 360 /** 361 * rs_collect_tx_data - Update the success/failure sliding window 362 * ··· 353 * at this rate. window->data contains the bitmask of successful 354 * packets. 355 */ 356 + static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl, 357 + int scale_index, int attempts, int successes) 358 { 359 struct iwl_rate_scale_data *window = NULL; 360 static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1)); 361 + s32 fail_count, tpt; 362 363 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) 364 return -EINVAL; 365 366 /* Select window for current tx bit rate */ 367 + window = &(tbl->win[scale_index]); 368 + 369 + /* Get expected throughput */ 370 + tpt = get_expected_tpt(tbl, scale_index); 371 372 /* 373 * Keep track of only the latest 62 tx frame attempts in this rate's ··· 739 return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) && 740 (a->is_SGI == b->is_SGI); 741 } 742 743 /* 744 * mac80211 sends us Tx status ··· 765 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 766 struct iwl_priv *priv = (struct iwl_priv *)priv_r; 767 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 768 enum mac80211_rate_control_flags mac_flags; 769 u32 tx_rate; 770 struct iwl_scale_tbl_info tbl_type; 771 + struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; 772 773 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n"); 774 ··· 853 IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n"); 854 return; 855 } 856 857 /* 858 * Updating the frame history depends on whether packets were ··· 866 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); 867 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, 868 &rs_index); 869 + rs_collect_tx_data(curr_tbl, rs_index, 870 info->status.ampdu_ack_len, 871 info->status.ampdu_ack_map); 872 ··· 897 * table as active/search. 898 */ 899 if (table_type_matches(&tbl_type, curr_tbl)) 900 + tmp_tbl = curr_tbl; 901 else if (table_type_matches(&tbl_type, other_tbl)) 902 + tmp_tbl = other_tbl; 903 else 904 continue; 905 + rs_collect_tx_data(tmp_tbl, rs_index, 1, 906 + i < retries ? 0 : legacy_success); 907 } 908 909 /* Update success/fail counts if not searching for new mode */
+7 -4
drivers/net/wireless/iwlwifi/iwl-core.c
··· 308 309 spin_unlock_irqrestore(&priv->lock, flags); 310 311 - /* Allocate and init all Tx and Command queues */ 312 - ret = iwl_txq_ctx_reset(priv); 313 - if (ret) 314 - return ret; 315 316 set_bit(STATUS_INIT, &priv->status); 317
··· 308 309 spin_unlock_irqrestore(&priv->lock, flags); 310 311 + /* Allocate or reset and init all Tx and Command queues */ 312 + if (!priv->txq) { 313 + ret = iwl_txq_ctx_alloc(priv); 314 + if (ret) 315 + return ret; 316 + } else 317 + iwl_txq_ctx_reset(priv); 318 319 set_bit(STATUS_INIT, &priv->status); 320
+4 -1
drivers/net/wireless/iwlwifi/iwl-core.h
··· 442 /***************************************************** 443 * TX 444 ******************************************************/ 445 - int iwl_txq_ctx_reset(struct iwl_priv *priv); 446 void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); 447 int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, 448 struct iwl_tx_queue *txq, ··· 457 void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); 458 int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, 459 int slots_num, u32 txq_id); 460 void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); 461 int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn); 462 int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid);
··· 442 /***************************************************** 443 * TX 444 ******************************************************/ 445 + int iwl_txq_ctx_alloc(struct iwl_priv *priv); 446 + void iwl_txq_ctx_reset(struct iwl_priv *priv); 447 void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); 448 int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, 449 struct iwl_tx_queue *txq, ··· 456 void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); 457 int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, 458 int slots_num, u32 txq_id); 459 + void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, 460 + int slots_num, u32 txq_id); 461 void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); 462 int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn); 463 int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid);
+94 -13
drivers/net/wireless/iwlwifi/iwl-tx.c
··· 194 struct iwl_queue *q = &txq->q; 195 struct device *dev = &priv->pci_dev->dev; 196 int i; 197 198 if (q->n_bd == 0) 199 return; 200 201 /* De-alloc array of command/tx buffers */ 202 for (i = 0; i <= TFD_CMD_SLOTS; i++) ··· 434 } 435 EXPORT_SYMBOL(iwl_tx_queue_init); 436 437 /** 438 * iwl_hw_txq_ctx_free - Free TXQ Context 439 * ··· 465 466 /* Tx queues */ 467 if (priv->txq) { 468 - for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; 469 - txq_id++) 470 if (txq_id == IWL_CMD_QUEUE_NUM) 471 iwl_cmd_queue_free(priv); 472 else ··· 481 EXPORT_SYMBOL(iwl_hw_txq_ctx_free); 482 483 /** 484 - * iwl_txq_ctx_reset - Reset TX queue context 485 - * Destroys all DMA structures and initialize them again 486 * 487 * @param priv 488 * @return error code 489 */ 490 - int iwl_txq_ctx_reset(struct iwl_priv *priv) 491 { 492 - int ret = 0; 493 int txq_id, slots_num; 494 unsigned long flags; 495 ··· 547 return ret; 548 } 549 550 /** 551 - * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory 552 */ 553 void iwl_txq_ctx_stop(struct iwl_priv *priv) 554 { ··· 591 1000); 592 } 593 spin_unlock_irqrestore(&priv->lock, flags); 594 - 595 - /* Deallocate memory for all Tx queues */ 596 - iwl_hw_txq_ctx_free(priv); 597 } 598 EXPORT_SYMBOL(iwl_txq_ctx_stop); 599 ··· 1113 1114 spin_lock_irqsave(&priv->hcmd_lock, flags); 1115 1116 idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); 1117 out_cmd = txq->cmd[idx]; 1118 out_meta = &txq->meta[idx]; ··· 1298 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); 1299 struct iwl_device_cmd *cmd; 1300 struct iwl_cmd_meta *meta; 1301 1302 /* If a Tx command is being handled and it isn't in the actual 1303 * command queue then there a command routing bug has been introduced ··· 1312 return; 1313 } 1314 1315 - cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge); 1316 - cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index]; 1317 - meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index]; 1318 1319 pci_unmap_single(priv->pci_dev, 1320 pci_unmap_addr(meta, mapping), ··· 1344 get_cmd_string(cmd->hdr.cmd)); 1345 wake_up_interruptible(&priv->wait_command_queue); 1346 } 1347 } 1348 EXPORT_SYMBOL(iwl_tx_cmd_complete); 1349
··· 194 struct iwl_queue *q = &txq->q; 195 struct device *dev = &priv->pci_dev->dev; 196 int i; 197 + bool huge = false; 198 199 if (q->n_bd == 0) 200 return; 201 + 202 + for (; q->read_ptr != q->write_ptr; 203 + q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 204 + /* we have no way to tell if it is a huge cmd ATM */ 205 + i = get_cmd_index(q, q->read_ptr, 0); 206 + 207 + if (txq->meta[i].flags & CMD_SIZE_HUGE) { 208 + huge = true; 209 + continue; 210 + } 211 + 212 + pci_unmap_single(priv->pci_dev, 213 + pci_unmap_addr(&txq->meta[i], mapping), 214 + pci_unmap_len(&txq->meta[i], len), 215 + PCI_DMA_BIDIRECTIONAL); 216 + } 217 + if (huge) { 218 + i = q->n_window; 219 + pci_unmap_single(priv->pci_dev, 220 + pci_unmap_addr(&txq->meta[i], mapping), 221 + pci_unmap_len(&txq->meta[i], len), 222 + PCI_DMA_BIDIRECTIONAL); 223 + } 224 225 /* De-alloc array of command/tx buffers */ 226 for (i = 0; i <= TFD_CMD_SLOTS; i++) ··· 410 } 411 EXPORT_SYMBOL(iwl_tx_queue_init); 412 413 + void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, 414 + int slots_num, u32 txq_id) 415 + { 416 + int actual_slots = slots_num; 417 + 418 + if (txq_id == IWL_CMD_QUEUE_NUM) 419 + actual_slots++; 420 + 421 + memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots); 422 + 423 + txq->need_update = 0; 424 + 425 + /* Initialize queue's high/low-water marks, and head/tail indexes */ 426 + iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); 427 + 428 + /* Tell device where to find queue */ 429 + priv->cfg->ops->lib->txq_init(priv, txq); 430 + } 431 + EXPORT_SYMBOL(iwl_tx_queue_reset); 432 + 433 /** 434 * iwl_hw_txq_ctx_free - Free TXQ Context 435 * ··· 421 422 /* Tx queues */ 423 if (priv->txq) { 424 + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) 425 if (txq_id == IWL_CMD_QUEUE_NUM) 426 iwl_cmd_queue_free(priv); 427 else ··· 438 EXPORT_SYMBOL(iwl_hw_txq_ctx_free); 439 440 /** 441 + * iwl_txq_ctx_alloc - allocate TX queue context 442 + * Allocate all Tx DMA structures and initialize them 443 * 444 * @param priv 445 * @return error code 446 */ 447 + int iwl_txq_ctx_alloc(struct iwl_priv *priv) 448 { 449 + int ret; 450 int txq_id, slots_num; 451 unsigned long flags; 452 ··· 504 return ret; 505 } 506 507 + void iwl_txq_ctx_reset(struct iwl_priv *priv) 508 + { 509 + int txq_id, slots_num; 510 + unsigned long flags; 511 + 512 + spin_lock_irqsave(&priv->lock, flags); 513 + 514 + /* Turn off all Tx DMA fifos */ 515 + priv->cfg->ops->lib->txq_set_sched(priv, 0); 516 + 517 + /* Tell NIC where to find the "keep warm" buffer */ 518 + iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); 519 + 520 + spin_unlock_irqrestore(&priv->lock, flags); 521 + 522 + /* Alloc and init all Tx queues, including the command queue (#4) */ 523 + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { 524 + slots_num = txq_id == IWL_CMD_QUEUE_NUM ? 525 + TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 526 + iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id); 527 + } 528 + } 529 + 530 /** 531 + * iwl_txq_ctx_stop - Stop all Tx DMA channels 532 */ 533 void iwl_txq_ctx_stop(struct iwl_priv *priv) 534 { ··· 525 1000); 526 } 527 spin_unlock_irqrestore(&priv->lock, flags); 528 } 529 EXPORT_SYMBOL(iwl_txq_ctx_stop); 530 ··· 1050 1051 spin_lock_irqsave(&priv->hcmd_lock, flags); 1052 1053 + /* If this is a huge cmd, mark the huge flag also on the meta.flags 1054 + * of the _original_ cmd. This is used for DMA mapping clean up. 
1055 + */ 1056 + if (cmd->flags & CMD_SIZE_HUGE) { 1057 + idx = get_cmd_index(q, q->write_ptr, 0); 1058 + txq->meta[idx].flags = CMD_SIZE_HUGE; 1059 + } 1060 + 1061 idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); 1062 out_cmd = txq->cmd[idx]; 1063 out_meta = &txq->meta[idx]; ··· 1227 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); 1228 struct iwl_device_cmd *cmd; 1229 struct iwl_cmd_meta *meta; 1230 + struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; 1231 1232 /* If a Tx command is being handled and it isn't in the actual 1233 * command queue then there a command routing bug has been introduced ··· 1240 return; 1241 } 1242 1243 + /* If this is a huge cmd, clear the huge flag on the meta.flags 1244 + * of the _original_ cmd. So that iwl_cmd_queue_free won't unmap 1245 + * the DMA buffer for the scan (huge) command. 1246 + */ 1247 + if (huge) { 1248 + cmd_index = get_cmd_index(&txq->q, index, 0); 1249 + txq->meta[cmd_index].flags = 0; 1250 + } 1251 + cmd_index = get_cmd_index(&txq->q, index, huge); 1252 + cmd = txq->cmd[cmd_index]; 1253 + meta = &txq->meta[cmd_index]; 1254 1255 pci_unmap_single(priv->pci_dev, 1256 pci_unmap_addr(meta, mapping), ··· 1264 get_cmd_string(cmd->hdr.cmd)); 1265 wake_up_interruptible(&priv->wait_command_queue); 1266 } 1267 + meta->flags = 0; 1268 } 1269 EXPORT_SYMBOL(iwl_tx_cmd_complete); 1270
+4
drivers/vhost/vhost.c
··· 236 int log_all) 237 { 238 int i; 239 for (i = 0; i < mem->nregions; ++i) { 240 struct vhost_memory_region *m = mem->regions + i; 241 unsigned long a = m->userspace_addr;
··· 236 int log_all) 237 { 238 int i; 239 + 240 + if (!mem) 241 + return 0; 242 + 243 for (i = 0; i < mem->nregions; ++i) { 244 struct vhost_memory_region *m = mem->regions + i; 245 unsigned long a = m->userspace_addr;
+4
include/net/x25.h
··· 183 extern int sysctl_x25_ack_holdback_timeout; 184 extern int sysctl_x25_forward; 185 186 extern int x25_addr_ntoa(unsigned char *, struct x25_address *, 187 struct x25_address *); 188 extern int x25_addr_aton(unsigned char *, struct x25_address *,
··· 183 extern int sysctl_x25_ack_holdback_timeout; 184 extern int sysctl_x25_forward; 185 186 + extern int x25_parse_address_block(struct sk_buff *skb, 187 + struct x25_address *called_addr, 188 + struct x25_address *calling_addr); 189 + 190 extern int x25_addr_ntoa(unsigned char *, struct x25_address *, 191 struct x25_address *); 192 extern int x25_addr_aton(unsigned char *, struct x25_address *,
+1 -1
net/bridge/br_multicast.c
··· 723 if (!pskb_may_pull(skb, len)) 724 return -EINVAL; 725 726 - grec = (void *)(skb->data + len); 727 group = grec->grec_mca; 728 type = grec->grec_type; 729
··· 723 if (!pskb_may_pull(skb, len)) 724 return -EINVAL; 725 726 + grec = (void *)(skb->data + len - sizeof(*grec)); 727 group = grec->grec_mca; 728 type = grec->grec_type; 729
+1 -1
net/can/raw.c
··· 445 return -EFAULT; 446 } 447 } else if (count == 1) { 448 - if (copy_from_user(&sfilter, optval, optlen)) 449 return -EFAULT; 450 } 451
··· 445 return -EFAULT; 446 } 447 } else if (count == 1) { 448 + if (copy_from_user(&sfilter, optval, sizeof(sfilter))) 449 return -EFAULT; 450 } 451
+2 -2
net/ipv4/udp.c
··· 472 if (hslot->count < hslot2->count) 473 goto begin; 474 475 - result = udp4_lib_lookup2(net, INADDR_ANY, sport, 476 - daddr, hnum, dif, 477 hslot2, slot2); 478 } 479 rcu_read_unlock();
··· 472 if (hslot->count < hslot2->count) 473 goto begin; 474 475 + result = udp4_lib_lookup2(net, saddr, sport, 476 + INADDR_ANY, hnum, dif, 477 hslot2, slot2); 478 } 479 rcu_read_unlock();
+2 -2
net/ipv6/udp.c
··· 259 if (hslot->count < hslot2->count) 260 goto begin; 261 262 - result = udp6_lib_lookup2(net, &in6addr_any, sport, 263 - daddr, hnum, dif, 264 hslot2, slot2); 265 } 266 rcu_read_unlock();
··· 259 if (hslot->count < hslot2->count) 260 goto begin; 261 262 + result = udp6_lib_lookup2(net, saddr, sport, 263 + &in6addr_any, hnum, dif, 264 hslot2, slot2); 265 } 266 rcu_read_unlock();
+2 -2
net/mac80211/main.c
··· 225 switch (sdata->vif.type) { 226 case NL80211_IFTYPE_AP: 227 sdata->vif.bss_conf.enable_beacon = 228 - !!rcu_dereference(sdata->u.ap.beacon); 229 break; 230 case NL80211_IFTYPE_ADHOC: 231 sdata->vif.bss_conf.enable_beacon = 232 - !!rcu_dereference(sdata->u.ibss.presp); 233 break; 234 case NL80211_IFTYPE_MESH_POINT: 235 sdata->vif.bss_conf.enable_beacon = true;
··· 225 switch (sdata->vif.type) { 226 case NL80211_IFTYPE_AP: 227 sdata->vif.bss_conf.enable_beacon = 228 + !!sdata->u.ap.beacon; 229 break; 230 case NL80211_IFTYPE_ADHOC: 231 sdata->vif.bss_conf.enable_beacon = 232 + !!sdata->u.ibss.presp; 233 break; 234 case NL80211_IFTYPE_MESH_POINT: 235 sdata->vif.bss_conf.enable_beacon = true;
-3
net/mac80211/mesh.c
··· 750 751 switch (fc & IEEE80211_FCTL_STYPE) { 752 case IEEE80211_STYPE_ACTION: 753 - if (skb->len < IEEE80211_MIN_ACTION_SIZE) 754 - return RX_DROP_MONITOR; 755 - /* fall through */ 756 case IEEE80211_STYPE_PROBE_RESP: 757 case IEEE80211_STYPE_BEACON: 758 skb_queue_tail(&ifmsh->skb_queue, skb);
··· 750 751 switch (fc & IEEE80211_FCTL_STYPE) { 752 case IEEE80211_STYPE_ACTION: 753 case IEEE80211_STYPE_PROBE_RESP: 754 case IEEE80211_STYPE_BEACON: 755 skb_queue_tail(&ifmsh->skb_queue, skb);
+5
net/mac80211/rx.c
··· 1974 goto handled; 1975 } 1976 break; 1977 } 1978 1979 /*
··· 1974 goto handled; 1975 } 1976 break; 1977 + case MESH_PLINK_CATEGORY: 1978 + case MESH_PATH_SEL_CATEGORY: 1979 + if (ieee80211_vif_is_mesh(&sdata->vif)) 1980 + return ieee80211_mesh_rx_mgmt(sdata, rx->skb); 1981 + break; 1982 } 1983 1984 /*
+16 -4
net/mac80211/sta_info.c
··· 93 struct ieee80211_local *local = sdata->local; 94 struct sta_info *sta; 95 96 - sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); 97 while (sta) { 98 if (sta->sdata == sdata && 99 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) 100 break; 101 - sta = rcu_dereference(sta->hnext); 102 } 103 return sta; 104 } ··· 119 struct ieee80211_local *local = sdata->local; 120 struct sta_info *sta; 121 122 - sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); 123 while (sta) { 124 if ((sta->sdata == sdata || 125 sta->sdata->bss == sdata->bss) && 126 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) 127 break; 128 - sta = rcu_dereference(sta->hnext); 129 } 130 return sta; 131 }
··· 93 struct ieee80211_local *local = sdata->local; 94 struct sta_info *sta; 95 96 + sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)], 97 + rcu_read_lock_held() || 98 + lockdep_is_held(&local->sta_lock) || 99 + lockdep_is_held(&local->sta_mtx)); 100 while (sta) { 101 if (sta->sdata == sdata && 102 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) 103 break; 104 + sta = rcu_dereference_check(sta->hnext, 105 + rcu_read_lock_held() || 106 + lockdep_is_held(&local->sta_lock) || 107 + lockdep_is_held(&local->sta_mtx)); 108 } 109 return sta; 110 } ··· 113 struct ieee80211_local *local = sdata->local; 114 struct sta_info *sta; 115 116 + sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)], 117 + rcu_read_lock_held() || 118 + lockdep_is_held(&local->sta_lock) || 119 + lockdep_is_held(&local->sta_mtx)); 120 while (sta) { 121 if ((sta->sdata == sdata || 122 sta->sdata->bss == sdata->bss) && 123 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) 124 break; 125 + sta = rcu_dereference_check(sta->hnext, 126 + rcu_read_lock_held() || 127 + lockdep_is_held(&local->sta_lock) || 128 + lockdep_is_held(&local->sta_mtx)); 129 } 130 return sta; 131 }
+62 -5
net/x25/af_x25.c
··· 83 }; 84 #endif 85 86 int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr, 87 struct x25_address *calling_addr) 88 { ··· 589 x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE; 590 x25->facilities.pacsize_in = X25_DEFAULT_PACKET_SIZE; 591 x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE; 592 - x25->facilities.throughput = X25_DEFAULT_THROUGHPUT; 593 x25->facilities.reverse = X25_DEFAULT_REVERSE; 594 x25->dte_facilities.calling_len = 0; 595 x25->dte_facilities.called_len = 0; ··· 958 /* 959 * Extract the X.25 addresses and convert them to ASCII strings, 960 * and remove them. 961 */ 962 - addr_len = x25_addr_ntoa(skb->data, &source_addr, &dest_addr); 963 skb_pull(skb, addr_len); 964 965 /* 966 * Get the length of the facilities, skip past them for the moment 967 * get the call user data because this is needed to determine 968 * the correct listener 969 */ 970 len = skb->data[0] + 1; 971 skb_pull(skb,len); 972 973 /* ··· 1461 if (facilities.winsize_in < 1 || 1462 facilities.winsize_in > 127) 1463 break; 1464 - if (facilities.throughput < 0x03 || 1465 - facilities.throughput > 0xDD) 1466 - break; 1467 if (facilities.reverse && 1468 (facilities.reverse & 0x81) != 0x81) 1469 break;
··· 83 }; 84 #endif 85 86 + 87 + int x25_parse_address_block(struct sk_buff *skb, 88 + struct x25_address *called_addr, 89 + struct x25_address *calling_addr) 90 + { 91 + unsigned char len; 92 + int needed; 93 + int rc; 94 + 95 + if (skb->len < 1) { 96 + /* packet has no address block */ 97 + rc = 0; 98 + goto empty; 99 + } 100 + 101 + len = *skb->data; 102 + needed = 1 + (len >> 4) + (len & 0x0f); 103 + 104 + if (skb->len < needed) { 105 + /* packet is too short to hold the addresses it claims 106 + to hold */ 107 + rc = -1; 108 + goto empty; 109 + } 110 + 111 + return x25_addr_ntoa(skb->data, called_addr, calling_addr); 112 + 113 + empty: 114 + *called_addr->x25_addr = 0; 115 + *calling_addr->x25_addr = 0; 116 + 117 + return rc; 118 + } 119 + 120 + 121 int x25_addr_ntoa(unsigned char *p, struct x25_address *called_addr, 122 struct x25_address *calling_addr) 123 { ··· 554 x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE; 555 x25->facilities.pacsize_in = X25_DEFAULT_PACKET_SIZE; 556 x25->facilities.pacsize_out = X25_DEFAULT_PACKET_SIZE; 557 + x25->facilities.throughput = 0; /* by default don't negotiate 558 + throughput */ 559 x25->facilities.reverse = X25_DEFAULT_REVERSE; 560 x25->dte_facilities.calling_len = 0; 561 x25->dte_facilities.called_len = 0; ··· 922 /* 923 * Extract the X.25 addresses and convert them to ASCII strings, 924 * and remove them. 925 + * 926 + * Address block is mandatory in call request packets 927 */ 928 + addr_len = x25_parse_address_block(skb, &source_addr, &dest_addr); 929 + if (addr_len <= 0) 930 + goto out_clear_request; 931 skb_pull(skb, addr_len); 932 933 /* 934 * Get the length of the facilities, skip past them for the moment 935 * get the call user data because this is needed to determine 936 * the correct listener 937 + * 938 + * Facilities length is mandatory in call request packets 939 */ 940 + if (skb->len < 1) 941 + goto out_clear_request; 942 len = skb->data[0] + 1; 943 + if (skb->len < len) 944 + goto out_clear_request; 945 skb_pull(skb,len); 946 947 /* ··· 1415 if (facilities.winsize_in < 1 || 1416 facilities.winsize_in > 127) 1417 break; 1418 + if (facilities.throughput) { 1419 + int out = facilities.throughput & 0xf0; 1420 + int in = facilities.throughput & 0x0f; 1421 + if (!out) 1422 + facilities.throughput |= 1423 + X25_DEFAULT_THROUGHPUT << 4; 1424 + else if (out < 0x30 || out > 0xD0) 1425 + break; 1426 + if (!in) 1427 + facilities.throughput |= 1428 + X25_DEFAULT_THROUGHPUT; 1429 + else if (in < 0x03 || in > 0x0D) 1430 + break; 1431 + } 1432 if (facilities.reverse && 1433 (facilities.reverse & 0x81) != 0x81) 1434 break;
+23 -4
net/x25/x25_facilities.c
··· 35 struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask) 36 { 37 unsigned char *p = skb->data; 38 - unsigned int len = *p++; 39 40 *vc_fac_mask = 0; 41 ··· 49 dte_facs->called_len = 0; 50 memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae)); 51 memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae)); 52 53 while (len > 0) { 54 switch (*p & X25_FAC_CLASS_MASK) { ··· 255 memcpy(new, ours, sizeof(*new)); 256 257 len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask); 258 259 /* 260 * They want reverse charging, we won't accept it. ··· 269 new->reverse = theirs.reverse; 270 271 if (theirs.throughput) { 272 - if (theirs.throughput < ours->throughput) { 273 - SOCK_DEBUG(sk, "X.25: throughput negotiated down\n"); 274 - new->throughput = theirs.throughput; 275 } 276 } 277
··· 35 struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask) 36 { 37 unsigned char *p = skb->data; 38 + unsigned int len; 39 40 *vc_fac_mask = 0; 41 ··· 49 dte_facs->called_len = 0; 50 memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae)); 51 memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae)); 52 + 53 + if (skb->len < 1) 54 + return 0; 55 + 56 + len = *p++; 57 + 58 + if (len >= skb->len) 59 + return -1; 60 61 while (len > 0) { 62 switch (*p & X25_FAC_CLASS_MASK) { ··· 247 memcpy(new, ours, sizeof(*new)); 248 249 len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask); 250 + if (len < 0) 251 + return len; 252 253 /* 254 * They want reverse charging, we won't accept it. ··· 259 new->reverse = theirs.reverse; 260 261 if (theirs.throughput) { 262 + int theirs_in = theirs.throughput & 0x0f; 263 + int theirs_out = theirs.throughput & 0xf0; 264 + int ours_in = ours->throughput & 0x0f; 265 + int ours_out = ours->throughput & 0xf0; 266 + if (!ours_in || theirs_in < ours_in) { 267 + SOCK_DEBUG(sk, "X.25: inbound throughput negotiated\n"); 268 + new->throughput = (new->throughput & 0xf0) | theirs_in; 269 + } 270 + if (!ours_out || theirs_out < ours_out) { 271 + SOCK_DEBUG(sk, 272 + "X.25: outbound throughput negotiated\n"); 273 + new->throughput = (new->throughput & 0x0f) | theirs_out; 274 } 275 } 276
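The facilities change negotiates the X.25 throughput facility per direction: as the code above treats it, the high nibble of the byte is the outbound class, the low nibble the inbound class, 0 means "no preference", and otherwise the lower (slower) class wins. A stand-alone sketch of that rule (toy function, not the kernel one):

    #include <stdio.h>

    static unsigned char negotiate_throughput(unsigned char ours,
                                              unsigned char theirs)
    {
        unsigned char result = ours;
        unsigned char theirs_in  = theirs & 0x0f, ours_in  = ours & 0x0f;
        unsigned char theirs_out = theirs & 0xf0, ours_out = ours & 0xf0;

        if (!ours_in || theirs_in < ours_in)           /* inbound class */
            result = (result & 0xf0) | theirs_in;
        if (!ours_out || theirs_out < ours_out)        /* outbound class */
            result = (result & 0x0f) | theirs_out;
        return result;
    }

    int main(void)
    {
        /* we ask for class 0xA both ways; peer offers inbound 0x8,
         * outbound 0xD -> negotiated 0xA8 (the lower class per direction) */
        printf("0x%02x\n", negotiate_throughput(0xAA, 0xD8));
        return 0;
    }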
+11 -4
net/x25/x25_in.c
··· 90 static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype) 91 { 92 struct x25_address source_addr, dest_addr; 93 94 switch (frametype) { 95 case X25_CALL_ACCEPTED: { ··· 108 * Parse the data in the frame. 109 */ 110 skb_pull(skb, X25_STD_MIN_LEN); 111 - skb_pull(skb, x25_addr_ntoa(skb->data, &source_addr, &dest_addr)); 112 - skb_pull(skb, 113 - x25_parse_facilities(skb, &x25->facilities, 114 &x25->dte_facilities, 115 - &x25->vc_facil_mask)); 116 /* 117 * Copy any Call User Data. 118 */
··· 90 static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype) 91 { 92 struct x25_address source_addr, dest_addr; 93 + int len; 94 95 switch (frametype) { 96 case X25_CALL_ACCEPTED: { ··· 107 * Parse the data in the frame. 108 */ 109 skb_pull(skb, X25_STD_MIN_LEN); 110 + 111 + len = x25_parse_address_block(skb, &source_addr, 112 + &dest_addr); 113 + if (len > 0) 114 + skb_pull(skb, len); 115 + 116 + len = x25_parse_facilities(skb, &x25->facilities, 117 &x25->dte_facilities, 118 + &x25->vc_facil_mask); 119 + if (len > 0) 120 + skb_pull(skb, len); 121 /* 122 * Copy any Call User Data. 123 */