Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (66 commits)
can-bcm: fix minor heap overflow
gianfar: Do not call device_set_wakeup_enable() under a spinlock
ipv6: Warn users if maximum number of routes is reached.
docs: Add neigh/gc_thresh3 and route/max_size documentation.
axnet_cs: fix resume problem for some Ax88790 chip
ipv6: addrconf: don't remove address state on ifdown if the address is being kept
tcp: Don't change unlocked socket state in tcp_v4_err().
x25: Prevent crashing when parsing bad X.25 facilities
cxgb4vf: add call to Firmware to reset VF State.
cxgb4vf: Fail open if link_start() fails.
cxgb4vf: flesh out PCI Device ID Table ...
cxgb4vf: fix some errors in Gather List to skb conversion
cxgb4vf: fix bug in Generic Receive Offload
cxgb4vf: don't implement trivial (and incorrect) ndo_select_queue()
ixgbe: Look inside vlan when determining offload protocol.
bnx2x: Look inside vlan when determining checksum proto.
vlan: Add function to retrieve EtherType from vlan packets.
virtio-net: init link state correctly
ucc_geth: Fix deadlock
ucc_geth: Do not bring the whole IF down when TX failure.
...

+507 -331
+9
Documentation/networking/ip-sysctl.txt
··· 20 20 min_pmtu - INTEGER 21 21 default 552 - minimum discovered Path MTU 22 22 23 + route/max_size - INTEGER 24 + Maximum number of routes allowed in the kernel. Increase 25 + this when using large numbers of interfaces and/or routes. 26 + 27 + neigh/default/gc_thresh3 - INTEGER 28 + Maximum number of neighbor entries allowed. Increase this 29 + when using large numbers of interfaces and when communicating 30 + with large numbers of directly-connected peers. 31 + 23 32 mtu_expires - INTEGER 24 33 Time, in seconds, that cached PMTU information is kept. 25 34
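Both new knobs are exposed as procfs sysctls (matching entries exist under ipv6 as well). A minimal userspace sketch of raising them; the paths follow the standard /proc/sys layout, and the values are illustrative assumptions rather than recommendations:

    #include <stdio.h>

    static int write_sysctl(const char *path, const char *val)
    {
    	FILE *f = fopen(path, "w");

    	if (!f)
    		return -1;	/* knob absent, or not running as root */
    	fprintf(f, "%s\n", val);
    	return fclose(f);
    }

    int main(void)
    {
    	/* illustrative sizes for a router with many interfaces/peers */
    	write_sysctl("/proc/sys/net/ipv4/route/max_size", "1048576");
    	write_sysctl("/proc/sys/net/ipv4/neigh/default/gc_thresh3", "8192");
    	return 0;
    }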
+1
drivers/atm/solos-attrlist.c
··· 1 1 SOLOS_ATTR_RO(DriverVersion) 2 2 SOLOS_ATTR_RO(APIVersion) 3 3 SOLOS_ATTR_RO(FirmwareVersion) 4 + SOLOS_ATTR_RO(Version) 4 5 // SOLOS_ATTR_RO(DspVersion) 5 6 // SOLOS_ATTR_RO(CommonHandshake) 6 7 SOLOS_ATTR_RO(Connected)
+8
drivers/atm/solos-pci.c
··· 1161 1161 dev_info(&dev->dev, "Solos FPGA Version %d.%02d svn-%d\n", 1162 1162 major_ver, minor_ver, fpga_ver); 1163 1163 1164 + if (fpga_ver < 37 && (fpga_upgrade || firmware_upgrade || 1165 + db_fpga_upgrade || db_firmware_upgrade)) { 1166 + dev_warn(&dev->dev, 1167 + "FPGA too old; cannot upgrade flash. Use JTAG.\n"); 1168 + fpga_upgrade = firmware_upgrade = 0; 1169 + db_fpga_upgrade = db_firmware_upgrade = 0; 1170 + } 1171 + 1164 1172 if (card->fpga_version >= DMA_SUPPORTED){ 1165 1173 card->using_dma = 1; 1166 1174 } else {
+5
drivers/bluetooth/btusb.c
··· 68 68 /* Apple MacBookPro6,2 */ 69 69 { USB_DEVICE(0x05ac, 0x8218) }, 70 70 71 + /* Apple MacBookAir3,1, MacBookAir3,2 */ 72 + { USB_DEVICE(0x05ac, 0x821b) }, 73 + 71 74 /* AVM BlueFRITZ! USB v2.0 */ 72 75 { USB_DEVICE(0x057c, 0x3800) }, 73 76 ··· 1031 1028 } 1032 1029 1033 1030 usb_set_intfdata(intf, data); 1031 + 1032 + usb_enable_autosuspend(interface_to_usbdev(intf)); 1034 1033 1035 1034 return 0; 1036 1035 }
+1 -1
drivers/net/bnx2x/bnx2x_cmn.c
··· 1680 1680 rc = XMIT_PLAIN; 1681 1681 1682 1682 else { 1683 - if (skb->protocol == htons(ETH_P_IPV6)) { 1683 + if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) { 1684 1684 rc = XMIT_CSUM_V6; 1685 1685 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 1686 1686 rc |= XMIT_CSUM_TCP;
+27 -15
drivers/net/cxgb4vf/cxgb4vf_main.c
··· 753 753 if (err) 754 754 return err; 755 755 set_bit(pi->port_id, &adapter->open_device_map); 756 - link_start(dev); 756 + err = link_start(dev); 757 + if (err) 758 + return err; 757 759 netif_tx_start_all_queues(dev); 758 760 return 0; 759 761 } ··· 1103 1101 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 1104 1102 pi->xact_addr_filt = ret; 1105 1103 return 0; 1106 - } 1107 - 1108 - /* 1109 - * Return a TX Queue on which to send the specified skb. 1110 - */ 1111 - static u16 cxgb4vf_select_queue(struct net_device *dev, struct sk_buff *skb) 1112 - { 1113 - /* 1114 - * XXX For now just use the default hash but we probably want to 1115 - * XXX look at other possibilities ... 1116 - */ 1117 - return skb_tx_hash(dev, skb); 1118 1104 } 1119 1105 1120 1106 #ifdef CONFIG_NET_POLL_CONTROLLER ··· 2065 2075 } 2066 2076 2067 2077 /* 2078 + * Some environments do not properly handle PCIE FLRs -- e.g. in Linux 2079 + * 2.6.31 and later we can't call pci_reset_function() in order to 2080 + * issue an FLR because of a self-deadlock on the device semaphore. 2081 + * Meanwhile, the OS infrastructure doesn't issue FLRs in all the 2082 + * cases where they're needed -- for instance, some versions of KVM 2083 + * fail to reset "Assigned Devices" when the VM reboots. Therefore we 2084 + * use the firmware based reset in order to reset any per function 2085 + * state. 2086 + */ 2087 + err = t4vf_fw_reset(adapter); 2088 + if (err < 0) { 2089 + dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err); 2090 + return err; 2091 + } 2092 + 2093 + /* 2068 2094 * Grab basic operational parameters. These will predominantly have 2069 2095 * been set up by the Physical Function Driver or will be hard coded 2070 2096 * into the adapter. We just have to live with them ... Note that ··· 2423 2417 .ndo_get_stats = cxgb4vf_get_stats, 2424 2418 .ndo_set_rx_mode = cxgb4vf_set_rxmode, 2425 2419 .ndo_set_mac_address = cxgb4vf_set_mac_addr, 2426 - .ndo_select_queue = cxgb4vf_select_queue, 2427 2420 .ndo_validate_addr = eth_validate_addr, 2428 2421 .ndo_do_ioctl = cxgb4vf_do_ioctl, 2429 2422 .ndo_change_mtu = cxgb4vf_change_mtu, ··· 2629 2624 netdev->do_ioctl = cxgb4vf_do_ioctl; 2630 2625 netdev->change_mtu = cxgb4vf_change_mtu; 2631 2626 netdev->set_mac_address = cxgb4vf_set_mac_addr; 2632 - netdev->select_queue = cxgb4vf_select_queue; 2633 2627 #ifdef CONFIG_NET_POLL_CONTROLLER 2634 2628 netdev->poll_controller = cxgb4vf_poll_controller; 2635 2629 #endif ··· 2847 2843 CH_DEVICE(0x4800, 0), /* T440-dbg */ 2848 2844 CH_DEVICE(0x4801, 0), /* T420-cr */ 2849 2845 CH_DEVICE(0x4802, 0), /* T422-cr */ 2846 + CH_DEVICE(0x4803, 0), /* T440-cr */ 2847 + CH_DEVICE(0x4804, 0), /* T420-bch */ 2848 + CH_DEVICE(0x4805, 0), /* T440-bch */ 2849 + CH_DEVICE(0x4806, 0), /* T460-ch */ 2850 + CH_DEVICE(0x4807, 0), /* T420-so */ 2851 + CH_DEVICE(0x4808, 0), /* T420-cx */ 2852 + CH_DEVICE(0x4809, 0), /* T420-bt */ 2853 + CH_DEVICE(0x480a, 0), /* T404-bt */ 2850 2854 { 0, } 2851 2855 }; 2852 2856
+75 -47
drivers/net/cxgb4vf/sge.c
··· 154 154 */ 155 155 RX_COPY_THRES = 256, 156 156 RX_PULL_LEN = 128, 157 - }; 158 157 159 - /* 160 - * Can't define this in the above enum because PKTSHIFT isn't a constant in 161 - * the VF Driver ... 162 - */ 163 - #define RX_PKT_PULL_LEN (RX_PULL_LEN + PKTSHIFT) 158 /* 159 * Main body length for sk_buffs used for RX Ethernet packets with 160 * fragments. Should be >= RX_PULL_LEN but possibly bigger to give 161 * pskb_may_pull() some room. 162 */ 163 RX_SKB_LEN = 512, 164 }; 164 165 165 166 /* 166 167 * Software state per TX descriptor. ··· 1356 1355 } 1357 1356 1358 1357 /** 1358 + * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list 1359 + * @gl: the gather list 1360 + * @skb_len: size of sk_buff main body if it carries fragments 1361 + * @pull_len: amount of data to move to the sk_buff's main body 1362 + * 1363 + * Builds an sk_buff from the given packet gather list. Returns the 1364 + * sk_buff or %NULL if sk_buff allocation failed. 1365 + */ 1366 + struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl, 1367 + unsigned int skb_len, unsigned int pull_len) 1368 + { 1369 + struct sk_buff *skb; 1370 + struct skb_shared_info *ssi; 1371 + 1372 + /* 1373 + * If the ingress packet is small enough, allocate an skb large enough 1374 + * for all of the data and copy it inline. Otherwise, allocate an skb 1375 + * with enough room to pull in the header and reference the rest of 1376 + * the data via the skb fragment list. 1377 + * 1378 + * Below we rely on RX_COPY_THRES being less than the smallest Rx 1379 + * buffer size, which is expected since buffers are at least 1380 + * PAGE_SIZEd. In this case packets up to RX_COPY_THRES have only one 1381 + * fragment. 1382 + */ 1383 + if (gl->tot_len <= RX_COPY_THRES) { 1384 + /* small packets have only one fragment */ 1385 + skb = alloc_skb(gl->tot_len, GFP_ATOMIC); 1386 + if (unlikely(!skb)) 1387 + goto out; 1388 + __skb_put(skb, gl->tot_len); 1389 + skb_copy_to_linear_data(skb, gl->va, gl->tot_len); 1390 + } else { 1391 + skb = alloc_skb(skb_len, GFP_ATOMIC); 1392 + if (unlikely(!skb)) 1393 + goto out; 1394 + __skb_put(skb, pull_len); 1395 + skb_copy_to_linear_data(skb, gl->va, pull_len); 1396 + 1397 + ssi = skb_shinfo(skb); 1398 + ssi->frags[0].page = gl->frags[0].page; 1399 + ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len; 1400 + ssi->frags[0].size = gl->frags[0].size - pull_len; 1401 + if (gl->nfrags > 1) 1402 + memcpy(&ssi->frags[1], &gl->frags[1], 1403 + (gl->nfrags-1) * sizeof(skb_frag_t)); 1404 + ssi->nr_frags = gl->nfrags; 1405 + 1406 + skb->len = gl->tot_len; 1407 + skb->data_len = skb->len - pull_len; 1408 + skb->truesize += skb->data_len; 1409 + 1410 + /* Get a reference for the last page, we don't own it */ 1411 + get_page(gl->frags[gl->nfrags - 1].page); 1412 + } 1413 + 1414 + out: 1415 + return skb; 1416 + } 1417 + 1418 + /** 1359 1419 * t4vf_pktgl_free - free a packet gather list 1360 1420 * @gl: the gather list 1361 1421 * ··· 1525 1463 { 1526 1464 struct sk_buff *skb; 1527 1465 struct port_info *pi; 1528 - struct skb_shared_info *ssi; 1529 1466 const struct cpl_rx_pkt *pkt = (void *)&rsp[1]; 1530 1467 bool csum_ok = pkt->csum_calc && !pkt->err_vec; 1531 - unsigned int len = be16_to_cpu(pkt->len); 1532 1468 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); 1533 1469 1534 1470 /* ··· 1541 1481 } 1542 1482 1543 1483 /* 1544 - * If the ingress packet is small enough, allocate an skb large enough 1545 - * for all of the data and copy it inline. Otherwise, allocate an skb 1546 - * with enough room to pull in the header and reference the rest of 1547 - * the data via the skb fragment list. 1484 + * Convert the Packet Gather List into an skb. 1548 1485 */ 1549 - if (len <= RX_COPY_THRES) { 1550 - /* small packets have only one fragment */ 1551 - skb = alloc_skb(gl->frags[0].size, GFP_ATOMIC); 1552 - if (!skb) 1553 - goto nomem; 1554 - __skb_put(skb, gl->frags[0].size); 1555 - skb_copy_to_linear_data(skb, gl->va, gl->frags[0].size); 1556 - } else { 1557 - skb = alloc_skb(RX_PKT_PULL_LEN, GFP_ATOMIC); 1558 - if (!skb) 1559 - goto nomem; 1560 - __skb_put(skb, RX_PKT_PULL_LEN); 1561 - skb_copy_to_linear_data(skb, gl->va, RX_PKT_PULL_LEN); 1562 - 1563 - ssi = skb_shinfo(skb); 1564 - ssi->frags[0].page = gl->frags[0].page; 1565 - ssi->frags[0].page_offset = (gl->frags[0].page_offset + 1566 - RX_PKT_PULL_LEN); 1567 - ssi->frags[0].size = gl->frags[0].size - RX_PKT_PULL_LEN; 1568 - if (gl->nfrags > 1) 1569 - memcpy(&ssi->frags[1], &gl->frags[1], 1570 - (gl->nfrags-1) * sizeof(skb_frag_t)); 1571 - ssi->nr_frags = gl->nfrags; 1572 - skb->len = len + PKTSHIFT; 1573 - skb->data_len = skb->len - RX_PKT_PULL_LEN; 1574 - skb->truesize += skb->data_len; 1575 - 1576 - /* Get a reference for the last page, we don't own it */ 1577 - get_page(gl->frags[gl->nfrags - 1].page); 1486 + skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN); 1487 + if (unlikely(!skb)) { 1488 + t4vf_pktgl_free(gl); 1489 + rxq->stats.rx_drops++; 1490 + return 0; 1578 1491 } 1579 - 1580 1492 __skb_pull(skb, PKTSHIFT); 1581 1493 skb->protocol = eth_type_trans(skb, rspq->netdev); 1582 1494 skb_record_rx_queue(skb, rspq->idx); ··· 1580 1548 } else 1581 1549 netif_receive_skb(skb); 1582 1550 1583 - return 0; 1584 - 1585 - nomem: 1586 - t4vf_pktgl_free(gl); 1587 - rxq->stats.rx_drops++; 1588 1551 return 0; 1589 1552 } 1590 1553 ··· 1706 1679 } 1707 1680 len = RSPD_LEN(len); 1708 1681 } 1682 + gl.tot_len = len; 1709 1683 1710 1684 /* 1711 1685 * Gather packet fragments.
+1
drivers/net/cxgb4vf/t4vf_common.h
··· 235 235 int __devinit t4vf_wait_dev_ready(struct adapter *); 236 236 int __devinit t4vf_port_init(struct adapter *, int); 237 237 238 + int t4vf_fw_reset(struct adapter *); 238 239 int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *); 239 240 int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *); 240 241
+19
drivers/net/cxgb4vf/t4vf_hw.c
··· 326 326 } 327 327 328 328 /** 329 + * t4vf_fw_reset - issue a reset to FW 330 + * @adapter: the adapter 331 + * 332 + * Issues a reset command to FW. For a Physical Function this would 333 + * result in the Firmware resetting all of its state. For a Virtual 334 + * Function this just resets the state associated with the VF. 335 + */ 336 + int t4vf_fw_reset(struct adapter *adapter) 337 + { 338 + struct fw_reset_cmd cmd; 339 + 340 + memset(&cmd, 0, sizeof(cmd)); 341 + cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) | 342 + FW_CMD_WRITE); 343 + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); 344 + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); 345 + } 346 + 347 + /** 329 348 * t4vf_query_params - query FW or device parameters 330 349 * @adapter: the adapter 331 350 * @nparams: the number of parameters
+3 -2
drivers/net/gianfar_ethtool.c
··· 635 635 if (wol->wolopts & ~WAKE_MAGIC) 636 636 return -EINVAL; 637 637 638 + device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC); 639 + 638 640 spin_lock_irqsave(&priv->bflock, flags); 639 - priv->wol_en = wol->wolopts & WAKE_MAGIC ? 1 : 0; 640 - device_set_wakeup_enable(&dev->dev, priv->wol_en); 641 + priv->wol_en = !!device_may_wakeup(&dev->dev); 641 642 spin_unlock_irqrestore(&priv->bflock, flags); 642 643 643 644 return 0;
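The point of the hunk above (the r8169 hunk below makes the same move) is lock ordering: device_set_wakeup_enable() can sleep, so it must not run while a spinlock is held. Distilled into a sketch using the names from this driver, with my_priv standing in for the real private struct:

    static int my_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
    {
    	struct my_priv *priv = netdev_priv(dev);	/* hypothetical */
    	unsigned long flags;

    	/* may sleep: must run before the spinlock is taken */
    	device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);

    	/* the non-sleeping state update is all the lock needs to cover */
    	spin_lock_irqsave(&priv->bflock, flags);
    	priv->wol_en = !!device_may_wakeup(&dev->dev);
    	spin_unlock_irqrestore(&priv->bflock, flags);
    	return 0;
    }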
+32 -28
drivers/net/ixgbe/ixgbe_main.c
··· 764 764 #ifdef IXGBE_FCOE 765 765 /* adjust for FCoE Sequence Offload */ 766 766 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) 767 - && (skb->protocol == htons(ETH_P_FCOE)) && 768 - skb_is_gso(skb)) { 767 + && skb_is_gso(skb) 768 + && vlan_get_protocol(skb) == 769 + htons(ETH_P_FCOE)) { 769 770 hlen = skb_transport_offset(skb) + 770 771 sizeof(struct fc_frame_header) + 771 772 sizeof(struct fcoe_crc_eof); ··· 5824 5823 5825 5824 static int ixgbe_tso(struct ixgbe_adapter *adapter, 5826 5825 struct ixgbe_ring *tx_ring, struct sk_buff *skb, 5827 - u32 tx_flags, u8 *hdr_len) 5826 + u32 tx_flags, u8 *hdr_len, __be16 protocol) 5828 5827 { 5829 5828 struct ixgbe_adv_tx_context_desc *context_desc; 5830 5829 unsigned int i; ··· 5842 5841 l4len = tcp_hdrlen(skb); 5843 5842 *hdr_len += l4len; 5844 5843 5845 - if (skb->protocol == htons(ETH_P_IP)) { 5844 + if (protocol == htons(ETH_P_IP)) { 5846 5845 struct iphdr *iph = ip_hdr(skb); 5847 5846 iph->tot_len = 0; 5848 5847 iph->check = 0; ··· 5881 5880 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | 5882 5881 IXGBE_ADVTXD_DTYP_CTXT); 5883 5882 5884 - if (skb->protocol == htons(ETH_P_IP)) 5883 + if (protocol == htons(ETH_P_IP)) 5885 5884 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 5886 5885 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; 5887 5886 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); ··· 5907 5906 return false; 5908 5907 } 5909 5908 5910 - static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb) 5909 + static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb, 5910 + __be16 protocol) 5911 5911 { 5912 5912 u32 rtn = 0; 5913 - __be16 protocol; 5914 - 5915 - if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) 5916 - protocol = ((const struct vlan_ethhdr *)skb->data)-> 5917 - h_vlan_encapsulated_proto; 5918 - else 5919 - protocol = skb->protocol; 5920 5913 5921 5914 switch (protocol) { 5922 5915 case cpu_to_be16(ETH_P_IP): ··· 5938 5943 default: 5939 5944 if (unlikely(net_ratelimit())) 5940 5945 e_warn(probe, "partial checksum but proto=%x!\n", 5941 - skb->protocol); 5946 + protocol); 5942 5947 break; 5943 5948 } ··· 5947 5952 5948 5953 static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, 5949 5954 struct ixgbe_ring *tx_ring, 5950 - struct sk_buff *skb, u32 tx_flags) 5955 + struct sk_buff *skb, u32 tx_flags, 5956 + __be16 protocol) 5951 5957 { 5952 5958 struct ixgbe_adv_tx_context_desc *context_desc; 5953 5959 unsigned int i; ··· 5977 5981 IXGBE_ADVTXD_DTYP_CTXT); 5978 5982 5979 5983 if (skb->ip_summed == CHECKSUM_PARTIAL) 5980 - type_tucmd_mlhl |= ixgbe_psum(adapter, skb); 5984 + type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol); 5981 5985 5982 5986 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 5983 5987 /* use index zero for tx checksum offload */ ··· 6175 6179 } 6176 6180 6177 6181 static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, 6178 - int queue, u32 tx_flags) 6182 + int queue, u32 tx_flags, __be16 protocol) 6179 6183 { 6180 6184 struct ixgbe_atr_input atr_input; 6181 6185 struct tcphdr *th; ··· 6186 6190 u8 l4type = 0; 6187 6191 6188 6192 /* Right now, we support IPv4 only */ 6189 - if (skb->protocol != htons(ETH_P_IP)) 6193 + if (protocol != htons(ETH_P_IP)) 6190 6194 return; 6191 6195 /* check if we're UDP or TCP */ 6192 6196 if (iph->protocol == IPPROTO_TCP) { ··· 6253 6257 { 6254 6258 struct ixgbe_adapter *adapter = netdev_priv(dev); 6255 6259 int txq = smp_processor_id(); 6256 - 6257 6260 #ifdef IXGBE_FCOE 6258 - if ((skb->protocol == htons(ETH_P_FCOE)) || 6259 - (skb->protocol == htons(ETH_P_FIP))) { 6261 + __be16 protocol; 6262 + 6263 + protocol = vlan_get_protocol(skb); 6264 + 6265 + if ((protocol == htons(ETH_P_FCOE)) || 6266 + (protocol == htons(ETH_P_FIP))) { 6260 6267 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 6261 6268 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); 6262 6269 txq += adapter->ring_feature[RING_F_FCOE].mask; ··· 6302 6303 int tso; 6303 6304 int count = 0; 6304 6305 unsigned int f; 6306 + __be16 protocol; 6307 + 6308 + protocol = vlan_get_protocol(skb); 6305 6309 6306 6310 if (vlan_tx_tag_present(skb)) { 6307 6311 tx_flags |= vlan_tx_tag_get(skb); ··· 6325 6323 /* for FCoE with DCB, we force the priority to what 6326 6324 * was specified by the switch */ 6327 6325 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && 6328 - (skb->protocol == htons(ETH_P_FCOE) || 6329 - skb->protocol == htons(ETH_P_FIP))) { 6326 + (protocol == htons(ETH_P_FCOE) || 6327 + protocol == htons(ETH_P_FIP))) { 6330 6328 #ifdef CONFIG_IXGBE_DCB 6331 6329 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 6332 6330 tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK ··· 6336 6334 } 6337 6335 #endif 6338 6336 /* flag for FCoE offloads */ 6339 - if (skb->protocol == htons(ETH_P_FCOE)) 6337 + if (protocol == htons(ETH_P_FCOE)) 6340 6338 tx_flags |= IXGBE_TX_FLAGS_FCOE; 6341 6339 } 6342 6340 #endif ··· 6370 6368 tx_flags |= IXGBE_TX_FLAGS_FSO; 6371 6369 #endif /* IXGBE_FCOE */ 6372 6370 } else { 6373 - if (skb->protocol == htons(ETH_P_IP)) 6371 + if (protocol == htons(ETH_P_IP)) 6374 6372 tx_flags |= IXGBE_TX_FLAGS_IPV4; 6375 - tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len); 6373 + tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, 6374 + protocol); 6376 6375 if (tso < 0) { 6377 6376 dev_kfree_skb_any(skb); 6378 6377 return NETDEV_TX_OK; ··· 6381 6378 6382 6379 if (tso) 6383 6380 tx_flags |= IXGBE_TX_FLAGS_TSO; 6384 - else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) && 6381 + else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags, 6382 + protocol) && 6385 6383 (skb->ip_summed == CHECKSUM_PARTIAL)) 6386 6384 tx_flags |= IXGBE_TX_FLAGS_CSUM; 6387 6385 } ··· 6396 6392 test_bit(__IXGBE_FDIR_INIT_DONE, 6397 6393 &tx_ring->reinit_state)) { 6398 6394 ixgbe_atr(adapter, skb, tx_ring->queue_index, 6399 6395 tx_flags, protocol); 6400 6396 tx_ring->atr_count = 0; 6401 6397 } 6402 6398 }
+20 -10
drivers/net/pcmcia/axnet_cs.c
··· 111 111 112 112 typedef struct axnet_dev_t { 113 113 struct pcmcia_device *p_dev; 114 - caddr_t base; 115 - struct timer_list watchdog; 116 - int stale, fast_poll; 117 - u_short link_status; 118 - u_char duplex_flag; 119 - int phy_id; 120 - int flags; 114 + caddr_t base; 115 + struct timer_list watchdog; 116 + int stale, fast_poll; 117 + u_short link_status; 118 + u_char duplex_flag; 119 + int phy_id; 120 + int flags; 121 + int active_low; 121 122 } axnet_dev_t; 122 123 123 124 static inline axnet_dev_t *PRIV(struct net_device *dev) ··· 323 322 if (info->flags & IS_AX88790) 324 323 outb(0x10, dev->base_addr + AXNET_GPIO); /* select Internal PHY */ 325 324 325 + info->active_low = 0; 326 + 326 327 for (i = 0; i < 32; i++) { 327 328 j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1); 328 329 j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2); ··· 332 329 if ((j != 0) && (j != 0xffff)) break; 333 330 } 334 331 335 - /* Maybe PHY is in power down mode. (PPD_SET = 1) 336 - Bit 2 of CCSR is active low. */ 337 332 if (i == 32) { 333 + /* Maybe PHY is in power down mode. (PPD_SET = 1) 334 + Bit 2 of CCSR is active low. */ 338 335 pcmcia_write_config_byte(link, CISREG_CCSR, 0x04); 339 336 for (i = 0; i < 32; i++) { 340 337 j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1); 341 338 j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2); 342 339 if (j == j2) continue; 343 - if ((j != 0) && (j != 0xffff)) break; 340 + if ((j != 0) && (j != 0xffff)) { 341 + info->active_low = 1; 342 + break; 343 + } 344 344 } 345 345 } 346 346 ··· 389 383 static int axnet_resume(struct pcmcia_device *link) 390 384 { 391 385 struct net_device *dev = link->priv; 386 + axnet_dev_t *info = PRIV(dev); 392 387 393 388 if (link->open) { 389 + if (info->active_low == 1) 390 + pcmcia_write_config_byte(link, CISREG_CCSR, 0x04); 391 + 394 392 axnet_reset_8390(dev); 395 393 AX88190_init(dev, 1); 396 394 netif_device_attach(dev);
+5 -4
drivers/net/r8169.c
··· 846 846 else 847 847 tp->features &= ~RTL_FEATURE_WOL; 848 848 __rtl8169_set_wol(tp, wol->wolopts); 849 - device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); 850 - 851 849 spin_unlock_irq(&tp->lock); 850 + 851 + device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); 852 852 853 853 return 0; 854 854 } ··· 2931 2931 .hw_start = rtl_hw_start_8168, 2932 2932 .region = 2, 2933 2933 .align = 8, 2934 - .intr_event = SYSErr | RxFIFOOver | LinkChg | RxOverflow | 2934 + .intr_event = SYSErr | LinkChg | RxOverflow | 2935 2935 TxErr | TxOK | RxOK | RxErr, 2936 2936 .napi_event = TxErr | TxOK | RxOK | RxOverflow, 2937 2937 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI, ··· 4588 4588 } 4589 4589 4590 4590 /* Work around for rx fifo overflow */ 4591 - if (unlikely(status & RxFIFOOver)) { 4591 + if (unlikely(status & RxFIFOOver) && 4592 + (tp->mac_version == RTL_GIGA_MAC_VER_11)) { 4592 4593 netif_stop_queue(dev); 4593 4594 rtl8169_tx_timeout(dev); 4594 4595 break;
-1
drivers/net/skge.c
··· 3858 3858 3859 3859 /* device is off until link detection */ 3860 3860 netif_carrier_off(dev); 3861 - netif_stop_queue(dev); 3862 3861 3863 3862 return dev; 3864 3863 }
+16 -9
drivers/net/ucc_geth.c
··· 2050 2050 2051 2051 ugeth_vdbg("%s: IN", __func__); 2052 2052 2053 + /* 2054 + * Tell the kernel the link is down. 2055 + * Must be done before disabling the controller 2056 + * or deadlock may happen. 2057 + */ 2058 + phy_stop(phydev); 2059 + 2053 2060 /* Disable the controller */ 2054 2061 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); 2055 - 2056 - /* Tell the kernel the link is down */ 2057 - phy_stop(phydev); 2058 2062 2059 2063 /* Mask all interrupts */ 2060 2064 out_be32(ugeth->uccf->p_uccm, 0x00000000); ··· 2068 2064 2069 2065 /* Disable Rx and Tx */ 2070 2066 clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); 2071 - 2072 - phy_disconnect(ugeth->phydev); 2073 - ugeth->phydev = NULL; 2074 2067 2075 2068 ucc_geth_memclean(ugeth); 2076 2069 } ··· 3551 3550 3552 3551 napi_disable(&ugeth->napi); 3553 3552 3553 + cancel_work_sync(&ugeth->timeout_work); 3554 3554 ucc_geth_stop(ugeth); 3555 + phy_disconnect(ugeth->phydev); 3556 + ugeth->phydev = NULL; 3555 3557 3556 3558 free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev); 3557 3559 ··· 3583 3579 * Must reset MAC *and* PHY. This is done by reopening 3584 3580 * the device. 3585 3581 */ 3586 - ucc_geth_close(dev); 3587 - ucc_geth_open(dev); 3582 + netif_tx_stop_all_queues(dev); 3583 + ucc_geth_stop(ugeth); 3584 + ucc_geth_init_mac(ugeth); 3585 + /* Must start PHY here */ 3586 + phy_start(ugeth->phydev); 3587 + netif_tx_start_all_queues(dev); 3588 3588 } 3589 3589 3590 3590 netif_tx_schedule_all(dev); ··· 3602 3594 { 3603 3595 struct ucc_geth_private *ugeth = netdev_priv(dev); 3604 3596 3605 - netif_carrier_off(dev); 3606 3597 schedule_work(&ugeth->timeout_work); 3607 3598 } 3608 3599
+9 -3
drivers/net/virtio_net.c
··· 986 986 goto unregister; 987 987 } 988 988 989 - vi->status = VIRTIO_NET_S_LINK_UP; 990 - virtnet_update_status(vi); 991 - netif_carrier_on(dev); 989 + /* Assume link up if device can't report link status, 990 + otherwise get link status from config. */ 991 + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { 992 + netif_carrier_off(dev); 993 + virtnet_update_status(vi); 994 + } else { 995 + vi->status = VIRTIO_NET_S_LINK_UP; 996 + netif_carrier_on(dev); 997 + } 992 998 993 999 pr_debug("virtnet: registered device %s\n", dev->name); 994 1000 return 0;
+3
drivers/net/wireless/ath/ath9k/ar9002_hw.c
··· 410 410 val &= ~(AR_WA_BIT6 | AR_WA_BIT7); 411 411 } 412 412 413 + if (AR_SREV_9280(ah)) 414 + val |= AR_WA_BIT22; 415 + 413 416 if (AR_SREV_9285E_20(ah)) 414 417 val |= AR_WA_BIT23; 415 418
+1
drivers/net/wireless/ath/ath9k/ath9k.h
··· 675 675 } 676 676 677 677 extern struct ieee80211_ops ath9k_ops; 678 + extern struct pm_qos_request_list ath9k_pm_qos_req; 678 679 extern int modparam_nohwcrypt; 679 680 extern int led_blink; 680 681
+15 -16
drivers/net/wireless/ath/ath9k/hif_usb.c
··· 35 35 { USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */ 36 36 { USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */ 37 37 { USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */ 38 + { USB_DEVICE(0x13D3, 0x3346) }, /* IMC Networks */ 38 39 { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */ 39 40 { USB_DEVICE(0x083A, 0xA704) }, /* SMC Networks */ 40 41 { }, ··· 541 540 return; 542 541 } 543 542 544 - usb_fill_int_urb(urb, hif_dev->udev, 543 + usb_fill_bulk_urb(urb, hif_dev->udev, 545 544 usb_rcvbulkpipe(hif_dev->udev, 546 545 USB_REG_IN_PIPE), 547 546 nskb->data, MAX_REG_IN_BUF_SIZE, 548 - ath9k_hif_usb_reg_in_cb, nskb, 1); 547 + ath9k_hif_usb_reg_in_cb, nskb); 549 548 550 549 ret = usb_submit_urb(urb, GFP_ATOMIC); 551 550 if (ret) { ··· 721 720 if (!skb) 722 721 goto err; 723 722 724 - usb_fill_int_urb(hif_dev->reg_in_urb, hif_dev->udev, 723 + usb_fill_bulk_urb(hif_dev->reg_in_urb, hif_dev->udev, 725 724 usb_rcvbulkpipe(hif_dev->udev, 726 725 USB_REG_IN_PIPE), 727 726 skb->data, MAX_REG_IN_BUF_SIZE, 728 - ath9k_hif_usb_reg_in_cb, skb, 1); 727 + ath9k_hif_usb_reg_in_cb, skb); 729 728 730 729 if (usb_submit_urb(hif_dev->reg_in_urb, GFP_KERNEL) != 0) 731 730 goto err; ··· 844 843 goto err_fw_req; 845 844 } 846 845 847 - /* Alloc URBs */ 848 - ret = ath9k_hif_usb_alloc_urbs(hif_dev); 849 - if (ret) { 850 - dev_err(&hif_dev->udev->dev, 851 - "ath9k_htc: Unable to allocate URBs\n"); 852 - goto err_urb; 853 - } 854 - 855 846 /* Download firmware */ 856 847 ret = ath9k_hif_usb_download_fw(hif_dev); 857 848 if (ret) { ··· 859 866 */ 860 867 for (idx = 0; idx < alt->desc.bNumEndpoints; idx++) { 861 868 endp = &alt->endpoint[idx].desc; 862 - if (((endp->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) 863 - == 0x04) && 864 - ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) 865 - == USB_ENDPOINT_XFER_INT)) { 869 + if ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) 870 + == USB_ENDPOINT_XFER_INT) { 866 871 endp->bmAttributes &= ~USB_ENDPOINT_XFERTYPE_MASK; 867 872 endp->bmAttributes |= USB_ENDPOINT_XFER_BULK; 868 873 endp->bInterval = 0; 869 874 } 875 + } 876 + 877 + /* Alloc URBs */ 878 + ret = ath9k_hif_usb_alloc_urbs(hif_dev); 879 + if (ret) { 880 + dev_err(&hif_dev->udev->dev, 881 + "ath9k_htc: Unable to allocate URBs\n"); 882 + goto err_urb; 870 883 } 871 884 872 885 return 0;
+13 -2
drivers/net/wireless/ath/ath9k/hw.c
··· 484 484 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, 485 485 "Failed allocating banks for " 486 486 "external radio\n"); 487 + ath9k_hw_rf_free_ext_banks(ah); 487 488 return ecode; 488 489 } 489 490 ··· 953 952 REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); 954 953 break; 955 954 case NL80211_IFTYPE_STATION: 956 - case NL80211_IFTYPE_MONITOR: 957 955 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE); 956 + break; 957 + default: 958 + if (ah->is_monitoring) 959 + REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE); 958 960 break; 959 961 } 960 962 } ··· 1638 1634 1639 1635 switch (ah->opmode) { 1640 1636 case NL80211_IFTYPE_STATION: 1641 - case NL80211_IFTYPE_MONITOR: 1642 1637 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon)); 1643 1638 REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff); 1644 1639 REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff); ··· 1666 1663 AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN; 1667 1664 break; 1668 1665 default: 1666 + if (ah->is_monitoring) { 1667 + REG_WRITE(ah, AR_NEXT_TBTT_TIMER, 1668 + TU_TO_USEC(next_beacon)); 1669 + REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff); 1670 + REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff); 1671 + flags |= AR_TBTT_TIMER_EN; 1672 + break; 1673 + } 1669 1674 ath_print(ath9k_hw_common(ah), ATH_DBG_BEACON, 1670 1675 "%s: unsupported opmode: %d\n", 1671 1676 __func__, ah->opmode);
+1
drivers/net/wireless/ath/ath9k/hw.h
··· 622 622 623 623 bool sw_mgmt_crypto; 624 624 bool is_pciexpress; 625 + bool is_monitoring; 625 626 bool need_an_top2_fixup; 626 627 u16 tx_trig_level; 627 628
+8
drivers/net/wireless/ath/ath9k/init.c
··· 15 15 */ 16 16 17 17 #include <linux/slab.h> 18 + #include <linux/pm_qos_params.h> 18 19 19 20 #include "ath9k.h" 20 21 ··· 179 178 .read = ath9k_ioread32, 180 179 .write = ath9k_iowrite32, 181 180 }; 181 + 182 + struct pm_qos_request_list ath9k_pm_qos_req; 182 183 183 184 /**************************/ 184 185 /* Initialization */ ··· 759 756 ath_init_leds(sc); 760 757 ath_start_rfkill_poll(sc); 761 758 759 + pm_qos_add_request(&ath9k_pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 760 + PM_QOS_DEFAULT_VALUE); 761 + 762 762 return 0; 763 763 764 764 error_world: ··· 816 810 int i = 0; 817 811 818 812 ath9k_ps_wakeup(sc); 813 + 814 + pm_qos_remove_request(&ath9k_pm_qos_req); 819 815 820 816 wiphy_rfkill_stop_polling(sc->hw->wiphy); 821 817 ath_deinit_leds(sc);
+21 -8
drivers/net/wireless/ath/ath9k/main.c
··· 15 15 */ 16 16 17 17 #include <linux/nl80211.h> 18 + #include <linux/pm_qos_params.h> 18 19 #include "ath9k.h" 19 20 #include "btcoex.h" 20 21 ··· 94 93 { 95 94 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 96 95 unsigned long flags; 96 + enum ath9k_power_mode power_mode; 97 97 98 98 spin_lock_irqsave(&sc->sc_pm_lock, flags); 99 99 if (++sc->ps_usecount != 1) 100 100 goto unlock; 101 101 102 + power_mode = sc->sc_ah->power_mode; 102 103 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); 103 104 104 105 /* ··· 108 105 * useful data. Better clear them now so that they don't mess up 109 106 * survey data results. 110 107 */ 111 - spin_lock(&common->cc_lock); 112 - ath_hw_cycle_counters_update(common); 113 - memset(&common->cc_survey, 0, sizeof(common->cc_survey)); 114 - spin_unlock(&common->cc_lock); 108 + if (power_mode != ATH9K_PM_AWAKE) { 109 + spin_lock(&common->cc_lock); 110 + ath_hw_cycle_counters_update(common); 111 + memset(&common->cc_survey, 0, sizeof(common->cc_survey)); 112 + spin_unlock(&common->cc_lock); 113 + } 115 114 116 115 unlock: 117 116 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); ··· 1222 1217 ah->imask |= ATH9K_INT_CST; 1223 1218 1224 1219 sc->sc_flags &= ~SC_OP_INVALID; 1220 + sc->sc_ah->is_monitoring = false; 1225 1221 1226 1222 /* Disable BMISS interrupt when we're not associated */ 1227 1223 ah->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS); ··· 1243 1237 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) 1244 1238 ath9k_btcoex_timer_resume(sc); 1245 1239 } 1240 + 1241 + pm_qos_update_request(&ath9k_pm_qos_req, 55); 1246 1242 1247 1243 mutex_unlock: 1248 1244 mutex_unlock(&sc->mutex); ··· 1423 1415 1424 1416 sc->sc_flags |= SC_OP_INVALID; 1425 1417 1418 + pm_qos_update_request(&ath9k_pm_qos_req, PM_QOS_DEFAULT_VALUE); 1419 + 1426 1420 mutex_unlock(&sc->mutex); 1427 1421 1428 1422 ath_print(common, ATH_DBG_CONFIG, "Driver halt\n"); ··· 1503 1493 ath9k_hw_set_interrupts(ah, ah->imask); 1504 1494 1505 1495 if (vif->type == NL80211_IFTYPE_AP || 1506 - vif->type == NL80211_IFTYPE_ADHOC || 1507 - vif->type == NL80211_IFTYPE_MONITOR) { 1496 + vif->type == NL80211_IFTYPE_ADHOC) { 1508 1497 sc->sc_flags |= SC_OP_ANI_RUN; 1509 1498 ath_start_ani(common); 1510 1499 } ··· 1653 1644 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 1654 1645 if (conf->flags & IEEE80211_CONF_MONITOR) { 1655 1646 ath_print(common, ATH_DBG_CONFIG, 1656 - "HW opmode set to Monitor mode\n"); 1657 - sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR; 1647 + "Monitor mode is enabled\n"); 1648 + sc->sc_ah->is_monitoring = true; 1649 + } else { 1650 + ath_print(common, ATH_DBG_CONFIG, 1651 + "Monitor mode is disabled\n"); 1652 + sc->sc_ah->is_monitoring = false; 1658 1653 } 1659 1654 } 1660 1655
+2 -2
drivers/net/wireless/ath/ath9k/recv.c
··· 441 441 */ 442 442 if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) && 443 443 (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) || 444 - (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR)) 444 + (sc->sc_ah->is_monitoring)) 445 445 rfilt |= ATH9K_RX_FILTER_PROM; 446 446 447 447 if (sc->rx.rxfilter & FIF_CONTROL) ··· 897 897 * decryption and MIC failures. For monitor mode, 898 898 * we also ignore the CRC error. 899 899 */ 900 - if (ah->opmode == NL80211_IFTYPE_MONITOR) { 900 + if (ah->is_monitoring) { 901 901 if (rx_stats->rs_status & 902 902 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | 903 903 ATH9K_RXERR_CRC))
+1
drivers/net/wireless/ath/ath9k/reg.h
··· 703 703 #define AR_WA_RESET_EN (1 << 18) /* Sw Control to enable PCI-Reset to POR (bit 15) */ 704 704 #define AR_WA_ANALOG_SHIFT (1 << 20) 705 705 #define AR_WA_POR_SHORT (1 << 21) /* PCI-E Phy reset control */ 706 + #define AR_WA_BIT22 (1 << 22) 706 707 #define AR9285_WA_DEFAULT 0x004a050b 707 708 #define AR9280_WA_DEFAULT 0x0040073b 708 709 #define AR_WA_DEFAULT 0x0000073f
+3 -1
drivers/net/wireless/ath/carl9170/usb.c
··· 82 82 { USB_DEVICE(0x07d1, 0x3c10) }, 83 83 /* D-Link DWA 160 A2 */ 84 84 { USB_DEVICE(0x07d1, 0x3a09) }, 85 + /* D-Link DWA 130 D */ 86 + { USB_DEVICE(0x07d1, 0x3a0f) }, 85 87 /* Netgear WNA1000 */ 86 88 { USB_DEVICE(0x0846, 0x9040) }, 87 - /* Netgear WNDA3100 */ 89 + /* Netgear WNDA3100 (v1) */ 88 90 { USB_DEVICE(0x0846, 0x9010) }, 89 91 /* Netgear WN111 v2 */ 90 92 { USB_DEVICE(0x0846, 0x9001), .driver_info = CARL9170_ONE_LED },
+2 -1
drivers/net/wireless/iwlwifi/iwl3945-base.c
··· 4000 4000 * "the hard way", rather than using device's scan. 4001 4001 */ 4002 4002 if (iwl3945_mod_params.disable_hw_scan) { 4003 - IWL_ERR(priv, "sw scan support is deprecated\n"); 4003 + dev_printk(KERN_DEBUG, &(pdev->dev), 4004 + "sw scan support is deprecated\n"); 4004 4005 iwl3945_hw_ops.hw_scan = NULL; 4005 4006 } 4006 4007
+3 -2
drivers/net/wireless/libertas/cfg.c
··· 700 700 701 701 if (priv->scan_channel < priv->scan_req->n_channels) { 702 702 cancel_delayed_work(&priv->scan_work); 703 - queue_delayed_work(priv->work_thread, &priv->scan_work, 704 - msecs_to_jiffies(300)); 703 + if (!priv->stopping) 704 + queue_delayed_work(priv->work_thread, &priv->scan_work, 705 + msecs_to_jiffies(300)); 705 706 } 706 707 707 708 /* This is the final data we are about to send */
+1
drivers/net/wireless/libertas/dev.h
··· 36 36 /* CFG80211 */ 37 37 struct wireless_dev *wdev; 38 38 bool wiphy_registered; 39 + bool stopping; 39 40 struct cfg80211_scan_request *scan_req; 40 41 u8 assoc_bss[ETH_ALEN]; 41 42 u8 disassoc_reason;
+7
drivers/net/wireless/libertas/main.c
··· 104 104 lbs_deb_enter(LBS_DEB_NET); 105 105 106 106 spin_lock_irq(&priv->driver_lock); 107 + priv->stopping = false; 107 108 108 109 if (priv->connect_status == LBS_CONNECTED) 109 110 netif_carrier_on(dev); ··· 132 131 lbs_deb_enter(LBS_DEB_NET); 133 132 134 133 spin_lock_irq(&priv->driver_lock); 134 + priv->stopping = true; 135 135 netif_stop_queue(dev); 136 136 spin_unlock_irq(&priv->driver_lock); 137 137 138 138 schedule_work(&priv->mcast_work); 139 + cancel_delayed_work_sync(&priv->scan_work); 140 + if (priv->scan_req) { 141 + cfg80211_scan_done(priv->scan_req, false); 142 + priv->scan_req = NULL; 143 + } 139 144 140 145 lbs_deb_leave(LBS_DEB_NET); 141 146 return 0;
-9
drivers/s390/net/qeth_core.h
··· 440 440 * index of buffer to be filled by driver; state EMPTY or PACKING 441 441 */ 442 442 int next_buf_to_fill; 443 - int sync_iqdio_error; 444 443 /* 445 444 * number of buffers that are currently filled (PRIMED) 446 445 * -> these buffers are hardware-owned ··· 693 694 unsigned char mc_addrlen; 694 695 int is_vmac; 695 696 }; 696 - 697 - struct qeth_skb_data { 698 - __u32 magic; 699 - int count; 700 - }; 701 - 702 - #define QETH_SKB_MAGIC 0x71657468 703 - #define QETH_SIGA_CC2_RETRIES 3 704 697 705 698 struct qeth_rx { 706 699 int b_count;
+9 -46
drivers/s390/net/qeth_core_main.c
··· 877 877 return; 878 878 } 879 879 880 - static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, 881 - struct qeth_qdio_out_buffer *buf, unsigned int qeth_skip_skb) 880 + static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, 881 + struct qeth_qdio_out_buffer *buf) 882 882 { 883 883 int i; 884 884 struct sk_buff *skb; ··· 887 887 if (buf->buffer->element[0].flags & 0x40) 888 888 atomic_dec(&queue->set_pci_flags_count); 889 889 890 - if (!qeth_skip_skb) { 890 + skb = skb_dequeue(&buf->skb_list); 891 + while (skb) { 892 + atomic_dec(&skb->users); 893 + dev_kfree_skb_any(skb); 891 894 skb = skb_dequeue(&buf->skb_list); 892 - while (skb) { 893 - atomic_dec(&skb->users); 894 - dev_kfree_skb_any(skb); 895 - skb = skb_dequeue(&buf->skb_list); 896 - } 897 895 } 898 896 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) { 899 897 if (buf->buffer->element[i].addr && buf->is_header[i]) ··· 905 907 buf->buffer->element[15].flags = 0; 906 908 buf->next_element_to_fill = 0; 907 909 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); 908 - } 909 - 910 - static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, 911 - struct qeth_qdio_out_buffer *buf) 912 - { 913 - __qeth_clear_output_buffer(queue, buf, 0); 914 910 } 915 911 916 912 void qeth_clear_qdio_buffers(struct qeth_card *card) ··· 2825 2833 } 2826 2834 } 2827 2835 2828 - queue->sync_iqdio_error = 0; 2829 2836 queue->card->dev->trans_start = jiffies; 2830 2837 if (queue->card->options.performance_stats) { 2831 2838 queue->card->perf_stats.outbound_do_qdio_cnt++; ··· 2840 2849 queue->card->perf_stats.outbound_do_qdio_time += 2841 2850 qeth_get_micros() - 2842 2851 queue->card->perf_stats.outbound_do_qdio_start_time; 2843 - if (rc > 0) { 2844 - if (!(rc & QDIO_ERROR_SIGA_BUSY)) 2845 - queue->sync_iqdio_error = rc & 3; 2846 - } 2847 2852 if (rc) { 2848 2853 queue->card->stats.tx_errors += count; 2849 2854 /* ignore temporary SIGA errors without busy condition */ ··· 2903 2916 { 2904 2917 struct qeth_card *card = (struct qeth_card *)card_ptr; 2905 2918 2906 - if (card->dev) 2919 + if (card->dev && (card->dev->flags & IFF_UP)) 2907 2920 napi_schedule(&card->napi); 2908 2921 } 2909 2922 EXPORT_SYMBOL_GPL(qeth_qdio_start_poll); ··· 2927 2940 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; 2928 2941 struct qeth_qdio_out_buffer *buffer; 2929 2942 int i; 2930 - unsigned qeth_send_err; 2931 2943 QETH_CARD_TEXT(card, 6, "qdouhdl"); 2932 2944 if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { ··· 2942 2956 } 2943 2957 for (i = first_element; i < (first_element + count); ++i) { 2944 2958 buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; 2945 - qeth_send_err = qeth_handle_send_error(card, buffer, qdio_error); 2946 - __qeth_clear_output_buffer(queue, buffer, 2947 - (qeth_send_err == QETH_SEND_ERROR_RETRY) ? 1 : 0); 2959 + qeth_handle_send_error(card, buffer, qdio_error); 2960 + qeth_clear_output_buffer(queue, buffer); 2948 2961 } 2949 2962 atomic_sub(count, &queue->used_buffers); 2950 2963 /* check if we need to do something on this outbound queue */ ··· 3168 3183 int offset, int hd_len) 3169 3184 { 3170 3185 struct qeth_qdio_out_buffer *buffer; 3171 - struct sk_buff *skb1; 3172 - struct qeth_skb_data *retry_ctrl; 3173 3186 int index; 3174 - int rc; 3175 3187 3176 3188 /* spin until we get the queue ... */ 3177 3189 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, ··· 3187 3205 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); 3188 3206 qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len); 3189 3207 qeth_flush_buffers(queue, index, 1); 3190 - if (queue->sync_iqdio_error == 2) { 3191 - skb1 = skb_dequeue(&buffer->skb_list); 3192 - while (skb1) { 3193 - atomic_dec(&skb1->users); 3194 - skb1 = skb_dequeue(&buffer->skb_list); 3195 - } 3196 - retry_ctrl = (struct qeth_skb_data *) &skb->cb[16]; 3197 - if (retry_ctrl->magic != QETH_SKB_MAGIC) { 3198 - retry_ctrl->magic = QETH_SKB_MAGIC; 3199 - retry_ctrl->count = 0; 3200 - } 3201 - if (retry_ctrl->count < QETH_SIGA_CC2_RETRIES) { 3202 - retry_ctrl->count++; 3203 - rc = dev_queue_xmit(skb); 3204 - } else { 3205 - dev_kfree_skb_any(skb); 3206 - QETH_CARD_TEXT(card, 2, "qrdrop"); 3207 - } 3208 - } 3209 3208 return 0; 3210 3209 out: 3211 3210 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+25
include/linux/if_vlan.h
··· 339 339 } 340 340 } 341 341 342 + /** 343 + * vlan_get_protocol - get protocol EtherType. 344 + * @skb: skbuff to query 345 + * 346 + * Returns the EtherType of the packet, regardless of whether it is 347 + * vlan encapsulated (normal or hardware accelerated) or not. 348 + */ 349 + static inline __be16 vlan_get_protocol(const struct sk_buff *skb) 350 + { 351 + __be16 protocol = 0; 352 + 353 + if (vlan_tx_tag_present(skb) || 354 + skb->protocol != cpu_to_be16(ETH_P_8021Q)) 355 + protocol = skb->protocol; 356 + else { 357 + __be16 proto, *protop; 358 + protop = skb_header_pointer(skb, offsetof(struct vlan_ethhdr, 359 + h_vlan_encapsulated_proto), 360 + sizeof(proto), &proto); 361 + if (likely(protop)) 362 + protocol = *protop; 363 + } 364 + 365 + return protocol; 366 + } 342 367 #endif /* __KERNEL__ */ 343 368 344 369 /* VLAN IOCTLs are found in sockios.h */
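When vlan_tx_tag_present() is true the tag travels out-of-band and skb->protocol already holds the inner EtherType, so the helper only digs into the frame for in-band 802.1Q headers. A hedged sketch of how a driver offload path might use it, in the style of the bnx2x and ixgbe hunks above; my_enable_l3_csum() is a hypothetical helper, not part of this patch:

    static void my_tx_offload(struct sk_buff *skb)
    {
    	/* resolves the inner EtherType whether the 802.1Q tag is
    	 * hardware-accelerated or still embedded in the frame */
    	__be16 proto = vlan_get_protocol(skb);

    	if (proto == htons(ETH_P_IP))
    		my_enable_l3_csum(skb, ip_hdr(skb)->protocol);
    	else if (proto == htons(ETH_P_IPV6))
    		my_enable_l3_csum(skb, ipv6_hdr(skb)->nexthdr);
    }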
+5
include/linux/netdevice.h
··· 1554 1554 1555 1555 static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) 1556 1556 { 1557 + if (WARN_ON(!dev_queue)) { 1558 + printk(KERN_INFO "netif_stop_queue() cannot be called before " 1559 + "register_netdev()"); 1560 + return; 1561 + } 1557 1562 set_bit(__QUEUE_STATE_XOFF, &dev_queue->state); 1558 1563 } 1559 1564
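This guard catches drivers that stop a TX queue before register_netdev() has set up the queue array; the skge hunk above removes exactly such a call. A sketch of the wrong and right orderings in a hypothetical probe path (my_probe_tail is an assumed name):

    static int my_probe_tail(struct net_device *dev)
    {
    	/* WRONG: before register_netdev() the TX queues do not exist,
    	 * and such a call now trips the WARN_ON above */
    	/* netif_stop_queue(dev); */

    	netif_carrier_off(dev);		/* fine: only clears a flag */

    	return register_netdev(dev);	/* queues exist after this */
    }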
+1 -1
include/linux/netfilter.h
··· 215 215 int ret; 216 216 217 217 if (!cond || 218 - (ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN) == 1)) 218 + ((ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN)) == 1)) 219 219 ret = okfn(skb); 220 220 return ret; 221 221 }
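The one-character bug here is operator precedence: == binds tighter than =, so the old code assigned the boolean result of the comparison to ret and discarded the real hook verdict. In miniature:

    static int hook(void) { return 2; }	/* stand-in for nf_hook_thresh() */

    static void demo(void)
    {
    	int ret;

    	ret = (hook() == 1);		/* old parse: ret == 0, verdict lost */
    	(void)((ret = hook()) == 1);	/* fixed parse: ret == 2 */
    }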
+1 -1
include/net/dn.h
··· 225 225 extern int decnet_dr_count; 226 226 extern int decnet_no_fc_max_cwnd; 227 227 228 - extern int sysctl_decnet_mem[3]; 228 + extern long sysctl_decnet_mem[3]; 229 229 extern int sysctl_decnet_wmem[3]; 230 230 extern int sysctl_decnet_rmem[3]; 231 231
+1
include/net/dst_ops.h
··· 2 2 #define _NET_DST_OPS_H 3 3 #include <linux/types.h> 4 4 #include <linux/percpu_counter.h> 5 + #include <linux/cache.h> 5 6 6 7 struct dst_entry; 7 8 struct kmem_cachep;
+2 -2
include/net/sock.h
··· 762 762 763 763 /* Memory pressure */ 764 764 void (*enter_memory_pressure)(struct sock *sk); 765 - atomic_t *memory_allocated; /* Current allocated memory. */ 765 + atomic_long_t *memory_allocated; /* Current allocated memory. */ 766 766 struct percpu_counter *sockets_allocated; /* Current number of sockets. */ 767 767 /* 768 768 * Pressure flag: try to collapse. ··· 771 771 * is strict, actions are advisory and have some latency. 772 772 */ 773 773 int *memory_pressure; 774 - int *sysctl_mem; 774 + long *sysctl_mem; 775 775 int *sysctl_wmem; 776 776 int *sysctl_rmem; 777 777 int max_header;
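These fields count socket memory in SK_MEM_QUANTUM (one page) units, and a signed 32-bit value caps out at

    2^31 pages * 4096 bytes/page = 8 TiB

so limits auto-sized from total RAM on large 64-bit machines can overflow int arithmetic, which is why this hunk and the tcp, udp and decnet hunks below widen everything to long and atomic_long_t.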
+3 -3
include/net/tcp.h
··· 224 224 extern int sysctl_tcp_reordering; 225 225 extern int sysctl_tcp_ecn; 226 226 extern int sysctl_tcp_dsack; 227 - extern int sysctl_tcp_mem[3]; 227 + extern long sysctl_tcp_mem[3]; 228 228 extern int sysctl_tcp_wmem[3]; 229 229 extern int sysctl_tcp_rmem[3]; 230 230 extern int sysctl_tcp_app_win; ··· 247 247 extern int sysctl_tcp_thin_linear_timeouts; 248 248 extern int sysctl_tcp_thin_dupack; 249 249 250 - extern atomic_t tcp_memory_allocated; 250 + extern atomic_long_t tcp_memory_allocated; 251 251 extern struct percpu_counter tcp_sockets_allocated; 252 252 extern int tcp_memory_pressure; 253 253 ··· 280 280 } 281 281 282 282 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && 283 - atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]) 283 + atomic_long_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]) 284 284 return true; 285 285 return false; 286 286 }
+2 -2
include/net/udp.h
··· 105 105 106 106 extern struct proto udp_prot; 107 107 108 - extern atomic_t udp_memory_allocated; 108 + extern atomic_long_t udp_memory_allocated; 109 109 110 110 /* sysctl variables for udp */ 111 - extern int sysctl_udp_mem[3]; 111 + extern long sysctl_udp_mem[3]; 112 112 extern int sysctl_udp_rmem_min; 113 113 extern int sysctl_udp_wmem_min; 114 114
+1 -1
net/ax25/af_ax25.c
··· 1392 1392 ax25_cb *ax25; 1393 1393 int err = 0; 1394 1394 1395 + memset(fsa, 0, sizeof(*fsa)); 1395 1396 lock_sock(sk); 1396 1397 ax25 = ax25_sk(sk); 1397 1398 ··· 1404 1403 1405 1404 fsa->fsa_ax25.sax25_family = AF_AX25; 1406 1405 fsa->fsa_ax25.sax25_call = ax25->dest_addr; 1407 - fsa->fsa_ax25.sax25_ndigis = 0; 1408 1406 1409 1407 if (ax25->digipeat != NULL) { 1410 1408 ndigi = ax25->digipeat->ndigi;
+6
net/bluetooth/hci_event.c
··· 1175 1175 hci_send_cmd(hdev, 1176 1176 HCI_OP_READ_REMOTE_EXT_FEATURES, 1177 1177 sizeof(cp), &cp); 1178 + } else if (!ev->status && conn->out && 1179 + conn->sec_level == BT_SECURITY_HIGH) { 1180 + struct hci_cp_auth_requested cp; 1181 + cp.handle = ev->handle; 1182 + hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, 1183 + sizeof(cp), &cp); 1178 1184 } else { 1179 1185 conn->state = BT_CONNECTED; 1180 1186 hci_proto_connect_cfm(conn, ev->status);
+1 -1
net/bluetooth/hidp/Kconfig
··· 1 1 config BT_HIDP 2 2 tristate "HIDP protocol support" 3 - depends on BT && BT_L2CAP && INPUT 3 + depends on BT && BT_L2CAP && INPUT && HID_SUPPORT 4 4 select HID 5 5 help 6 6 HIDP (Human Interface Device Protocol) is a transport layer
+4 -4
net/bluetooth/l2cap.c
··· 2421 2421 break; 2422 2422 2423 2423 case 2: 2424 - *val = __le16_to_cpu(*((__le16 *) opt->val)); 2424 + *val = get_unaligned_le16(opt->val); 2425 2425 break; 2426 2426 2427 2427 case 4: 2428 - *val = __le32_to_cpu(*((__le32 *) opt->val)); 2428 + *val = get_unaligned_le32(opt->val); 2429 2429 break; 2430 2430 2431 2431 default: ··· 2452 2452 break; 2453 2453 2454 2454 case 2: 2455 - *((__le16 *) opt->val) = cpu_to_le16(val); 2455 + put_unaligned_le16(val, opt->val); 2456 2456 break; 2457 2457 2458 2458 case 4: 2459 - *((__le32 *) opt->val) = cpu_to_le32(val); 2459 + put_unaligned_le32(val, opt->val); 2460 2460 break; 2461 2461 2462 2462 default:
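opt->val points into the middle of an L2CAP configuration packet, so it carries no alignment guarantee; dereferencing it through a cast to __le16 * or __le32 * faults on strict-alignment architectures, while the helpers assemble the value byte by byte. A small sketch of the same helpers (from the kernel's asm/unaligned.h):

    #include <asm/unaligned.h>

    static void demo(void)
    {
    	u8 buf[5] = { 0xff, 0x34, 0x12, 0x00, 0x00 };
    	u16 v;

    	/* buf + 1 is odd; *(__le16 *)(buf + 1) could trap on e.g. SPARC */
    	v = get_unaligned_le16(buf + 1);	/* reads 0x1234 safely */

    	put_unaligned_le16(v, buf + 3);		/* stores 0x34, 0x12 bytewise */
    }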
+10 -3
net/bluetooth/rfcomm/core.c
··· 79 79 80 80 static void rfcomm_process_connect(struct rfcomm_session *s); 81 81 82 - static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst, int *err); 82 + static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, 83 + bdaddr_t *dst, 84 + u8 sec_level, 85 + int *err); 83 86 static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst); 84 87 static void rfcomm_session_del(struct rfcomm_session *s); 85 88 ··· 404 401 405 402 s = rfcomm_session_get(src, dst); 406 403 if (!s) { 407 - s = rfcomm_session_create(src, dst, &err); 404 + s = rfcomm_session_create(src, dst, d->sec_level, &err); 408 405 if (!s) 409 406 return err; 410 407 } ··· 682 679 rfcomm_session_put(s); 683 680 } 684 681 685 - static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst, int *err) 682 + static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, 683 + bdaddr_t *dst, 684 + u8 sec_level, 685 + int *err) 686 686 { 687 687 struct rfcomm_session *s = NULL; 688 688 struct sockaddr_l2 addr; ··· 710 704 sk = sock->sk; 711 705 lock_sock(sk); 712 706 l2cap_pi(sk)->imtu = l2cap_mtu; 707 + l2cap_pi(sk)->sec_level = sec_level; 713 708 if (l2cap_ertm) 714 709 l2cap_pi(sk)->mode = L2CAP_MODE_ERTM; 715 710 release_sock(sk);
+1 -1
net/can/bcm.c
··· 125 125 struct list_head tx_ops; 126 126 unsigned long dropped_usr_msgs; 127 127 struct proc_dir_entry *bcm_proc_read; 128 - char procname [9]; /* pointer printed in ASCII with \0 */ 128 + char procname [20]; /* pointer printed in ASCII with \0 */ 129 129 }; 130 130 131 131 static inline struct bcm_sock *bcm_sk(const struct sock *sk)
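As the comment says, the buffer holds a pointer rendered in ASCII: 9 bytes fit only the 32-bit case (8 hex digits plus NUL), while a 64-bit pointer needs up to 16 hex digits plus NUL, so formatting it into the old buffer wrote past the end of the heap-allocated bcm_sock. A defensive sketch of the sizing (the helper name and format are assumptions, not the exact bcm code):

    static void set_procname(char *procname, size_t len, const void *sk)
    {
    	/* a 64-bit pointer in hex is up to 16 digits + NUL = 17 bytes,
    	 * which the widened 20-byte buffer holds; snprintf() bounds the
    	 * write in any case */
    	snprintf(procname, len, "%lx", (unsigned long)sk);
    }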
+1
net/core/dst.c
··· 370 370 371 371 static struct notifier_block dst_dev_notifier = { 372 372 .notifier_call = dst_dev_event, 373 + .priority = -10, /* must be called after other network notifiers */ 373 374 }; 374 375 375 376 void __init dst_init(void)
+35 -29
net/core/filter.c
··· 112 112 */ 113 113 unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen) 114 114 { 115 - struct sock_filter *fentry; /* We walk down these */ 116 115 void *ptr; 117 116 u32 A = 0; /* Accumulator */ 118 117 u32 X = 0; /* Index Register */ 119 118 u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */ 119 + unsigned long memvalid = 0; 120 120 u32 tmp; 121 121 int k; 122 122 int pc; 123 123 124 + BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG); 124 125 /* 125 126 * Process array of filter instructions. 126 127 */ 127 128 for (pc = 0; pc < flen; pc++) { 128 - fentry = &filter[pc]; 129 + const struct sock_filter *fentry = &filter[pc]; 130 + u32 f_k = fentry->k; 129 131 130 132 switch (fentry->code) { 131 133 case BPF_S_ALU_ADD_X: 132 134 A += X; 133 135 continue; 134 136 case BPF_S_ALU_ADD_K: 135 - A += fentry->k; 137 + A += f_k; 136 138 continue; 137 139 case BPF_S_ALU_SUB_X: 138 140 A -= X; 139 141 continue; 140 142 case BPF_S_ALU_SUB_K: 141 - A -= fentry->k; 143 + A -= f_k; 142 144 continue; 143 145 case BPF_S_ALU_MUL_X: 144 146 A *= X; 145 147 continue; 146 148 case BPF_S_ALU_MUL_K: 147 - A *= fentry->k; 149 + A *= f_k; 148 150 continue; 149 151 case BPF_S_ALU_DIV_X: 150 152 if (X == 0) ··· 154 152 A /= X; 155 153 continue; 156 154 case BPF_S_ALU_DIV_K: 157 - A /= fentry->k; 155 + A /= f_k; 158 156 continue; 159 157 case BPF_S_ALU_AND_X: 160 158 A &= X; 161 159 continue; 162 160 case BPF_S_ALU_AND_K: 163 - A &= fentry->k; 161 + A &= f_k; 164 162 continue; 165 163 case BPF_S_ALU_OR_X: 166 164 A |= X; 167 165 continue; 168 166 case BPF_S_ALU_OR_K: 169 - A |= fentry->k; 167 + A |= f_k; 170 168 continue; 171 169 case BPF_S_ALU_LSH_X: 172 170 A <<= X; 173 171 continue; 174 172 case BPF_S_ALU_LSH_K: 175 - A <<= fentry->k; 173 + A <<= f_k; 176 174 continue; 177 175 case BPF_S_ALU_RSH_X: 178 176 A >>= X; 179 177 continue; 180 178 case BPF_S_ALU_RSH_K: 181 - A >>= fentry->k; 179 + A >>= f_k; 182 180 continue; 183 181 case BPF_S_ALU_NEG: 184 182 A = -A; 185 183 continue; 186 184 case BPF_S_JMP_JA: 187 - pc += fentry->k; 185 + pc += f_k; 188 186 continue; 189 187 case BPF_S_JMP_JGT_K: 190 - pc += (A > fentry->k) ? fentry->jt : fentry->jf; 188 + pc += (A > f_k) ? fentry->jt : fentry->jf; 191 189 continue; 192 190 case BPF_S_JMP_JGE_K: 193 - pc += (A >= fentry->k) ? fentry->jt : fentry->jf; 191 + pc += (A >= f_k) ? fentry->jt : fentry->jf; 194 192 continue; 195 193 case BPF_S_JMP_JEQ_K: 196 - pc += (A == fentry->k) ? fentry->jt : fentry->jf; 194 + pc += (A == f_k) ? fentry->jt : fentry->jf; 197 195 continue; 198 196 case BPF_S_JMP_JSET_K: 199 - pc += (A & fentry->k) ? fentry->jt : fentry->jf; 197 + pc += (A & f_k) ? fentry->jt : fentry->jf; 200 198 continue; 201 199 case BPF_S_JMP_JGT_X: 202 200 pc += (A > X) ? fentry->jt : fentry->jf; ··· 211 209 pc += (A & X) ? fentry->jt : fentry->jf; 212 210 continue; 213 211 case BPF_S_LD_W_ABS: 214 - k = fentry->k; 212 + k = f_k; 215 213 load_w: 216 214 ptr = load_pointer(skb, k, 4, &tmp); 217 215 if (ptr != NULL) { ··· 220 218 } 221 219 break; 222 220 case BPF_S_LD_H_ABS: 223 - k = fentry->k; 221 + k = f_k; 224 222 load_h: 225 223 ptr = load_pointer(skb, k, 2, &tmp); 226 224 if (ptr != NULL) { ··· 229 227 } 230 228 break; 231 229 case BPF_S_LD_B_ABS: 232 - k = fentry->k; 230 + k = f_k; 233 231 load_b: 234 232 ptr = load_pointer(skb, k, 1, &tmp); 235 233 if (ptr != NULL) { ··· 244 242 X = skb->len; 245 243 continue; 246 244 case BPF_S_LD_W_IND: 247 - k = X + fentry->k; 245 + k = X + f_k; 248 246 goto load_w; 249 247 case BPF_S_LD_H_IND: 250 - k = X + fentry->k; 248 + k = X + f_k; 251 249 goto load_h; 252 250 case BPF_S_LD_B_IND: 253 - k = X + fentry->k; 251 + k = X + f_k; 254 252 goto load_b; 255 253 case BPF_S_LDX_B_MSH: 256 - ptr = load_pointer(skb, fentry->k, 1, &tmp); 254 + ptr = load_pointer(skb, f_k, 1, &tmp); 257 255 if (ptr != NULL) { 258 256 X = (*(u8 *)ptr & 0xf) << 2; 259 257 continue; 260 258 } 261 259 return 0; 262 260 case BPF_S_LD_IMM: 263 - A = fentry->k; 261 + A = f_k; 264 262 continue; 265 263 case BPF_S_LDX_IMM: 266 - X = fentry->k; 264 + X = f_k; 267 265 continue; 268 266 case BPF_S_LD_MEM: 269 - A = mem[fentry->k]; 267 + A = (memvalid & (1UL << f_k)) ? 268 + mem[f_k] : 0; 270 269 continue; 271 270 case BPF_S_LDX_MEM: 272 - X = mem[fentry->k]; 271 + X = (memvalid & (1UL << f_k)) ? 272 + mem[f_k] : 0; 273 273 continue; 274 274 case BPF_S_MISC_TAX: 275 275 X = A; ··· 280 276 A = X; 281 277 continue; 282 278 case BPF_S_RET_K: 283 - return fentry->k; 279 + return f_k; 284 280 case BPF_S_RET_A: 285 281 return A; 286 282 case BPF_S_ST: 287 - mem[fentry->k] = A; 283 + memvalid |= 1UL << f_k; 284 + mem[f_k] = A; 288 285 continue; 289 286 case BPF_S_STX: 290 - mem[fentry->k] = X; 287 + memvalid |= 1UL << f_k; 288 + mem[f_k] = X; 291 289 continue; 292 290 default: 293 291 WARN_ON(1);
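Two independent changes are interleaved in this hunk: fentry->k is hoisted into the local f_k, and, more importantly, scratch-memory loads are gated on a validity bitmap so a filter that loads a mem[] slot it never stored reads 0 instead of uninitialized kernel stack. The guard distilled into a self-contained sketch (the typedef is a userspace stand-in for the kernel type):

    typedef unsigned int u32;	/* userspace stand-in */
    #define BPF_MEMWORDS 16		/* as in the kernel */

    static u32 load_slot(const u32 *mem, unsigned long memvalid, u32 k)
    {
    	/* unwritten slots read as 0, never as stack garbage */
    	return (memvalid & (1UL << k)) ? mem[k] : 0;
    }

    static void store_slot(u32 *mem, unsigned long *memvalid, u32 k, u32 val)
    {
    	*memvalid |= 1UL << k;	/* mark slot k initialized */
    	mem[k] = val;
    }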
+3 -3
net/core/pktgen.c
··· 887 887 i += len; 888 888 889 889 if (debug) { 890 - size_t copy = min(count, 1023); 890 + size_t copy = min_t(size_t, count, 1023); 891 891 char tb[copy + 1]; 892 892 if (copy_from_user(tb, user_buffer, copy)) 893 893 return -EFAULT; ··· 2612 2612 /* Update any of the values, used when we're incrementing various 2613 2613 * fields. 2614 2614 */ 2615 - queue_map = pkt_dev->cur_queue_map; 2616 2615 mod_cur_headers(pkt_dev); 2616 + queue_map = pkt_dev->cur_queue_map; 2617 2617 2618 2618 datalen = (odev->hard_header_len + 16) & ~0xf; 2619 2619 ··· 2976 2976 /* Update any of the values, used when we're incrementing various 2977 2977 * fields. 2978 2978 */ 2979 - queue_map = pkt_dev->cur_queue_map; 2980 2979 mod_cur_headers(pkt_dev); 2980 + queue_map = pkt_dev->cur_queue_map; 2981 2981 2982 2982 skb = __netdev_alloc_skb(odev, 2983 2983 pkt_dev->cur_pkt_size + 64
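Besides moving the cur_queue_map read after mod_cur_headers() (the call that actually updates it), the hunk swaps min() for min_t(): the kernel's min() macro type-checks its arguments at compile time, and here count is a size_t while 1023 is an int. In short:

    #include <linux/kernel.h>

    static void demo(size_t count)
    {
    	size_t copy;

    	copy = min(count, 1023);		/* size_t vs int: build warning */
    	copy = min_t(size_t, count, 1023);	/* casts both to size_t first */
    }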
+5 -4
net/core/rtnetlink.c
··· 347 347 if (!ops) 348 348 return 0; 349 349 350 - size = nlmsg_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */ 351 - nlmsg_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */ 350 + size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */ 351 + nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */ 352 352 353 353 if (ops->get_size) 354 354 /* IFLA_INFO_DATA + nested data */ 355 - size += nlmsg_total_size(sizeof(struct nlattr)) + 355 + size += nla_total_size(sizeof(struct nlattr)) + 356 356 ops->get_size(dev); 357 357 358 358 if (ops->get_xstats_size) 359 - size += ops->get_xstats_size(dev); /* IFLA_INFO_XSTATS */ 359 + /* IFLA_INFO_XSTATS */ 360 + size += nla_total_size(ops->get_xstats_size(dev)); 360 361 361 362 return size; 362 363 }
+7 -7
net/core/sock.c
··· 1653 1653 { 1654 1654 struct proto *prot = sk->sk_prot; 1655 1655 int amt = sk_mem_pages(size); 1656 - int allocated; 1656 + long allocated; 1657 1657 1658 1658 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; 1659 - allocated = atomic_add_return(amt, prot->memory_allocated); 1659 + allocated = atomic_long_add_return(amt, prot->memory_allocated); 1660 1660 1661 1661 /* Under limit. */ 1662 1662 if (allocated <= prot->sysctl_mem[0]) { ··· 1714 1714 1715 1715 /* Alas. Undo changes. */ 1716 1716 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM; 1717 - atomic_sub(amt, prot->memory_allocated); 1717 + atomic_long_sub(amt, prot->memory_allocated); 1718 1718 return 0; 1719 1719 } 1720 1720 EXPORT_SYMBOL(__sk_mem_schedule); ··· 1727 1727 { 1728 1728 struct proto *prot = sk->sk_prot; 1729 1729 1730 - atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, 1730 + atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, 1731 1731 prot->memory_allocated); 1732 1732 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1; 1733 1733 1734 1734 if (prot->memory_pressure && *prot->memory_pressure && 1735 - (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0])) 1735 + (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0])) 1736 1736 *prot->memory_pressure = 0; 1737 1737 } 1738 1738 EXPORT_SYMBOL(__sk_mem_reclaim); ··· 2452 2452 2453 2453 static void proto_seq_printf(struct seq_file *seq, struct proto *proto) 2454 2454 { 2455 - seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s " 2455 + seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s " 2456 2456 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n", 2457 2457 proto->name, 2458 2458 proto->obj_size, 2459 2459 sock_prot_inuse_get(seq_file_net(seq), proto), 2460 - proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1, 2460 + proto->memory_allocated != NULL ? atomic_long_read(proto->memory_allocated) : -1L, 2461 2461 proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI", 2462 2462 proto->max_header, 2463 2463 proto->slab == NULL ? "no" : "yes",
+1 -1
net/decnet/af_decnet.c
··· 155 155 static DEFINE_RWLOCK(dn_hash_lock);
156 156 static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
157 157 static struct hlist_head dn_wild_sk;
158 -static atomic_t decnet_memory_allocated;
158 +static atomic_long_t decnet_memory_allocated;
159 159 
160 160 static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags);
161 161 static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags);
+2 -2
net/decnet/sysctl_net_decnet.c
··· 38 38 int decnet_no_fc_max_cwnd = NSP_MIN_WINDOW;
39 39 
40 40 /* Reasonable defaults, I hope, based on tcp's defaults */
41 -int sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 };
41 +long sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 };
42 42 int sysctl_decnet_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
43 43 int sysctl_decnet_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
44 44 
··· 324 324 		.data = &sysctl_decnet_mem,
325 325 		.maxlen = sizeof(sysctl_decnet_mem),
326 326 		.mode = 0644,
327 -		.proc_handler = proc_dointvec,
327 +		.proc_handler = proc_doulongvec_minmax
328 328 	},
329 329 	{
330 330 		.procname = "decnet_rmem",
+1 -3
net/ipv4/igmp.c
··· 2306 2306 
2307 2307 	in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
2308 2308 	(void) ip_mc_leave_src(sk, iml, in_dev);
2309 -	if (in_dev != NULL) {
2309 +	if (in_dev != NULL)
2310 2310 		ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
2311 -		in_dev_put(in_dev);
2312 -	}
2313 2311 	/* decrease mem now to avoid the memleak warning */
2314 2312 	atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
2315 2313 	call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
+4 -4
net/ipv4/proc.c
··· 59 59 	local_bh_enable();
60 60 
61 61 	socket_seq_show(seq);
62 -	seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n",
62 +	seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n",
63 63 		   sock_prot_inuse_get(net, &tcp_prot), orphans,
64 64 		   tcp_death_row.tw_count, sockets,
65 -		   atomic_read(&tcp_memory_allocated));
66 -	seq_printf(seq, "UDP: inuse %d mem %d\n",
65 +		   atomic_long_read(&tcp_memory_allocated));
66 +	seq_printf(seq, "UDP: inuse %d mem %ld\n",
67 67 		   sock_prot_inuse_get(net, &udp_prot),
68 -		   atomic_read(&udp_memory_allocated));
68 +		   atomic_long_read(&udp_memory_allocated));
69 69 	seq_printf(seq, "UDPLITE: inuse %d\n",
70 70 		   sock_prot_inuse_get(net, &udplite_prot));
71 71 	seq_printf(seq, "RAW: inuse %d\n",
+2 -3
net/ipv4/sysctl_net_ipv4.c
··· 398 398 		.data = &sysctl_tcp_mem,
399 399 		.maxlen = sizeof(sysctl_tcp_mem),
400 400 		.mode = 0644,
401 -		.proc_handler = proc_dointvec
401 +		.proc_handler = proc_doulongvec_minmax
402 402 	},
403 403 	{
404 404 		.procname = "tcp_wmem",
··· 602 602 		.data = &sysctl_udp_mem,
603 603 		.maxlen = sizeof(sysctl_udp_mem),
604 604 		.mode = 0644,
605 -		.proc_handler = proc_dointvec_minmax,
606 -		.extra1 = &zero
605 +		.proc_handler = proc_doulongvec_minmax,
607 606 	},
608 607 	{
609 608 		.procname = "udp_rmem_min",
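
The .proc_handler swaps here (and in the decnet and sctp tables elsewhere in this merge) follow directly from the type change: proc_dointvec reads and writes .data as an array of int, so once the backing store is long[3] the long-vector handler proc_doulongvec_minmax must be used, or on 64-bit each 8-byte slot is misread as two 4-byte halves. A userspace illustration of that aliasing, assuming a little-endian LP64 machine:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
    	/* The tcp/udp/decnet "mem" sysctls became long[3]; this is
    	 * roughly what an int-based reader would see. */
    	long mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 };
    	int as_int[3];

    	memcpy(as_int, mem, sizeof(as_int));	/* first 12 bytes only */

    	for (int i = 0; i < 3; i++)
    		printf("slot %d: int reader sees %d, real value %ld\n",
    		       i, as_int[i], mem[i]);
    	return 0;
    }
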
+3 -3
net/ipv4/tcp.c
··· 282 282 struct percpu_counter tcp_orphan_count;
283 283 EXPORT_SYMBOL_GPL(tcp_orphan_count);
284 284 
285 -int sysctl_tcp_mem[3] __read_mostly;
285 +long sysctl_tcp_mem[3] __read_mostly;
286 286 int sysctl_tcp_wmem[3] __read_mostly;
287 287 int sysctl_tcp_rmem[3] __read_mostly;
288 288 
··· 290 290 EXPORT_SYMBOL(sysctl_tcp_rmem);
291 291 EXPORT_SYMBOL(sysctl_tcp_wmem);
292 292 
293 -atomic_t tcp_memory_allocated;	/* Current allocated memory. */
293 +atomic_long_t tcp_memory_allocated;	/* Current allocated memory. */
294 294 EXPORT_SYMBOL(tcp_memory_allocated);
295 295 
296 296 /*
··· 2246 2246 		/* Values greater than interface MTU won't take effect. However
2247 2247 		 * at the point when this call is done we typically don't yet
2248 2248 		 * know which interface is going to be used */
2249 -		if (val < 8 || val > MAX_TCP_WINDOW) {
2249 +		if (val < 64 || val > MAX_TCP_WINDOW) {
2250 2250 			err = -EINVAL;
2251 2251 			break;
2252 2252 		}
+7 -4
net/ipv4/tcp_input.c
··· 259 259 	int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 +
260 260 		     sizeof(struct sk_buff);
261 261 
262 -	if (sk->sk_sndbuf < 3 * sndmem)
263 -		sk->sk_sndbuf = min(3 * sndmem, sysctl_tcp_wmem[2]);
262 +	if (sk->sk_sndbuf < 3 * sndmem) {
263 +		sk->sk_sndbuf = 3 * sndmem;
264 +		if (sk->sk_sndbuf > sysctl_tcp_wmem[2])
265 +			sk->sk_sndbuf = sysctl_tcp_wmem[2];
266 +	}
264 267 }
265 268 
266 269 /* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
··· 399 396 	if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
400 397 	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
401 398 	    !tcp_memory_pressure &&
402 -	    atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
399 +	    atomic_long_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
403 400 		sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
404 401 				    sysctl_tcp_rmem[2]);
405 402 	}
··· 4864 4861 		return 0;
4865 4862 
4866 4863 	/* If we are under soft global TCP memory pressure, do not expand. */
4867 -	if (atomic_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
4864 +	if (atomic_long_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
4868 4865 		return 0;
4869 4866 
4870 4867 	/* If we filled the congestion window, do not expand. */
+3 -5
net/ipv4/tcp_ipv4.c
··· 415 415 		    !icsk->icsk_backoff)
416 416 			break;
417 417 
418 +		if (sock_owned_by_user(sk))
419 +			break;
420 +
418 421 		icsk->icsk_backoff--;
419 422 		inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
420 423 			icsk->icsk_backoff;
··· 432 429 		if (remaining) {
433 430 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
434 431 						  remaining, TCP_RTO_MAX);
435 -		} else if (sock_owned_by_user(sk)) {
436 -			/* RTO revert clocked out retransmission,
437 -			 * but socket is locked. Will defer. */
438 -			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
439 -						  HZ/20, TCP_RTO_MAX);
440 432 		} else {
441 433 			/* RTO revert clocked out retransmission.
442 434 			 * Will retransmit now */
+2 -2
net/ipv4/udp.c
··· 110 110 struct udp_table udp_table __read_mostly;
111 111 EXPORT_SYMBOL(udp_table);
112 112 
113 -int sysctl_udp_mem[3] __read_mostly;
113 +long sysctl_udp_mem[3] __read_mostly;
114 114 EXPORT_SYMBOL(sysctl_udp_mem);
115 115 
116 116 int sysctl_udp_rmem_min __read_mostly;
··· 119 119 int sysctl_udp_wmem_min __read_mostly;
120 120 EXPORT_SYMBOL(sysctl_udp_wmem_min);
121 121 
122 -atomic_t udp_memory_allocated;
122 +atomic_long_t udp_memory_allocated;
123 123 EXPORT_SYMBOL(udp_memory_allocated);
124 124 
125 125 #define MAX_UDP_PORTS 65536
+8 -16
net/ipv6/addrconf.c
··· 2740 2740 		/* Flag it for later restoration when link comes up */
2741 2741 		ifa->flags |= IFA_F_TENTATIVE;
2742 2742 		ifa->state = INET6_IFADDR_STATE_DAD;
2743 -
2744 -		write_unlock_bh(&idev->lock);
2745 -
2746 -		in6_ifa_hold(ifa);
2747 2743 	} else {
2748 2744 		list_del(&ifa->if_list);
2749 2745 
··· 2754 2758 		ifa->state = INET6_IFADDR_STATE_DEAD;
2755 2759 		spin_unlock_bh(&ifa->state_lock);
2756 2760 
2757 -		if (state == INET6_IFADDR_STATE_DEAD)
2758 -			goto put_ifa;
2761 +		if (state == INET6_IFADDR_STATE_DEAD) {
2762 +			in6_ifa_put(ifa);
2763 +		} else {
2764 +			__ipv6_ifa_notify(RTM_DELADDR, ifa);
2765 +			atomic_notifier_call_chain(&inet6addr_chain,
2766 +						   NETDEV_DOWN, ifa);
2767 +		}
2768 +		write_lock_bh(&idev->lock);
2759 2769 	}
2760 -
2761 -	__ipv6_ifa_notify(RTM_DELADDR, ifa);
2762 -	if (ifa->state == INET6_IFADDR_STATE_DEAD)
2763 -		atomic_notifier_call_chain(&inet6addr_chain,
2764 -					   NETDEV_DOWN, ifa);
2765 -
2766 -put_ifa:
2767 -	in6_ifa_put(ifa);
2768 -
2769 -	write_lock_bh(&idev->lock);
2770 2770 	}
2771 2771 
2772 2772 	list_splice(&keep_list, &idev->addr_list);
+1 -1
net/ipv6/netfilter/nf_conntrack_reasm.c
··· 286 286 
287 287 	/* Check for overlap with preceding fragment. */
288 288 	if (prev &&
289 -	    (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset > 0)
289 +	    (NFCT_FRAG6_CB(prev)->offset + prev->len) > offset)
290 290 		goto discard_fq;
291 291 
292 292 	/* Look for overlap with succeeding segment. */
+1 -1
net/ipv6/reassembly.c
··· 349 349 
350 350 	/* Check for overlap with preceding fragment. */
351 351 	if (prev &&
352 -	    (FRAG6_CB(prev)->offset + prev->len) - offset > 0)
352 +	    (FRAG6_CB(prev)->offset + prev->len) > offset)
353 353 		goto discard_fq;
354 354 
355 355 	/* Look for overlap with succeeding segment. */
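
Both fragment-overlap fixes, in nf_conntrack_reasm.c and reassembly.c, rewrite `(prev_end) - offset > 0` as `prev_end > offset`. The two forms differ only if the intermediate subtraction overflows, which attacker-supplied fragment offsets should never be given a chance to exploit; the direct comparison has no intermediate result to wrap and reads exactly as intended, namely that the previous fragment ends strictly past where the new one starts. A tiny standalone rendering of the predicate:

    #include <stdio.h>

    /* Overlap predicate from the reassembly hunks: the previous
     * fragment [p_off, p_off + p_len) collides with a new fragment
     * starting at offset iff its end lies strictly past that start. */
    static int overlaps_prev(int p_off, int p_len, int offset)
    {
    	return p_off + p_len > offset;
    }

    int main(void)
    {
    	/* prev covers bytes 0..1279: a fragment at 1272 overlaps,
    	 * one at exactly 1280 is merely adjacent. */
    	printf("%d\n", overlaps_prev(0, 1280, 1272));	/* 1 */
    	printf("%d\n", overlaps_prev(0, 1280, 1280));	/* 0 */
    	return 0;
    }
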
+5 -1
net/ipv6/route.c
··· 1945 1945 	struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
1946 1946 	struct neighbour *neigh;
1947 1947 
1948 -	if (rt == NULL)
1948 +	if (rt == NULL) {
1949 +		if (net_ratelimit())
1950 +			pr_warning("IPv6: Maximum number of routes reached,"
1951 +				   " consider increasing route/max_size.\n");
1949 1952 		return ERR_PTR(-ENOMEM);
1953 +	}
1950 1954 
1951 1955 	dev_hold(net->loopback_dev);
1952 1956 	in6_dev_hold(idev);
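
The new out-of-routes warning is gated on net_ratelimit() because route-table exhaustion can be provoked by remote traffic, so an unthrottled printk here would be a log-flooding vector. net_ratelimit() is the stock token-bucket limiter for networking printks (on the order of 10 messages per 5 seconds by default in kernels of this era). A toy limiter in the same spirit — illustrative parameters, not the kernel's implementation:

    #include <stdio.h>
    #include <time.h>

    /* Toy printk rate limiter: allow a small burst per one-second
     * window, then suppress. Parameters are illustrative. */
    static int my_ratelimit(void)
    {
    	static time_t window;
    	static int tokens;
    	time_t now = time(NULL);

    	if (now != window) {	/* new window: refill the bucket */
    		window = now;
    		tokens = 3;
    	}
    	if (tokens > 0) {
    		tokens--;
    		return 1;	/* caller may log */
    	}
    	return 0;		/* suppressed */
    }

    int main(void)
    {
    	for (int i = 0; i < 10; i++)
    		if (my_ratelimit())
    			printf("warning %d logged\n", i);
    	return 0;
    }
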
+3 -3
net/mac80211/iface.c
··· 391 391 	u32 hw_reconf_flags = 0;
392 392 	int i;
393 393 
394 +	if (local->scan_sdata == sdata)
395 +		ieee80211_scan_cancel(local);
396 +
394 397 	clear_bit(SDATA_STATE_RUNNING, &sdata->state);
395 398 
396 399 	/*
··· 525 522 	 */
526 523 	synchronize_rcu();
527 524 	skb_queue_purge(&sdata->skb_queue);
528 -
529 -	if (local->scan_sdata == sdata)
530 -		ieee80211_scan_cancel(local);
531 525 
532 526 	/*
533 527 	 * Disable beaconing here for mesh only, AP and IBSS
+5 -2
net/packet/af_packet.c
··· 1610 1610 
1611 1611 		err = -EINVAL;
1612 1612 		vnet_hdr_len = sizeof(vnet_hdr);
1613 -		if ((len -= vnet_hdr_len) < 0)
1613 +		if (len < vnet_hdr_len)
1614 1614 			goto out_free;
1615 +
1616 +		len -= vnet_hdr_len;
1615 1617 
1616 1618 		if (skb_is_gso(skb)) {
1617 1619 			struct skb_shared_info *sinfo = skb_shinfo(skb);
··· 1721 1719 	rcu_read_lock();
1722 1720 	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
1723 1721 	if (dev)
1724 -		strlcpy(uaddr->sa_data, dev->name, 15);
1722 +		strncpy(uaddr->sa_data, dev->name, 14);
1725 1723 	else
1726 1724 		memset(uaddr->sa_data, 0, 14);
1727 1725 	rcu_read_unlock();
··· 1744 1742 	sll->sll_family = AF_PACKET;
1745 1743 	sll->sll_ifindex = po->ifindex;
1746 1744 	sll->sll_protocol = po->num;
1745 +	sll->sll_pkttype = 0;
1747 1746 	rcu_read_lock();
1748 1747 	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
1749 1748 	if (dev) {
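
Two defensive idioms show up in the af_packet.c hunks. First, `(len -= vnet_hdr_len) < 0` is only meaningful when len is signed; testing `len < vnet_hdr_len` before subtracting is correct for any integer type and never leaves len holding a wrapped value. Second, the sockaddr paths cap copies at the 14 bytes sa_data actually holds and zero sll_pkttype, so no uninitialized kernel bytes reach userspace. A sketch of the length-check pattern, with hypothetical names:

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical header-stripping helper: validate before
     * subtracting so an unsigned 'len' can never wrap around. */
    static int strip_header(size_t *len, size_t hdr_len)
    {
    	if (*len < hdr_len)
    		return -1;	/* too short: reject, len untouched */
    	*len -= hdr_len;
    	return 0;
    }

    int main(void)
    {
    	size_t len = 10;

    	/* The old "(len -= 16) < 0" idiom would wrap len to a huge
    	 * value here instead of failing. */
    	if (strip_header(&len, 16) < 0)
    		printf("rejected short packet, len still %zu\n", len);
    	return 0;
    }
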
+3 -1
net/rds/message.c
··· 249 249 	rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
250 250 	rm->data.op_nents = ceil(total_len, PAGE_SIZE);
251 251 	rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
252 -	if (!rm->data.op_sg)
252 +	if (!rm->data.op_sg) {
253 +		rds_message_put(rm);
253 254 		return ERR_PTR(-ENOMEM);
255 +	}
254 256 
255 257 	for (i = 0; i < rm->data.op_nents; ++i) {
256 258 		sg_set_page(&rm->data.op_sg[i],
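
The rds change plugs an error-path leak: this function holds a reference on rm, so bailing out when the scatterlist allocation fails must drop that reference with rds_message_put(), otherwise the half-built message is never freed. The general shape of the rule, using a hypothetical refcounted type:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical refcounted object illustrating the rule the rds
     * hunk enforces: a failed sub-allocation must put back the
     * reference the allocator owns, not just return. */
    struct msg {
    	int refcount;
    };

    static void msg_put(struct msg *m)
    {
    	if (--m->refcount == 0) {
    		printf("freeing message\n");
    		free(m);
    	}
    }

    static struct msg *msg_alloc(int fail_sub_alloc)
    {
    	struct msg *m = calloc(1, sizeof(*m));
    	if (!m)
    		return NULL;
    	m->refcount = 1;	/* the caller's reference */

    	if (fail_sub_alloc) {	/* e.g. scatterlist allocation */
    		msg_put(m);	/* the fix: drop our ref, no leak */
    		return NULL;
    	}
    	return m;
    }

    int main(void)
    {
    	if (!msg_alloc(1))
    		printf("allocation failed cleanly\n");
    	return 0;
    }
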
+4
net/sched/cls_basic.c
··· 268 268 		goto nla_put_failure;
269 269 
270 270 	nla_nest_end(skb, nest);
271 +
272 +	if (tcf_exts_dump_stats(skb, &f->exts, &basic_ext_map) < 0)
273 +		goto nla_put_failure;
274 +
271 275 	return skb->len;
272 276 
273 277 nla_put_failure:
+1 -1
net/sctp/protocol.c
··· 92 92 struct kmem_cache *sctp_chunk_cachep __read_mostly;
93 93 struct kmem_cache *sctp_bucket_cachep __read_mostly;
94 94 
95 -int sysctl_sctp_mem[3];
95 +long sysctl_sctp_mem[3];
96 96 int sysctl_sctp_rmem[3];
97 97 int sysctl_sctp_wmem[3];
98 98 
+2 -2
net/sctp/socket.c
··· 111 111 static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG;
112 112 
113 113 extern struct kmem_cache *sctp_bucket_cachep;
114 -extern int sysctl_sctp_mem[3];
114 +extern long sysctl_sctp_mem[3];
115 115 extern int sysctl_sctp_rmem[3];
116 116 extern int sysctl_sctp_wmem[3];
117 117 
118 118 static int sctp_memory_pressure;
119 -static atomic_t sctp_memory_allocated;
119 +static atomic_long_t sctp_memory_allocated;
120 120 struct percpu_counter sctp_sockets_allocated;
121 121 
122 122 static void sctp_enter_memory_pressure(struct sock *sk)
+2 -2
net/sctp/sysctl.c
··· 54 54 static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
55 55 static int rwnd_scale_max = 16;
56 56 
57 -extern int sysctl_sctp_mem[3];
57 +extern long sysctl_sctp_mem[3];
58 58 extern int sysctl_sctp_rmem[3];
59 59 extern int sysctl_sctp_wmem[3];
60 60 
··· 203 203 		.data = &sysctl_sctp_mem,
204 204 		.maxlen = sizeof(sysctl_sctp_mem),
205 205 		.mode = 0644,
206 -		.proc_handler = proc_dointvec,
206 +		.proc_handler = proc_doulongvec_minmax
207 207 	},
208 208 	{
209 209 		.procname = "sctp_rmem",
+1
net/tipc/socket.c
··· 396 396 	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
397 397 	struct tipc_sock *tsock = tipc_sk(sock->sk);
398 398 
399 +	memset(addr, 0, sizeof(*addr));
399 400 	if (peer) {
400 401 		if ((sock->state != SS_CONNECTED) &&
401 402 			((peer != 2) || (sock->state != SS_DISCONNECTING)))
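
The tipc memset is the standard fix for a kernel-to-userspace infoleak: the sockaddr is only partially filled in on some paths, and any bytes left untouched (including padding) would otherwise be copied out still holding stale kernel stack contents. The same shape in a userspace sketch with a hypothetical struct (30 is AF_TIPC's numeric value on Linux):

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical address struct with fields set only on some paths. */
    struct fake_addr {
    	unsigned short family;
    	unsigned int   ref;	/* only set when 'peer' is requested */
    	char           name[8];
    };

    static void get_name(struct fake_addr *addr, int peer)
    {
    	memset(addr, 0, sizeof(*addr));	/* the fix: no stale bytes */
    	addr->family = 30;		/* AF_TIPC */
    	if (peer)
    		addr->ref = 42;		/* 'name' stays zeroed */
    }

    int main(void)
    {
    	struct fake_addr a;
    	get_name(&a, 1);
    	printf("family=%u ref=%u\n", a.family, a.ref);
    	return 0;
    }
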
+2 -2
net/wireless/nl80211.c
··· 224 224 	}
225 225 
226 226 	*rdev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx);
227 -	if (IS_ERR(dev)) {
228 -		err = PTR_ERR(dev);
227 +	if (IS_ERR(*rdev)) {
228 +		err = PTR_ERR(*rdev);
229 229 		goto out_rtnl;
230 230 	}
231 231 
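
The nl80211 fix is small but consequential: the lookup result is stored through the rdev out-parameter, so IS_ERR()/PTR_ERR() must inspect *rdev; testing a different local meant failed lookups could sail past the error path. IS_ERR() works by reserving the top 4095 values of the address space for encoded negative errnos. A simplified userspace sketch of that encoding, with a hypothetical lookup:

    #include <stdio.h>
    #include <errno.h>

    /* Hypothetical lookup returning either a valid pointer or a
     * negative errno encoded in the pointer, like the kernel's
     * ERR_PTR()/IS_ERR() in <linux/err.h>. */
    static void *lookup(int fail)
    {
    	return fail ? (void *)(long)-ENODEV : (void *)0x1000;
    }

    static int is_err(const void *p)
    {
    	/* Mirrors IS_ERR_VALUE: top 4095 addresses are errnos. */
    	return (unsigned long)p >= (unsigned long)-4095;
    }

    int main(void)
    {
    	void *rdev = lookup(1);

    	if (is_err(rdev))	/* test the pointer we just assigned */
    		printf("lookup failed: %ld\n", (long)rdev);
    	return 0;
    }
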
+9 -3
net/x25/x25_facilities.c
··· 61 61 	while (len > 0) {
62 62 		switch (*p & X25_FAC_CLASS_MASK) {
63 63 		case X25_FAC_CLASS_A:
64 +			if (len < 2)
65 +				return 0;
64 66 			switch (*p) {
65 67 			case X25_FAC_REVERSE:
66 68 				if((p[1] & 0x81) == 0x81) {
··· 106 104 			len -= 2;
107 105 			break;
108 106 		case X25_FAC_CLASS_B:
107 +			if (len < 3)
108 +				return 0;
109 109 			switch (*p) {
110 110 			case X25_FAC_PACKET_SIZE:
111 111 				facilities->pacsize_in = p[1];
··· 129 125 			len -= 3;
130 126 			break;
131 127 		case X25_FAC_CLASS_C:
128 +			if (len < 4)
129 +				return 0;
132 130 			printk(KERN_DEBUG "X.25: unknown facility %02X, "
133 131 			       "values %02X, %02X, %02X\n",
134 132 			       p[0], p[1], p[2], p[3]);
··· 138 132 			len -= 4;
139 133 			break;
140 134 		case X25_FAC_CLASS_D:
135 +			if (len < p[1] + 2)
136 +				return 0;
141 137 			switch (*p) {
142 138 			case X25_FAC_CALLING_AE:
143 139 				if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
··· 157 149 				break;
158 150 			default:
159 151 				printk(KERN_DEBUG "X.25: unknown facility %02X,"
160 -				       "length %d, values %02X, %02X, "
161 -				       "%02X, %02X\n",
162 -				       p[0], p[1], p[2], p[3], p[4], p[5]);
152 +				       "length %d\n", p[0], p[1]);
163 153 				break;
164 154 			}
165 155 			len -= p[1] + 2;
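
Each X.25 facility class has a known encoded size — 2, 3, and 4 bytes for classes A, B, and C, and p[1] + 2 bytes for the variable-length class D — so checking the remaining length before decoding each element stops a malformed facilities block from walking p past the end of the buffer. The same guard structure in a self-contained sketch (simplified classes and hypothetical constants, not the real X.25 encoding):

    #include <stdio.h>

    /* Simplified facility classes: the top two bits of the first
     * byte select the element size, mirroring X25_FAC_CLASS_MASK. */
    enum { CLASS_A, CLASS_B, CLASS_C, CLASS_D };

    static int parse(const unsigned char *p, int len)
    {
    	while (len > 0) {
    		int need;

    		switch (p[0] >> 6) {	/* class in the top 2 bits */
    		case CLASS_A: need = 2; break;
    		case CLASS_B: need = 3; break;
    		case CLASS_C: need = 4; break;
    		default:	/* class D: length byte follows, but
    				 * only read it if it is in bounds */
    			need = (len >= 2) ? p[1] + 2 : 2;
    			break;
    		}
    		if (len < need)
    			return 0;	/* truncated block: reject */
    		p += need;
    		len -= need;
    	}
    	return 1;
    }

    int main(void)
    {
    	unsigned char ok[]  = { 0x01, 0x00 };	/* one class-A element */
    	unsigned char bad[] = { 0xc0, 0x09 };	/* class-D claiming 11 bytes */

    	printf("ok: %d bad: %d\n", parse(ok, 2), parse(bad, 2));
    	return 0;
    }
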