Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (92 commits)
gianfar: Revive VLAN support
vlan: Export symbols as non-GPL symbols.
bnx2x: tx_has_work should not wait for FW
netxen: reduce memory footprint
netxen: fix vlan tso/checksum offload
net: Fix linux/if_frad.h's suitability for userspace.
net: Move config NET_NS from net/Kconfig to init/Kconfig
isdn: Fix missing ifdef in isdn_ppp
networking: document "nc" in addition to "netcat" in netconsole.txt
e1000e: workaround hw errata
af_key: initialize xfrm encap_oa
virtio_net: Fix MAX_PACKET_LEN to support 802.1Q VLANs
lcs: fix compilation for !CONFIG_IP_MULTICAST
rtl8187: Add termination packet to prevent stall
iwlwifi: fix rs_get_rate WARN_ON()
p54usb: fix packet loss with first generation devices
sctp: Fix another socket race during accept/peeloff
sctp: Properly timestamp outgoing data chunks for rtx purposes
sctp: Correctly start rtx timer on new packet transmissions.
sctp: Fix crc32c calculations on big-endian arches.
...

+815 -478
+2 -1
Documentation/networking/netconsole.txt
··· 51 initialized and attempts to bring up the supplied dev at the supplied 52 address. 53 54 - The remote host can run either 'netcat -u -l -p <port>' or syslogd. 55 56 Dynamic reconfiguration: 57 ========================
··· 51 initialized and attempts to bring up the supplied dev at the supplied 52 address. 53 54 + The remote host can run either 'netcat -u -l -p <port>', 55 + 'nc -l -u <port>' or syslogd. 56 57 Dynamic reconfiguration: 58 ========================
+2
drivers/isdn/i4l/isdn_ppp.c
··· 431 return 0; 432 } 433 434 static int get_filter(void __user *arg, struct sock_filter **p) 435 { 436 struct sock_fprog uprog; ··· 466 *p = code; 467 return uprog.len; 468 } 469 470 /* 471 * ippp device ioctl
··· 431 return 0; 432 } 433 434 + #ifdef CONFIG_IPPP_FILTER 435 static int get_filter(void __user *arg, struct sock_filter **p) 436 { 437 struct sock_fprog uprog; ··· 465 *p = code; 466 return uprog.len; 467 } 468 + #endif /* CONFIG_IPPP_FILTER */ 469 470 /* 471 * ippp device ioctl
+2 -9
drivers/net/bnx2x.h
··· 1 /* bnx2x.h: Broadcom Everest network driver. 2 * 3 - * Copyright (c) 2007-2008 Broadcom Corporation 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by ··· 271 272 #define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) 273 274 - #define BNX2X_HAS_TX_WORK(fp) \ 275 - ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || \ 276 - (fp->tx_pkt_prod != fp->tx_pkt_cons)) 277 - 278 - #define BNX2X_HAS_RX_WORK(fp) \ 279 - (fp->rx_comp_cons != rx_cons_sb) 280 - 281 - #define BNX2X_HAS_WORK(fp) (BNX2X_HAS_RX_WORK(fp) || BNX2X_HAS_TX_WORK(fp)) 282 283 284 /* MC hsi */
··· 1 /* bnx2x.h: Broadcom Everest network driver. 2 * 3 + * Copyright (c) 2007-2009 Broadcom Corporation 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by ··· 271 272 #define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) 273 274 + #define BNX2X_HAS_WORK(fp) (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)) 275 276 277 /* MC hsi */
+46 -18
drivers/net/bnx2x_link.c
··· 1 - /* Copyright 2008 Broadcom Corporation 2 * 3 * Unless you and Broadcom execute a separate written software license 4 * agreement governing use of this software, this software is licensed to you ··· 316 else 317 val &= ~0x810; 318 EMAC_WR(bp, EMAC_REG_EMAC_MODE, val); 319 320 /* enable emac for jumbo packets */ 321 EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE, ··· 1612 u32 gp_status) 1613 { 1614 struct bnx2x *bp = params->bp; 1615 - 1616 u8 rc = 0; 1617 vars->link_status = 0; 1618 ··· 1632 1633 switch (gp_status & GP_STATUS_SPEED_MASK) { 1634 case GP_STATUS_10M: 1635 - vars->line_speed = SPEED_10; 1636 if (vars->duplex == DUPLEX_FULL) 1637 vars->link_status |= LINK_10TFD; 1638 else ··· 1640 break; 1641 1642 case GP_STATUS_100M: 1643 - vars->line_speed = SPEED_100; 1644 if (vars->duplex == DUPLEX_FULL) 1645 vars->link_status |= LINK_100TXFD; 1646 else ··· 1649 1650 case GP_STATUS_1G: 1651 case GP_STATUS_1G_KX: 1652 - vars->line_speed = SPEED_1000; 1653 if (vars->duplex == DUPLEX_FULL) 1654 vars->link_status |= LINK_1000TFD; 1655 else ··· 1657 break; 1658 1659 case GP_STATUS_2_5G: 1660 - vars->line_speed = SPEED_2500; 1661 if (vars->duplex == DUPLEX_FULL) 1662 vars->link_status |= LINK_2500TFD; 1663 else ··· 1674 case GP_STATUS_10G_KX4: 1675 case GP_STATUS_10G_HIG: 1676 case GP_STATUS_10G_CX4: 1677 - vars->line_speed = SPEED_10000; 1678 vars->link_status |= LINK_10GTFD; 1679 break; 1680 1681 case GP_STATUS_12G_HIG: 1682 - vars->line_speed = SPEED_12000; 1683 vars->link_status |= LINK_12GTFD; 1684 break; 1685 1686 case GP_STATUS_12_5G: 1687 - vars->line_speed = SPEED_12500; 1688 vars->link_status |= LINK_12_5GTFD; 1689 break; 1690 1691 case GP_STATUS_13G: 1692 - vars->line_speed = SPEED_13000; 1693 vars->link_status |= LINK_13GTFD; 1694 break; 1695 1696 case GP_STATUS_15G: 1697 - vars->line_speed = SPEED_15000; 1698 vars->link_status |= LINK_15GTFD; 1699 break; 1700 1701 case GP_STATUS_16G: 1702 - vars->line_speed = SPEED_16000; 1703 vars->link_status |= LINK_16GTFD; 1704 break; 1705 ··· 1711 break; 1712 } 1713 1714 vars->link_status |= LINK_STATUS_SERDES_LINK; 1715 1716 if ((params->req_line_speed == SPEED_AUTO_NEG) && ··· 3583 (MDIO_REG_BANK_CL73_IEEEB0 + 3584 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)), 3585 0x6041); 3586 - 3587 /* set aer mmd back */ 3588 bnx2x_set_aer_mmd(params, vars); 3589 ··· 3882 } 3883 3884 if (vars->phy_flags & PHY_XGXS_FLAG) { 3885 - if (params->req_line_speed && 3886 ((params->req_line_speed == SPEED_100) || 3887 - (params->req_line_speed == SPEED_10))) { 3888 vars->phy_flags |= PHY_SGMII_FLAG; 3889 } else { 3890 vars->phy_flags &= ~PHY_SGMII_FLAG; ··· 4212 /* activate nig drain */ 4213 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); 4214 4215 /* reset BigMac */ 4216 bnx2x_bmac_rx_disable(bp, params->port); 4217 REG_WR(bp, GRCBASE_MISC + ··· 4261 4262 /* update shared memory */ 4263 bnx2x_update_mng(params, vars->link_status); 4264 return rc; 4265 } 4266 /* This function should called upon link interrupt */ ··· 4299 DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n", 4300 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), 4301 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); 4302 4303 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 4304 ··· 4404 ext_phy_addr[port], 4405 MDIO_PMA_DEVAD, 4406 MDIO_PMA_REG_ROM_VER1, &fw_ver1); 4407 - if (fw_ver1 == 0) { 4408 DP(NETIF_MSG_LINK, 4409 - "bnx2x_8073_common_init_phy port %x " 4410 - "fw Download failed\n", port); 4411 return -EINVAL; 4412 } 4413
··· 1 + /* Copyright 2008-2009 Broadcom Corporation 2 * 3 * Unless you and Broadcom execute a separate written software license 4 * agreement governing use of this software, this software is licensed to you ··· 316 else 317 val &= ~0x810; 318 EMAC_WR(bp, EMAC_REG_EMAC_MODE, val); 319 + 320 + /* enable emac */ 321 + REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1); 322 323 /* enable emac for jumbo packets */ 324 EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE, ··· 1609 u32 gp_status) 1610 { 1611 struct bnx2x *bp = params->bp; 1612 + u16 new_line_speed; 1613 u8 rc = 0; 1614 vars->link_status = 0; 1615 ··· 1629 1630 switch (gp_status & GP_STATUS_SPEED_MASK) { 1631 case GP_STATUS_10M: 1632 + new_line_speed = SPEED_10; 1633 if (vars->duplex == DUPLEX_FULL) 1634 vars->link_status |= LINK_10TFD; 1635 else ··· 1637 break; 1638 1639 case GP_STATUS_100M: 1640 + new_line_speed = SPEED_100; 1641 if (vars->duplex == DUPLEX_FULL) 1642 vars->link_status |= LINK_100TXFD; 1643 else ··· 1646 1647 case GP_STATUS_1G: 1648 case GP_STATUS_1G_KX: 1649 + new_line_speed = SPEED_1000; 1650 if (vars->duplex == DUPLEX_FULL) 1651 vars->link_status |= LINK_1000TFD; 1652 else ··· 1654 break; 1655 1656 case GP_STATUS_2_5G: 1657 + new_line_speed = SPEED_2500; 1658 if (vars->duplex == DUPLEX_FULL) 1659 vars->link_status |= LINK_2500TFD; 1660 else ··· 1671 case GP_STATUS_10G_KX4: 1672 case GP_STATUS_10G_HIG: 1673 case GP_STATUS_10G_CX4: 1674 + new_line_speed = SPEED_10000; 1675 vars->link_status |= LINK_10GTFD; 1676 break; 1677 1678 case GP_STATUS_12G_HIG: 1679 + new_line_speed = SPEED_12000; 1680 vars->link_status |= LINK_12GTFD; 1681 break; 1682 1683 case GP_STATUS_12_5G: 1684 + new_line_speed = SPEED_12500; 1685 vars->link_status |= LINK_12_5GTFD; 1686 break; 1687 1688 case GP_STATUS_13G: 1689 + new_line_speed = SPEED_13000; 1690 vars->link_status |= LINK_13GTFD; 1691 break; 1692 1693 case GP_STATUS_15G: 1694 + new_line_speed = SPEED_15000; 1695 vars->link_status |= LINK_15GTFD; 1696 break; 1697 1698 case GP_STATUS_16G: 1699 + new_line_speed = SPEED_16000; 1700 vars->link_status |= LINK_16GTFD; 1701 break; 1702 ··· 1708 break; 1709 } 1710 1711 + /* Upon link speed change set the NIG into drain mode. 1712 + Comes to deals with possible FIFO glitch due to clk change 1713 + when speed is decreased without link down indicator */ 1714 + if (new_line_speed != vars->line_speed) { 1715 + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE 1716 + + params->port*4, 0); 1717 + msleep(1); 1718 + } 1719 + vars->line_speed = new_line_speed; 1720 vars->link_status |= LINK_STATUS_SERDES_LINK; 1721 1722 if ((params->req_line_speed == SPEED_AUTO_NEG) && ··· 3571 (MDIO_REG_BANK_CL73_IEEEB0 + 3572 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)), 3573 0x6041); 3574 + msleep(200); 3575 /* set aer mmd back */ 3576 bnx2x_set_aer_mmd(params, vars); 3577 ··· 3870 } 3871 3872 if (vars->phy_flags & PHY_XGXS_FLAG) { 3873 + if ((params->req_line_speed && 3874 ((params->req_line_speed == SPEED_100) || 3875 + (params->req_line_speed == SPEED_10))) || 3876 + (!params->req_line_speed && 3877 + (params->speed_cap_mask >= 3878 + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) && 3879 + (params->speed_cap_mask < 3880 + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) 3881 + )) { 3882 vars->phy_flags |= PHY_SGMII_FLAG; 3883 } else { 3884 vars->phy_flags &= ~PHY_SGMII_FLAG; ··· 4194 /* activate nig drain */ 4195 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); 4196 4197 + /* disable emac */ 4198 + REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); 4199 + 4200 + msleep(10); 4201 + 4202 /* reset BigMac */ 4203 bnx2x_bmac_rx_disable(bp, params->port); 4204 REG_WR(bp, GRCBASE_MISC + ··· 4238 4239 /* update shared memory */ 4240 bnx2x_update_mng(params, vars->link_status); 4241 + msleep(20); 4242 return rc; 4243 } 4244 /* This function should called upon link interrupt */ ··· 4275 DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n", 4276 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), 4277 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); 4278 + 4279 + /* disable emac */ 4280 + REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); 4281 4282 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 4283 ··· 4377 ext_phy_addr[port], 4378 MDIO_PMA_DEVAD, 4379 MDIO_PMA_REG_ROM_VER1, &fw_ver1); 4380 + if (fw_ver1 == 0 || fw_ver1 == 0x4321) { 4381 DP(NETIF_MSG_LINK, 4382 + "bnx2x_8073_common_init_phy port %x:" 4383 + "Download failed. fw version = 0x%x\n", 4384 + port, fw_ver1); 4385 return -EINVAL; 4386 } 4387
+191 -127
drivers/net/bnx2x_main.c
··· 1 /* bnx2x_main.c: Broadcom Everest network driver. 2 * 3 - * Copyright (c) 2007-2008 Broadcom Corporation 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by ··· 57 #include "bnx2x.h" 58 #include "bnx2x_init.h" 59 60 - #define DRV_MODULE_VERSION "1.45.23" 61 - #define DRV_MODULE_RELDATE "2008/11/03" 62 #define BNX2X_BC_VER 0x040200 63 64 /* Time in jiffies before concluding the transmitter is hung */ ··· 69 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 70 71 MODULE_AUTHOR("Eliezer Tamir"); 72 - MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver"); 73 MODULE_LICENSE("GPL"); 74 MODULE_VERSION(DRV_MODULE_VERSION); 75 ··· 732 /* 733 * fast path service functions 734 */ 735 736 /* free skb in the packet ring at pos idx 737 * return idx of last bd freed ··· 5155 } 5156 5157 5158 static int bnx2x_init_common(struct bnx2x *bp) 5159 { 5160 u32 val, i; 5161 5162 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp)); 5163 5164 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); 5165 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc); 5166 ··· 6150 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) 6151 { 6152 bnx2x_int_disable_sync(bp, disable_hw); 6153 if (netif_running(bp->dev)) { 6154 - bnx2x_napi_disable(bp); 6155 netif_tx_disable(bp->dev); 6156 bp->dev->trans_start = jiffies; /* prevent tx timeout */ 6157 } ··· 6171 * multicast 64-127:port0 128-191:port1 6172 */ 6173 config->hdr.length_6b = 2; 6174 - config->hdr.offset = port ? 31 : 0; 6175 config->hdr.client_id = BP_CL_ID(bp); 6176 config->hdr.reserved1 = 0; 6177 ··· 6335 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) 6336 { 6337 u32 load_code; 6338 - int i, rc; 6339 #ifdef BNX2X_STOP_ON_ERROR 6340 if (unlikely(bp->panic)) 6341 return -EPERM; 6342 #endif 6343 6344 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; 6345 6346 /* Send LOAD_REQUEST command to MCP 6347 Returns the type of LOAD command: ··· 6423 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); 6424 if (!load_code) { 6425 BNX2X_ERR("MCP response failure, aborting\n"); 6426 - return -EBUSY; 6427 } 6428 - if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) 6429 - return -EBUSY; /* other port in diagnostic mode */ 6430 6431 } else { 6432 int port = BP_PORT(bp); ··· 6455 bp->port.pmf = 0; 6456 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); 6457 6458 - /* if we can't use MSI-X we only need one fp, 6459 - * so try to enable MSI-X with the requested number of fp's 6460 - * and fallback to inta with one fp 6461 - */ 6462 - if (use_inta) { 6463 - bp->num_queues = 1; 6464 - 6465 - } else { 6466 - if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp))) 6467 - /* user requested number */ 6468 - bp->num_queues = use_multi; 6469 - 6470 - else if (use_multi) 6471 - bp->num_queues = min_t(u32, num_online_cpus(), 6472 - BP_MAX_QUEUES(bp)); 6473 - else 6474 - bp->num_queues = 1; 6475 - 6476 - if (bnx2x_enable_msix(bp)) { 6477 - /* failed to enable MSI-X */ 6478 - bp->num_queues = 1; 6479 - if (use_multi) 6480 - BNX2X_ERR("Multi requested but failed" 6481 - " to enable MSI-X\n"); 6482 - } 6483 - } 6484 - DP(NETIF_MSG_IFUP, 6485 - "set number of queues to %d\n", bp->num_queues); 6486 - 6487 - if (bnx2x_alloc_mem(bp)) 6488 - return -ENOMEM; 6489 - 6490 - for_each_queue(bp, i) 6491 - bnx2x_fp(bp, i, disable_tpa) = 6492 - ((bp->flags & TPA_ENABLE_FLAG) == 0); 6493 - 6494 - if (bp->flags & USING_MSIX_FLAG) { 6495 - rc = bnx2x_req_msix_irqs(bp); 6496 - if (rc) { 6497 - pci_disable_msix(bp->pdev); 6498 - goto load_error; 6499 - } 6500 - } else { 6501 - bnx2x_ack_int(bp); 6502 - rc = bnx2x_req_irq(bp); 6503 - if (rc) { 6504 - BNX2X_ERR("IRQ request failed, aborting\n"); 6505 - goto load_error; 6506 - } 6507 - } 6508 - 6509 - for_each_queue(bp, i) 6510 - netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), 6511 - bnx2x_poll, 128); 6512 - 6513 /* Initialize HW */ 6514 rc = bnx2x_init_hw(bp, load_code); 6515 if (rc) { 6516 BNX2X_ERR("HW init failed, aborting\n"); 6517 - goto load_int_disable; 6518 } 6519 6520 /* Setup NIC internals and enable interrupts */ ··· 6471 if (!load_code) { 6472 BNX2X_ERR("MCP response failure, aborting\n"); 6473 rc = -EBUSY; 6474 - goto load_rings_free; 6475 } 6476 } 6477 ··· 6480 rc = bnx2x_setup_leading(bp); 6481 if (rc) { 6482 BNX2X_ERR("Setup leading failed!\n"); 6483 - goto load_netif_stop; 6484 } 6485 6486 if (CHIP_IS_E1H(bp)) ··· 6493 for_each_nondefault_queue(bp, i) { 6494 rc = bnx2x_setup_multi(bp, i); 6495 if (rc) 6496 - goto load_netif_stop; 6497 } 6498 6499 if (CHIP_IS_E1(bp)) ··· 6509 case LOAD_NORMAL: 6510 /* Tx queue should be only reenabled */ 6511 netif_wake_queue(bp->dev); 6512 bnx2x_set_rx_mode(bp->dev); 6513 break; 6514 6515 case LOAD_OPEN: 6516 netif_start_queue(bp->dev); 6517 bnx2x_set_rx_mode(bp->dev); 6518 - if (bp->flags & USING_MSIX_FLAG) 6519 - printk(KERN_INFO PFX "%s: using MSI-X\n", 6520 - bp->dev->name); 6521 break; 6522 6523 case LOAD_DIAG: 6524 bnx2x_set_rx_mode(bp->dev); 6525 bp->state = BNX2X_STATE_DIAG; 6526 break; ··· 6538 6539 return 0; 6540 6541 - load_netif_stop: 6542 - bnx2x_napi_disable(bp); 6543 - load_rings_free: 6544 /* Free SKBs, SGEs, TPA pool and driver internals */ 6545 bnx2x_free_skbs(bp); 6546 for_each_queue(bp, i) 6547 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 6548 - load_int_disable: 6549 - bnx2x_int_disable_sync(bp, 1); 6550 /* Release IRQs */ 6551 bnx2x_free_irq(bp); 6552 - load_error: 6553 bnx2x_free_mem(bp); 6554 - bp->port.pmf = 0; 6555 6556 /* TBD we really need to reset the chip 6557 if we want to recover from this */ ··· 6629 } 6630 cnt--; 6631 msleep(1); 6632 } 6633 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; 6634 bp->fp[0].state = BNX2X_FP_STATE_CLOSED; ··· 6681 /* TODO: Close Doorbell port? */ 6682 } 6683 6684 - static void bnx2x_reset_common(struct bnx2x *bp) 6685 - { 6686 - /* reset_common */ 6687 - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 6688 - 0xd3ffff7f); 6689 - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403); 6690 - } 6691 - 6692 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) 6693 { 6694 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n", ··· 6721 bnx2x_set_storm_rx_mode(bp); 6722 6723 bnx2x_netif_stop(bp, 1); 6724 - if (!netif_running(bp->dev)) 6725 - bnx2x_napi_disable(bp); 6726 del_timer_sync(&bp->timer); 6727 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb, 6728 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); 6729 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 6730 6731 /* Wait until tx fast path tasks complete */ 6732 for_each_queue(bp, i) { ··· 6736 6737 cnt = 1000; 6738 smp_rmb(); 6739 - while (BNX2X_HAS_TX_WORK(fp)) { 6740 6741 bnx2x_tx_int(fp, 1000); 6742 if (!cnt) { ··· 6756 } 6757 /* Give HW time to discard old tx messages */ 6758 msleep(1); 6759 - 6760 - /* Release IRQs */ 6761 - bnx2x_free_irq(bp); 6762 6763 if (CHIP_IS_E1(bp)) { 6764 struct mac_configuration_cmd *config = ··· 6865 bnx2x_free_skbs(bp); 6866 for_each_queue(bp, i) 6867 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 6868 bnx2x_free_mem(bp); 6869 6870 bp->state = BNX2X_STATE_CLOSED; ··· 6919 */ 6920 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 6921 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); 6922 - if (val == 0x7) 6923 - REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); 6924 - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 6925 - 6926 if (val == 0x7) { 6927 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 6928 /* save our func */ 6929 int func = BP_FUNC(bp); 6930 u32 swap_en; 6931 u32 swap_val; 6932 6933 BNX2X_DEV_INFO("UNDI is active! reset device\n"); 6934 ··· 6953 6954 bnx2x_fw_command(bp, reset_code); 6955 } 6956 6957 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 : 6958 HC_REG_CONFIG_0), 0x1000); ··· 7001 bp->fw_seq = 7002 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & 7003 DRV_MSG_SEQ_NUMBER_MASK); 7004 - } 7005 } 7006 } 7007 ··· 7020 id |= ((val & 0xf) << 12); 7021 val = REG_RD(bp, MISC_REG_CHIP_METAL); 7022 id |= ((val & 0xff) << 4); 7023 - REG_RD(bp, MISC_REG_BOND_ID); 7024 id |= (val & 0xf); 7025 bp->common.chip_id = id; 7026 bp->link_params.chip_id = bp->common.chip_id; ··· 8152 struct bnx2x *bp = netdev_priv(dev); 8153 int rc; 8154 8155 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" 8156 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", 8157 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, ··· 8757 8758 if (loopback_mode == BNX2X_MAC_LOOPBACK) { 8759 bp->link_params.loopback_mode = LOOPBACK_BMAC; 8760 - bnx2x_acquire_phy_lock(bp); 8761 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 8762 - bnx2x_release_phy_lock(bp); 8763 8764 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) { 8765 bp->link_params.loopback_mode = LOOPBACK_XGXS_10; 8766 - bnx2x_acquire_phy_lock(bp); 8767 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 8768 - bnx2x_release_phy_lock(bp); 8769 /* wait until link state is restored */ 8770 - bnx2x_wait_for_link(bp, link_up); 8771 - 8772 } else 8773 return -EINVAL; 8774 ··· 8873 return BNX2X_LOOPBACK_FAILED; 8874 8875 bnx2x_netif_stop(bp, 1); 8876 8877 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) { 8878 DP(NETIF_MSG_PROBE, "MAC loopback failed\n"); ··· 8885 rc |= BNX2X_PHY_LOOPBACK_FAILED; 8886 } 8887 8888 bnx2x_netif_start(bp); 8889 8890 return rc; ··· 8959 return -ENODEV; 8960 8961 config->hdr.length_6b = 0; 8962 - config->hdr.offset = 0; 8963 config->hdr.client_id = BP_CL_ID(bp); 8964 config->hdr.reserved1 = 0; 8965 ··· 9327 return 0; 9328 } 9329 9330 /* 9331 * net_device service functions 9332 */ ··· 9349 napi); 9350 struct bnx2x *bp = fp->bp; 9351 int work_done = 0; 9352 - u16 rx_cons_sb; 9353 9354 #ifdef BNX2X_STOP_ON_ERROR 9355 if (unlikely(bp->panic)) ··· 9361 9362 bnx2x_update_fpsb_idx(fp); 9363 9364 - if (BNX2X_HAS_TX_WORK(fp)) 9365 bnx2x_tx_int(fp, budget); 9366 9367 - rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); 9368 - if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) 9369 - rx_cons_sb++; 9370 - if (BNX2X_HAS_RX_WORK(fp)) 9371 work_done = bnx2x_rx_int(fp, budget); 9372 - 9373 rmb(); /* BNX2X_HAS_WORK() reads the status block */ 9374 - rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); 9375 - if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) 9376 - rx_cons_sb++; 9377 9378 /* must not complete if we consumed full budget */ 9379 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) { ··· 9477 return rc; 9478 } 9479 9480 /* check if packet requires linearization (packet is too fragmented) */ 9481 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, 9482 u32 xmit_type) ··· 9555 9556 return to_copy; 9557 } 9558 9559 /* called with netif_tx_lock 9560 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call ··· 9596 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, 9597 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); 9598 9599 /* First, check if we need to linearize the skb 9600 (due to FW restrictions) */ 9601 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { ··· 9609 return NETDEV_TX_OK; 9610 } 9611 } 9612 9613 /* 9614 Please read carefully. First we use one BD which we mark as start, ··· 9840 { 9841 struct bnx2x *bp = netdev_priv(dev); 9842 9843 bnx2x_set_power_state(bp, PCI_D0); 9844 9845 return bnx2x_nic_load(bp, LOAD_OPEN); ··· 9925 for (; i < old; i++) { 9926 if (CAM_IS_INVALID(config-> 9927 config_table[i])) { 9928 - i--; /* already invalidated */ 9929 break; 9930 } 9931 /* invalidate */ ··· 10335 return rc; 10336 } 10337 10338 rc = register_netdev(dev); 10339 if (rc) { 10340 dev_err(&pdev->dev, "Cannot register net device\n"); 10341 goto init_one_exit; 10342 } 10343 - 10344 - pci_set_drvdata(pdev, dev); 10345 - 10346 - rc = bnx2x_init_bp(bp); 10347 - if (rc) { 10348 - unregister_netdev(dev); 10349 - goto init_one_exit; 10350 - } 10351 - 10352 - netif_carrier_off(dev); 10353 10354 bp->common.name = board_info[ent->driver_data].name; 10355 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx," ··· 10494 bnx2x_free_skbs(bp); 10495 for_each_queue(bp, i) 10496 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 10497 bnx2x_free_mem(bp); 10498 10499 bp->state = BNX2X_STATE_CLOSED;
··· 1 /* bnx2x_main.c: Broadcom Everest network driver. 2 * 3 + * Copyright (c) 2007-2009 Broadcom Corporation 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by ··· 57 #include "bnx2x.h" 58 #include "bnx2x_init.h" 59 60 + #define DRV_MODULE_VERSION "1.45.26" 61 + #define DRV_MODULE_RELDATE "2009/01/26" 62 #define BNX2X_BC_VER 0x040200 63 64 /* Time in jiffies before concluding the transmitter is hung */ ··· 69 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 70 71 MODULE_AUTHOR("Eliezer Tamir"); 72 + MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver"); 73 MODULE_LICENSE("GPL"); 74 MODULE_VERSION(DRV_MODULE_VERSION); 75 ··· 732 /* 733 * fast path service functions 734 */ 735 + 736 + static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp) 737 + { 738 + u16 tx_cons_sb; 739 + 740 + /* Tell compiler that status block fields can change */ 741 + barrier(); 742 + tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb); 743 + return (fp->tx_pkt_cons != tx_cons_sb); 744 + } 745 + 746 + static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp) 747 + { 748 + /* Tell compiler that consumer and producer can change */ 749 + barrier(); 750 + return (fp->tx_pkt_prod != fp->tx_pkt_cons); 751 + 752 + } 753 754 /* free skb in the packet ring at pos idx 755 * return idx of last bd freed ··· 5137 } 5138 5139 5140 + static void bnx2x_reset_common(struct bnx2x *bp) 5141 + { 5142 + /* reset_common */ 5143 + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 5144 + 0xd3ffff7f); 5145 + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403); 5146 + } 5147 + 5148 static int bnx2x_init_common(struct bnx2x *bp) 5149 { 5150 u32 val, i; 5151 5152 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp)); 5153 5154 + bnx2x_reset_common(bp); 5155 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); 5156 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc); 5157 ··· 6123 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) 6124 { 6125 bnx2x_int_disable_sync(bp, disable_hw); 6126 + bnx2x_napi_disable(bp); 6127 if (netif_running(bp->dev)) { 6128 netif_tx_disable(bp->dev); 6129 bp->dev->trans_start = jiffies; /* prevent tx timeout */ 6130 } ··· 6144 * multicast 64-127:port0 128-191:port1 6145 */ 6146 config->hdr.length_6b = 2; 6147 + config->hdr.offset = port ? 32 : 0; 6148 config->hdr.client_id = BP_CL_ID(bp); 6149 config->hdr.reserved1 = 0; 6150 ··· 6308 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) 6309 { 6310 u32 load_code; 6311 + int i, rc = 0; 6312 #ifdef BNX2X_STOP_ON_ERROR 6313 if (unlikely(bp->panic)) 6314 return -EPERM; 6315 #endif 6316 6317 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; 6318 + 6319 + if (use_inta) { 6320 + bp->num_queues = 1; 6321 + 6322 + } else { 6323 + if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp))) 6324 + /* user requested number */ 6325 + bp->num_queues = use_multi; 6326 + 6327 + else if (use_multi) 6328 + bp->num_queues = min_t(u32, num_online_cpus(), 6329 + BP_MAX_QUEUES(bp)); 6330 + else 6331 + bp->num_queues = 1; 6332 + 6333 + DP(NETIF_MSG_IFUP, 6334 + "set number of queues to %d\n", bp->num_queues); 6335 + 6336 + /* if we can't use MSI-X we only need one fp, 6337 + * so try to enable MSI-X with the requested number of fp's 6338 + * and fallback to MSI or legacy INTx with one fp 6339 + */ 6340 + rc = bnx2x_enable_msix(bp); 6341 + if (rc) { 6342 + /* failed to enable MSI-X */ 6343 + bp->num_queues = 1; 6344 + if (use_multi) 6345 + BNX2X_ERR("Multi requested but failed" 6346 + " to enable MSI-X\n"); 6347 + } 6348 + } 6349 + 6350 + if (bnx2x_alloc_mem(bp)) 6351 + return -ENOMEM; 6352 + 6353 + for_each_queue(bp, i) 6354 + bnx2x_fp(bp, i, disable_tpa) = 6355 + ((bp->flags & TPA_ENABLE_FLAG) == 0); 6356 + 6357 + for_each_queue(bp, i) 6358 + netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), 6359 + bnx2x_poll, 128); 6360 + 6361 + #ifdef BNX2X_STOP_ON_ERROR 6362 + for_each_queue(bp, i) { 6363 + struct bnx2x_fastpath *fp = &bp->fp[i]; 6364 + 6365 + fp->poll_no_work = 0; 6366 + fp->poll_calls = 0; 6367 + fp->poll_max_calls = 0; 6368 + fp->poll_complete = 0; 6369 + fp->poll_exit = 0; 6370 + } 6371 + #endif 6372 + bnx2x_napi_enable(bp); 6373 + 6374 + if (bp->flags & USING_MSIX_FLAG) { 6375 + rc = bnx2x_req_msix_irqs(bp); 6376 + if (rc) { 6377 + pci_disable_msix(bp->pdev); 6378 + goto load_error1; 6379 + } 6380 + printk(KERN_INFO PFX "%s: using MSI-X\n", bp->dev->name); 6381 + } else { 6382 + bnx2x_ack_int(bp); 6383 + rc = bnx2x_req_irq(bp); 6384 + if (rc) { 6385 + BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc); 6386 + goto load_error1; 6387 + } 6388 + } 6389 6390 /* Send LOAD_REQUEST command to MCP 6391 Returns the type of LOAD command: ··· 6325 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); 6326 if (!load_code) { 6327 BNX2X_ERR("MCP response failure, aborting\n"); 6328 + rc = -EBUSY; 6329 + goto load_error2; 6330 } 6331 + if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) { 6332 + rc = -EBUSY; /* other port in diagnostic mode */ 6333 + goto load_error2; 6334 + } 6335 6336 } else { 6337 int port = BP_PORT(bp); ··· 6354 bp->port.pmf = 0; 6355 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); 6356 6357 /* Initialize HW */ 6358 rc = bnx2x_init_hw(bp, load_code); 6359 if (rc) { 6360 BNX2X_ERR("HW init failed, aborting\n"); 6361 + goto load_error2; 6362 } 6363 6364 /* Setup NIC internals and enable interrupts */ ··· 6425 if (!load_code) { 6426 BNX2X_ERR("MCP response failure, aborting\n"); 6427 rc = -EBUSY; 6428 + goto load_error3; 6429 } 6430 } 6431 ··· 6434 rc = bnx2x_setup_leading(bp); 6435 if (rc) { 6436 BNX2X_ERR("Setup leading failed!\n"); 6437 + goto load_error3; 6438 } 6439 6440 if (CHIP_IS_E1H(bp)) ··· 6447 for_each_nondefault_queue(bp, i) { 6448 rc = bnx2x_setup_multi(bp, i); 6449 if (rc) 6450 + goto load_error3; 6451 } 6452 6453 if (CHIP_IS_E1(bp)) ··· 6463 case LOAD_NORMAL: 6464 /* Tx queue should be only reenabled */ 6465 netif_wake_queue(bp->dev); 6466 + /* Initialize the receive filter. */ 6467 bnx2x_set_rx_mode(bp->dev); 6468 break; 6469 6470 case LOAD_OPEN: 6471 netif_start_queue(bp->dev); 6472 + /* Initialize the receive filter. */ 6473 bnx2x_set_rx_mode(bp->dev); 6474 break; 6475 6476 case LOAD_DIAG: 6477 + /* Initialize the receive filter. */ 6478 bnx2x_set_rx_mode(bp->dev); 6479 bp->state = BNX2X_STATE_DIAG; 6480 break; ··· 6492 6493 return 0; 6494 6495 + load_error3: 6496 + bnx2x_int_disable_sync(bp, 1); 6497 + if (!BP_NOMCP(bp)) { 6498 + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP); 6499 + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); 6500 + } 6501 + bp->port.pmf = 0; 6502 /* Free SKBs, SGEs, TPA pool and driver internals */ 6503 bnx2x_free_skbs(bp); 6504 for_each_queue(bp, i) 6505 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 6506 + load_error2: 6507 /* Release IRQs */ 6508 bnx2x_free_irq(bp); 6509 + load_error1: 6510 + bnx2x_napi_disable(bp); 6511 + for_each_queue(bp, i) 6512 + netif_napi_del(&bnx2x_fp(bp, i, napi)); 6513 bnx2x_free_mem(bp); 6514 6515 /* TBD we really need to reset the chip 6516 if we want to recover from this */ ··· 6578 } 6579 cnt--; 6580 msleep(1); 6581 + rmb(); /* Refresh the dsb_sp_prod */ 6582 } 6583 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; 6584 bp->fp[0].state = BNX2X_FP_STATE_CLOSED; ··· 6629 /* TODO: Close Doorbell port? */ 6630 } 6631 6632 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) 6633 { 6634 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n", ··· 6677 bnx2x_set_storm_rx_mode(bp); 6678 6679 bnx2x_netif_stop(bp, 1); 6680 + 6681 del_timer_sync(&bp->timer); 6682 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb, 6683 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); 6684 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 6685 + 6686 + /* Release IRQs */ 6687 + bnx2x_free_irq(bp); 6688 6689 /* Wait until tx fast path tasks complete */ 6690 for_each_queue(bp, i) { ··· 6690 6691 cnt = 1000; 6692 smp_rmb(); 6693 + while (bnx2x_has_tx_work_unload(fp)) { 6694 6695 bnx2x_tx_int(fp, 1000); 6696 if (!cnt) { ··· 6710 } 6711 /* Give HW time to discard old tx messages */ 6712 msleep(1); 6713 6714 if (CHIP_IS_E1(bp)) { 6715 struct mac_configuration_cmd *config = ··· 6822 bnx2x_free_skbs(bp); 6823 for_each_queue(bp, i) 6824 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 6825 + for_each_queue(bp, i) 6826 + netif_napi_del(&bnx2x_fp(bp, i, napi)); 6827 bnx2x_free_mem(bp); 6828 6829 bp->state = BNX2X_STATE_CLOSED; ··· 6874 */ 6875 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 6876 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); 6877 if (val == 0x7) { 6878 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 6879 /* save our func */ 6880 int func = BP_FUNC(bp); 6881 u32 swap_en; 6882 u32 swap_val; 6883 + 6884 + /* clear the UNDI indication */ 6885 + REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); 6886 6887 BNX2X_DEV_INFO("UNDI is active! reset device\n"); 6888 ··· 6909 6910 bnx2x_fw_command(bp, reset_code); 6911 } 6912 + 6913 + /* now it's safe to release the lock */ 6914 + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 6915 6916 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 : 6917 HC_REG_CONFIG_0), 0x1000); ··· 6954 bp->fw_seq = 6955 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & 6956 DRV_MSG_SEQ_NUMBER_MASK); 6957 + 6958 + } else 6959 + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 6960 } 6961 } 6962 ··· 6971 id |= ((val & 0xf) << 12); 6972 val = REG_RD(bp, MISC_REG_CHIP_METAL); 6973 id |= ((val & 0xff) << 4); 6974 + val = REG_RD(bp, MISC_REG_BOND_ID); 6975 id |= (val & 0xf); 6976 bp->common.chip_id = id; 6977 bp->link_params.chip_id = bp->common.chip_id; ··· 8103 struct bnx2x *bp = netdev_priv(dev); 8104 int rc; 8105 8106 + if (!netif_running(dev)) 8107 + return -EAGAIN; 8108 + 8109 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" 8110 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", 8111 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, ··· 8705 8706 if (loopback_mode == BNX2X_MAC_LOOPBACK) { 8707 bp->link_params.loopback_mode = LOOPBACK_BMAC; 8708 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 8709 8710 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) { 8711 + u16 cnt = 1000; 8712 bp->link_params.loopback_mode = LOOPBACK_XGXS_10; 8713 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 8714 /* wait until link state is restored */ 8715 + if (link_up) 8716 + while (cnt-- && bnx2x_test_link(&bp->link_params, 8717 + &bp->link_vars)) 8718 + msleep(10); 8719 } else 8720 return -EINVAL; 8721 ··· 8822 return BNX2X_LOOPBACK_FAILED; 8823 8824 bnx2x_netif_stop(bp, 1); 8825 + bnx2x_acquire_phy_lock(bp); 8826 8827 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) { 8828 DP(NETIF_MSG_PROBE, "MAC loopback failed\n"); ··· 8833 rc |= BNX2X_PHY_LOOPBACK_FAILED; 8834 } 8835 8836 + bnx2x_release_phy_lock(bp); 8837 bnx2x_netif_start(bp); 8838 8839 return rc; ··· 8906 return -ENODEV; 8907 8908 config->hdr.length_6b = 0; 8909 + if (CHIP_IS_E1(bp)) 8910 + config->hdr.offset = (BP_PORT(bp) ? 32 : 0); 8911 + else 8912 + config->hdr.offset = BP_FUNC(bp); 8913 config->hdr.client_id = BP_CL_ID(bp); 8914 config->hdr.reserved1 = 0; 8915 ··· 9271 return 0; 9272 } 9273 9274 + static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) 9275 + { 9276 + u16 rx_cons_sb; 9277 + 9278 + /* Tell compiler that status block fields can change */ 9279 + barrier(); 9280 + rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); 9281 + if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) 9282 + rx_cons_sb++; 9283 + return (fp->rx_comp_cons != rx_cons_sb); 9284 + } 9285 + 9286 /* 9287 * net_device service functions 9288 */ ··· 9281 napi); 9282 struct bnx2x *bp = fp->bp; 9283 int work_done = 0; 9284 9285 #ifdef BNX2X_STOP_ON_ERROR 9286 if (unlikely(bp->panic)) ··· 9294 9295 bnx2x_update_fpsb_idx(fp); 9296 9297 + if (bnx2x_has_tx_work(fp)) 9298 bnx2x_tx_int(fp, budget); 9299 9300 + if (bnx2x_has_rx_work(fp)) 9301 work_done = bnx2x_rx_int(fp, budget); 9302 rmb(); /* BNX2X_HAS_WORK() reads the status block */ 9303 9304 /* must not complete if we consumed full budget */ 9305 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) { ··· 9417 return rc; 9418 } 9419 9420 + #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) 9421 /* check if packet requires linearization (packet is too fragmented) */ 9422 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, 9423 u32 xmit_type) ··· 9494 9495 return to_copy; 9496 } 9497 + #endif 9498 9499 /* called with netif_tx_lock 9500 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call ··· 9534 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, 9535 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); 9536 9537 + #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) 9538 /* First, check if we need to linearize the skb 9539 (due to FW restrictions) */ 9540 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { ··· 9546 return NETDEV_TX_OK; 9547 } 9548 } 9549 + #endif 9550 9551 /* 9552 Please read carefully. First we use one BD which we mark as start, ··· 9776 { 9777 struct bnx2x *bp = netdev_priv(dev); 9778 9779 + netif_carrier_off(dev); 9780 + 9781 bnx2x_set_power_state(bp, PCI_D0); 9782 9783 return bnx2x_nic_load(bp, LOAD_OPEN); ··· 9859 for (; i < old; i++) { 9860 if (CAM_IS_INVALID(config-> 9861 config_table[i])) { 9862 + /* already invalidated */ 9863 break; 9864 } 9865 /* invalidate */ ··· 10269 return rc; 10270 } 10271 10272 + pci_set_drvdata(pdev, dev); 10273 + 10274 + rc = bnx2x_init_bp(bp); 10275 + if (rc) 10276 + goto init_one_exit; 10277 + 10278 rc = register_netdev(dev); 10279 if (rc) { 10280 dev_err(&pdev->dev, "Cannot register net device\n"); 10281 goto init_one_exit; 10282 } 10283 10284 bp->common.name = board_info[ent->driver_data].name; 10285 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx," ··· 10432 bnx2x_free_skbs(bp); 10433 for_each_queue(bp, i) 10434 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 10435 + for_each_queue(bp, i) 10436 + netif_napi_del(&bnx2x_fp(bp, i, napi)); 10437 bnx2x_free_mem(bp); 10438 10439 bp->state = BNX2X_STATE_CLOSED;
+1 -1
drivers/net/bnx2x_reg.h
··· 1 /* bnx2x_reg.h: Broadcom Everest network driver. 2 * 3 - * Copyright (c) 2007-2008 Broadcom Corporation 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by
··· 1 /* bnx2x_reg.h: Broadcom Everest network driver. 2 * 3 + * Copyright (c) 2007-2009 Broadcom Corporation 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by
+1
drivers/net/cxgb3/sge.c
··· 2104 { 2105 lro_mgr->dev = qs->netdev; 2106 lro_mgr->features = LRO_F_NAPI; 2107 lro_mgr->ip_summed = CHECKSUM_UNNECESSARY; 2108 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; 2109 lro_mgr->max_desc = T3_MAX_LRO_SES;
··· 2104 { 2105 lro_mgr->dev = qs->netdev; 2106 lro_mgr->features = LRO_F_NAPI; 2107 + lro_mgr->frag_align_pad = NET_IP_ALIGN; 2108 lro_mgr->ip_summed = CHECKSUM_UNNECESSARY; 2109 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; 2110 lro_mgr->max_desc = T3_MAX_LRO_SES;
+5 -1
drivers/net/e1000e/82571.c
··· 981 ew32(PBA_ECC, reg); 982 } 983 984 - /* PCI-Ex Control Register */ 985 if (hw->mac.type == e1000_82574) { 986 reg = er32(GCR); 987 reg |= (1 << 22); 988 ew32(GCR, reg); 989 } 990 991 return;
··· 981 ew32(PBA_ECC, reg); 982 } 983 984 + /* PCI-Ex Control Registers */ 985 if (hw->mac.type == e1000_82574) { 986 reg = er32(GCR); 987 reg |= (1 << 22); 988 ew32(GCR, reg); 989 + 990 + reg = er32(GCR2); 991 + reg |= 1; 992 + ew32(GCR2, reg); 993 } 994 995 return;
+1
drivers/net/e1000e/hw.h
··· 206 E1000_MANC2H = 0x05860, /* Management Control To Host - RW */ 207 E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */ 208 E1000_GCR = 0x05B00, /* PCI-Ex Control */ 209 E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */ 210 E1000_SWSM = 0x05B50, /* SW Semaphore */ 211 E1000_FWSM = 0x05B54, /* FW Semaphore */
··· 206 E1000_MANC2H = 0x05860, /* Management Control To Host - RW */ 207 E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */ 208 E1000_GCR = 0x05B00, /* PCI-Ex Control */ 209 + E1000_GCR2 = 0x05B64, /* PCI-Ex Control #2 */ 210 E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */ 211 E1000_SWSM = 0x05B50, /* SW Semaphore */ 212 E1000_FWSM = 0x05B54, /* FW Semaphore */
+1 -5
drivers/net/gianfar.c
··· 1423 { 1424 struct gfar_private *priv = netdev_priv(dev); 1425 unsigned long flags; 1426 - struct vlan_group *old_grp; 1427 u32 tempval; 1428 1429 spin_lock_irqsave(&priv->rxlock, flags); 1430 1431 - old_grp = priv->vlgrp; 1432 - 1433 - if (old_grp == grp) 1434 - return; 1435 1436 if (grp) { 1437 /* Enable VLAN tag insertion */
··· 1423 { 1424 struct gfar_private *priv = netdev_priv(dev); 1425 unsigned long flags; 1426 u32 tempval; 1427 1428 spin_lock_irqsave(&priv->rxlock, flags); 1429 1430 + priv->vlgrp = grp; 1431 1432 if (grp) { 1433 /* Enable VLAN tag insertion */
+39 -35
drivers/net/ixgbe/ixgbe_main.c
··· 318 rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 319 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; 320 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; 321 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl); 322 rx_ring->cpu = cpu; 323 } ··· 1744 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 1745 } 1746 1747 - static void ixgbe_vlan_rx_register(struct net_device *netdev, 1748 - struct vlan_group *grp) 1749 - { 1750 - struct ixgbe_adapter *adapter = netdev_priv(netdev); 1751 - u32 ctrl; 1752 - 1753 - if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1754 - ixgbe_irq_disable(adapter); 1755 - adapter->vlgrp = grp; 1756 - 1757 - /* 1758 - * For a DCB driver, always enable VLAN tag stripping so we can 1759 - * still receive traffic from a DCB-enabled host even if we're 1760 - * not in DCB mode. 1761 - */ 1762 - ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); 1763 - ctrl |= IXGBE_VLNCTRL_VME; 1764 - ctrl &= ~IXGBE_VLNCTRL_CFIEN; 1765 - IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl); 1766 - 1767 - if (grp) { 1768 - /* enable VLAN tag insert/strip */ 1769 - ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); 1770 - ctrl |= IXGBE_VLNCTRL_VME; 1771 - ctrl &= ~IXGBE_VLNCTRL_CFIEN; 1772 - IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl); 1773 - } 1774 - 1775 - if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1776 - ixgbe_irq_enable(adapter); 1777 - } 1778 - 1779 static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 1780 { 1781 struct ixgbe_adapter *adapter = netdev_priv(netdev); ··· 1768 1769 /* remove VID from filter table */ 1770 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false); 1771 } 1772 1773 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) ··· 2077 IXGBE_READ_REG(hw, IXGBE_EICR); 2078 2079 ixgbe_irq_enable(adapter); 2080 2081 /* bring the link up in the watchdog, this could race with our first 2082 * link up interrupt but shouldn't be a problem */ ··· 3482 (FLOW_TX ? "TX" : "None")))); 3483 3484 netif_carrier_on(netdev); 3485 - netif_tx_wake_all_queues(netdev); 3486 } else { 3487 /* Force detection of hung controller */ 3488 adapter->detect_tx_hung = true; ··· 3493 printk(KERN_INFO "ixgbe: %s NIC Link is Down\n", 3494 netdev->name); 3495 netif_carrier_off(netdev); 3496 - netif_tx_stop_all_queues(netdev); 3497 } 3498 } 3499 ··· 4223 } 4224 4225 netif_carrier_off(netdev); 4226 - netif_tx_stop_all_queues(netdev); 4227 4228 strcpy(netdev->name, "eth%d"); 4229 err = register_netdev(netdev);
··· 318 rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 319 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; 320 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; 321 + rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN); 322 + rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | 323 + IXGBE_DCA_RXCTRL_DESC_HSRO_EN); 324 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl); 325 rx_ring->cpu = cpu; 326 } ··· 1741 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 1742 } 1743 1744 static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 1745 { 1746 struct ixgbe_adapter *adapter = netdev_priv(netdev); ··· 1797 1798 /* remove VID from filter table */ 1799 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false); 1800 + } 1801 + 1802 + static void ixgbe_vlan_rx_register(struct net_device *netdev, 1803 + struct vlan_group *grp) 1804 + { 1805 + struct ixgbe_adapter *adapter = netdev_priv(netdev); 1806 + u32 ctrl; 1807 + 1808 + if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1809 + ixgbe_irq_disable(adapter); 1810 + adapter->vlgrp = grp; 1811 + 1812 + /* 1813 + * For a DCB driver, always enable VLAN tag stripping so we can 1814 + * still receive traffic from a DCB-enabled host even if we're 1815 + * not in DCB mode. 1816 + */ 1817 + ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); 1818 + ctrl |= IXGBE_VLNCTRL_VME; 1819 + ctrl &= ~IXGBE_VLNCTRL_CFIEN; 1820 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl); 1821 + ixgbe_vlan_rx_add_vid(netdev, 0); 1822 + 1823 + if (grp) { 1824 + /* enable VLAN tag insert/strip */ 1825 + ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); 1826 + ctrl |= IXGBE_VLNCTRL_VME; 1827 + ctrl &= ~IXGBE_VLNCTRL_CFIEN; 1828 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl); 1829 + } 1830 + 1831 + if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1832 + ixgbe_irq_enable(adapter); 1833 } 1834 1835 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) ··· 2073 IXGBE_READ_REG(hw, IXGBE_EICR); 2074 2075 ixgbe_irq_enable(adapter); 2076 + 2077 + /* enable transmits */ 2078 + netif_tx_start_all_queues(netdev); 2079 2080 /* bring the link up in the watchdog, this could race with our first 2081 * link up interrupt but shouldn't be a problem */ ··· 3475 (FLOW_TX ? "TX" : "None")))); 3476 3477 netif_carrier_on(netdev); 3478 } else { 3479 /* Force detection of hung controller */ 3480 adapter->detect_tx_hung = true; ··· 3487 printk(KERN_INFO "ixgbe: %s NIC Link is Down\n", 3488 netdev->name); 3489 netif_carrier_off(netdev); 3490 } 3491 } 3492 ··· 4218 } 4219 4220 netif_carrier_off(netdev); 4221 4222 strcpy(netdev->name, "eth%d"); 4223 err = register_netdev(netdev);
+3
drivers/net/ixgbe/ixgbe_type.h
··· 404 #define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ 405 #define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ 406 #define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ 407 408 #define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ 409 #define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
··· 404 #define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ 405 #define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ 406 #define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ 407 + #define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */ 408 + #define IXGBE_DCA_RXCTRL_DESC_WRO_EN (1 << 13) /* DCA Rx wr Desc Relax Order */ 409 + #define IXGBE_DCA_RXCTRL_DESC_HSRO_EN (1 << 15) /* DCA Rx Split Header RO */ 410 411 #define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ 412 #define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+9 -6
drivers/net/korina.c
··· 416 if (devcs & ETH_RX_MP) 417 dev->stats.multicast++; 418 419 lp->rx_skb[lp->rx_next_done] = skb_new; 420 } 421 ··· 743 static void korina_alloc_ring(struct net_device *dev) 744 { 745 struct korina_private *lp = netdev_priv(dev); 746 int i; 747 748 /* Initialize the transmit descriptors */ ··· 759 760 /* Initialize the receive descriptors */ 761 for (i = 0; i < KORINA_NUM_RDS; i++) { 762 - struct sk_buff *skb = lp->rx_skb[i]; 763 - 764 skb = dev_alloc_skb(KORINA_RBSIZE + 2); 765 if (!skb) 766 break; ··· 771 lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]); 772 } 773 774 - /* loop back */ 775 - lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[0]); 776 - lp->rx_next_done = 0; 777 778 - lp->rd_ring[i].control |= DMA_DESC_COD; 779 lp->rx_chain_head = 0; 780 lp->rx_chain_tail = 0; 781 lp->rx_chain_status = desc_empty;
··· 416 if (devcs & ETH_RX_MP) 417 dev->stats.multicast++; 418 419 + /* 16 bit align */ 420 + skb_reserve(skb_new, 2); 421 + 422 lp->rx_skb[lp->rx_next_done] = skb_new; 423 } 424 ··· 740 static void korina_alloc_ring(struct net_device *dev) 741 { 742 struct korina_private *lp = netdev_priv(dev); 743 + struct sk_buff *skb; 744 int i; 745 746 /* Initialize the transmit descriptors */ ··· 755 756 /* Initialize the receive descriptors */ 757 for (i = 0; i < KORINA_NUM_RDS; i++) { 758 skb = dev_alloc_skb(KORINA_RBSIZE + 2); 759 if (!skb) 760 break; ··· 769 lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]); 770 } 771 772 + /* loop back receive descriptors, so the last 773 + * descriptor points to the first one */ 774 + lp->rd_ring[i - 1].link = CPHYSADDR(&lp->rd_ring[0]); 775 + lp->rd_ring[i - 1].control |= DMA_DESC_COD; 776 777 + lp->rx_next_done = 0; 778 lp->rx_chain_head = 0; 779 lp->rx_chain_tail = 0; 780 lp->rx_chain_status = desc_empty;
+8
drivers/net/macb.c
··· 321 printk(KERN_ERR "%s: TX underrun, resetting buffers\n", 322 bp->dev->name); 323 324 head = bp->tx_head; 325 326 /*Mark all the buffer as used to avoid sending a lost buffer*/ ··· 347 } 348 349 bp->tx_head = bp->tx_tail = 0; 350 } 351 352 if (!(status & MACB_BIT(COMP)))
··· 321 printk(KERN_ERR "%s: TX underrun, resetting buffers\n", 322 bp->dev->name); 323 324 + /* Transfer ongoing, disable transmitter, to avoid confusion */ 325 + if (status & MACB_BIT(TGO)) 326 + macb_writel(bp, NCR, macb_readl(bp, NCR) & ~MACB_BIT(TE)); 327 + 328 head = bp->tx_head; 329 330 /*Mark all the buffer as used to avoid sending a lost buffer*/ ··· 343 } 344 345 bp->tx_head = bp->tx_tail = 0; 346 + 347 + /* Enable the transmitter again */ 348 + if (status & MACB_BIT(TGO)) 349 + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE)); 350 } 351 352 if (!(status & MACB_BIT(COMP)))
+10 -7
drivers/net/mv643xx_eth.c
··· 136 /* 137 * SDMA configuration register. 138 */ 139 #define RX_BURST_SIZE_16_64BIT (4 << 1) 140 #define BLM_RX_NO_SWAP (1 << 4) 141 #define BLM_TX_NO_SWAP (1 << 5) 142 #define TX_BURST_SIZE_16_64BIT (4 << 22) 143 144 #if defined(__BIG_ENDIAN) 145 #define PORT_SDMA_CONFIG_DEFAULT_VALUE \ 146 - (RX_BURST_SIZE_16_64BIT | \ 147 - TX_BURST_SIZE_16_64BIT) 148 #elif defined(__LITTLE_ENDIAN) 149 #define PORT_SDMA_CONFIG_DEFAULT_VALUE \ 150 - (RX_BURST_SIZE_16_64BIT | \ 151 - BLM_RX_NO_SWAP | \ 152 - BLM_TX_NO_SWAP | \ 153 - TX_BURST_SIZE_16_64BIT) 154 #else 155 #error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined 156 #endif ··· 1596 entry = addr_crc(a); 1597 } 1598 1599 - table[entry >> 2] |= 1 << (entry & 3); 1600 } 1601 1602 for (i = 0; i < 0x100; i += 4) { ··· 2212 struct mv643xx_eth_private *mp = netdev_priv(dev); 2213 int i; 2214 2215 wrlp(mp, INT_MASK, 0x00000000); 2216 rdlp(mp, INT_MASK); 2217
··· 136 /* 137 * SDMA configuration register. 138 */ 139 + #define RX_BURST_SIZE_4_64BIT (2 << 1) 140 #define RX_BURST_SIZE_16_64BIT (4 << 1) 141 #define BLM_RX_NO_SWAP (1 << 4) 142 #define BLM_TX_NO_SWAP (1 << 5) 143 + #define TX_BURST_SIZE_4_64BIT (2 << 22) 144 #define TX_BURST_SIZE_16_64BIT (4 << 22) 145 146 #if defined(__BIG_ENDIAN) 147 #define PORT_SDMA_CONFIG_DEFAULT_VALUE \ 148 + (RX_BURST_SIZE_4_64BIT | \ 149 + TX_BURST_SIZE_4_64BIT) 150 #elif defined(__LITTLE_ENDIAN) 151 #define PORT_SDMA_CONFIG_DEFAULT_VALUE \ 152 + (RX_BURST_SIZE_4_64BIT | \ 153 + BLM_RX_NO_SWAP | \ 154 + BLM_TX_NO_SWAP | \ 155 + TX_BURST_SIZE_4_64BIT) 156 #else 157 #error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined 158 #endif ··· 1594 entry = addr_crc(a); 1595 } 1596 1597 + table[entry >> 2] |= 1 << (8 * (entry & 3)); 1598 } 1599 1600 for (i = 0; i < 0x100; i += 4) { ··· 2210 struct mv643xx_eth_private *mp = netdev_priv(dev); 2211 int i; 2212 2213 + wrlp(mp, INT_MASK_EXT, 0x00000000); 2214 wrlp(mp, INT_MASK, 0x00000000); 2215 rdlp(mp, INT_MASK); 2216
+9 -6
drivers/net/myri10ge/myri10ge.c
··· 1 /************************************************************************* 2 * myri10ge.c: Myricom Myri-10G Ethernet driver. 3 * 4 - * Copyright (C) 2005 - 2007 Myricom, Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without ··· 75 #include "myri10ge_mcp.h" 76 #include "myri10ge_mcp_gen_header.h" 77 78 - #define MYRI10GE_VERSION_STR "1.4.4-1.398" 79 80 MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); 81 MODULE_AUTHOR("Maintainer: help@myri.com"); ··· 3786 if (status != 0) { 3787 dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n", 3788 status); 3789 - goto abort_with_netdev; 3790 } 3791 3792 pci_set_master(pdev); ··· 3801 } 3802 if (status != 0) { 3803 dev_err(&pdev->dev, "Error %d setting DMA mask\n", status); 3804 - goto abort_with_netdev; 3805 } 3806 (void)pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 3807 mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), 3808 &mgp->cmd_bus, GFP_KERNEL); 3809 if (mgp->cmd == NULL) 3810 - goto abort_with_netdev; 3811 3812 mgp->board_span = pci_resource_len(pdev, 0); 3813 mgp->iomem_base = pci_resource_start(pdev, 0); ··· 3943 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), 3944 mgp->cmd, mgp->cmd_bus); 3945 3946 - abort_with_netdev: 3947 3948 free_netdev(netdev); 3949 return status; 3950 } ··· 3992 mgp->cmd, mgp->cmd_bus); 3993 3994 free_netdev(netdev); 3995 pci_set_drvdata(pdev, NULL); 3996 } 3997
··· 1 /************************************************************************* 2 * myri10ge.c: Myricom Myri-10G Ethernet driver. 3 * 4 + * Copyright (C) 2005 - 2009 Myricom, Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without ··· 75 #include "myri10ge_mcp.h" 76 #include "myri10ge_mcp_gen_header.h" 77 78 + #define MYRI10GE_VERSION_STR "1.4.4-1.401" 79 80 MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); 81 MODULE_AUTHOR("Maintainer: help@myri.com"); ··· 3786 if (status != 0) { 3787 dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n", 3788 status); 3789 + goto abort_with_enabled; 3790 } 3791 3792 pci_set_master(pdev); ··· 3801 } 3802 if (status != 0) { 3803 dev_err(&pdev->dev, "Error %d setting DMA mask\n", status); 3804 + goto abort_with_enabled; 3805 } 3806 (void)pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 3807 mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), 3808 &mgp->cmd_bus, GFP_KERNEL); 3809 if (mgp->cmd == NULL) 3810 + goto abort_with_enabled; 3811 3812 mgp->board_span = pci_resource_len(pdev, 0); 3813 mgp->iomem_base = pci_resource_start(pdev, 0); ··· 3943 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), 3944 mgp->cmd, mgp->cmd_bus); 3945 3946 + abort_with_enabled: 3947 + pci_disable_device(pdev); 3948 3949 + abort_with_netdev: 3950 free_netdev(netdev); 3951 return status; 3952 } ··· 3990 mgp->cmd, mgp->cmd_bus); 3991 3992 free_netdev(netdev); 3993 + pci_disable_device(pdev); 3994 pci_set_drvdata(pdev, NULL); 3995 } 3996
+6 -6
drivers/net/netxen/netxen_nic.h
··· 146 147 #define MAX_RX_BUFFER_LENGTH 1760 148 #define MAX_RX_JUMBO_BUFFER_LENGTH 8062 149 - #define MAX_RX_LRO_BUFFER_LENGTH ((48*1024)-512) 150 #define RX_DMA_MAP_LEN (MAX_RX_BUFFER_LENGTH - 2) 151 #define RX_JUMBO_DMA_MAP_LEN \ 152 (MAX_RX_JUMBO_BUFFER_LENGTH - 2) ··· 207 208 #define MAX_CMD_DESCRIPTORS 4096 209 #define MAX_RCV_DESCRIPTORS 16384 210 - #define MAX_CMD_DESCRIPTORS_HOST (MAX_CMD_DESCRIPTORS / 4) 211 - #define MAX_RCV_DESCRIPTORS_1G (MAX_RCV_DESCRIPTORS / 4) 212 - #define MAX_RCV_DESCRIPTORS_10G 8192 213 - #define MAX_JUMBO_RCV_DESCRIPTORS 1024 214 - #define MAX_LRO_RCV_DESCRIPTORS 64 215 #define MAX_RCVSTATUS_DESCRIPTORS MAX_RCV_DESCRIPTORS 216 #define MAX_JUMBO_RCV_DESC MAX_JUMBO_RCV_DESCRIPTORS 217 #define MAX_RCV_DESC MAX_RCV_DESCRIPTORS
··· 146 147 #define MAX_RX_BUFFER_LENGTH 1760 148 #define MAX_RX_JUMBO_BUFFER_LENGTH 8062 149 + #define MAX_RX_LRO_BUFFER_LENGTH (8062) 150 #define RX_DMA_MAP_LEN (MAX_RX_BUFFER_LENGTH - 2) 151 #define RX_JUMBO_DMA_MAP_LEN \ 152 (MAX_RX_JUMBO_BUFFER_LENGTH - 2) ··· 207 208 #define MAX_CMD_DESCRIPTORS 4096 209 #define MAX_RCV_DESCRIPTORS 16384 210 + #define MAX_CMD_DESCRIPTORS_HOST 1024 211 + #define MAX_RCV_DESCRIPTORS_1G 2048 212 + #define MAX_RCV_DESCRIPTORS_10G 4096 213 + #define MAX_JUMBO_RCV_DESCRIPTORS 512 214 + #define MAX_LRO_RCV_DESCRIPTORS 8 215 #define MAX_RCVSTATUS_DESCRIPTORS MAX_RCV_DESCRIPTORS 216 #define MAX_JUMBO_RCV_DESC MAX_JUMBO_RCV_DESCRIPTORS 217 #define MAX_RCV_DESC MAX_RCV_DESCRIPTORS
+4 -1
drivers/net/netxen/netxen_nic_ethtool.c
··· 561 } 562 ring->tx_pending = adapter->max_tx_desc_count; 563 564 - ring->rx_max_pending = MAX_RCV_DESCRIPTORS; 565 ring->tx_max_pending = MAX_CMD_DESCRIPTORS_HOST; 566 ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS; 567 ring->rx_mini_max_pending = 0;
··· 561 } 562 ring->tx_pending = adapter->max_tx_desc_count; 563 564 + if (adapter->ahw.board_type == NETXEN_NIC_GBE) 565 + ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G; 566 + else 567 + ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G; 568 ring->tx_max_pending = MAX_CMD_DESCRIPTORS_HOST; 569 ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS; 570 ring->rx_mini_max_pending = 0;
+20 -11
drivers/net/netxen/netxen_nic_main.c
··· 735 736 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops); 737 738 - /* ScatterGather support */ 739 - netdev->features = NETIF_F_SG; 740 - netdev->features |= NETIF_F_IP_CSUM; 741 - netdev->features |= NETIF_F_TSO; 742 if (NX_IS_REVISION_P3(revision_id)) { 743 - netdev->features |= NETIF_F_IPV6_CSUM; 744 - netdev->features |= NETIF_F_TSO6; 745 } 746 747 - if (adapter->pci_using_dac) 748 netdev->features |= NETIF_F_HIGHDMA; 749 750 /* 751 * Set the CRB window to invalid. If any register in window 0 is ··· 1167 { 1168 bool tso = false; 1169 u8 opcode = TX_ETHER_PKT; 1170 1171 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && 1172 skb_shinfo(skb)->gso_size > 0) { ··· 1183 desc->total_hdr_length = 1184 skb_transport_offset(skb) + tcp_hdrlen(skb); 1185 1186 - opcode = (skb->protocol == htons(ETH_P_IPV6)) ? 1187 TX_TCP_LSO6 : TX_TCP_LSO; 1188 tso = true; 1189 1190 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 1191 u8 l4proto; 1192 1193 - if (skb->protocol == htons(ETH_P_IP)) { 1194 l4proto = ip_hdr(skb)->protocol; 1195 1196 if (l4proto == IPPROTO_TCP) 1197 opcode = TX_TCP_PKT; 1198 else if(l4proto == IPPROTO_UDP) 1199 opcode = TX_UDP_PKT; 1200 - } else if (skb->protocol == htons(ETH_P_IPV6)) { 1201 l4proto = ipv6_hdr(skb)->nexthdr; 1202 1203 if (l4proto == IPPROTO_TCP) ··· 1208 } 1209 desc->tcp_hdr_offset = skb_transport_offset(skb); 1210 desc->ip_hdr_offset = skb_network_offset(skb); 1211 - netxen_set_tx_flags_opcode(desc, 0, opcode); 1212 return tso; 1213 } 1214
··· 735 736 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops); 737 738 + netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO); 739 + netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO); 740 + 741 if (NX_IS_REVISION_P3(revision_id)) { 742 + netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6); 743 + netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6); 744 } 745 746 + if (adapter->pci_using_dac) { 747 netdev->features |= NETIF_F_HIGHDMA; 748 + netdev->vlan_features |= NETIF_F_HIGHDMA; 749 + } 750 751 /* 752 * Set the CRB window to invalid. If any register in window 0 is ··· 1166 { 1167 bool tso = false; 1168 u8 opcode = TX_ETHER_PKT; 1169 + __be16 protocol = skb->protocol; 1170 + u16 flags = 0; 1171 + 1172 + if (protocol == __constant_htons(ETH_P_8021Q)) { 1173 + struct vlan_ethhdr *vh = (struct vlan_ethhdr *)skb->data; 1174 + protocol = vh->h_vlan_encapsulated_proto; 1175 + flags = FLAGS_VLAN_TAGGED; 1176 + } 1177 1178 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && 1179 skb_shinfo(skb)->gso_size > 0) { ··· 1174 desc->total_hdr_length = 1175 skb_transport_offset(skb) + tcp_hdrlen(skb); 1176 1177 + opcode = (protocol == __constant_htons(ETH_P_IPV6)) ? 1178 TX_TCP_LSO6 : TX_TCP_LSO; 1179 tso = true; 1180 1181 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 1182 u8 l4proto; 1183 1184 + if (protocol == __constant_htons(ETH_P_IP)) { 1185 l4proto = ip_hdr(skb)->protocol; 1186 1187 if (l4proto == IPPROTO_TCP) 1188 opcode = TX_TCP_PKT; 1189 else if(l4proto == IPPROTO_UDP) 1190 opcode = TX_UDP_PKT; 1191 + } else if (protocol == __constant_htons(ETH_P_IPV6)) { 1192 l4proto = ipv6_hdr(skb)->nexthdr; 1193 1194 if (l4proto == IPPROTO_TCP) ··· 1199 } 1200 desc->tcp_hdr_offset = skb_transport_offset(skb); 1201 desc->ip_hdr_offset = skb_network_offset(skb); 1202 + netxen_set_tx_flags_opcode(desc, flags, opcode); 1203 return tso; 1204 } 1205
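For reference, the 802.1Q header the transmit path now peeks at is struct vlan_ethhdr from <linux/if_vlan.h>:

    struct vlan_ethhdr {
            unsigned char   h_dest[ETH_ALEN];          /* destination MAC  */
            unsigned char   h_source[ETH_ALEN];        /* source MAC       */
            __be16          h_vlan_proto;              /* ETH_P_8021Q      */
            __be16          h_vlan_TCI;                /* priority + VID   */
            __be16          h_vlan_encapsulated_proto; /* real ethertype   */
    };

so h_vlan_encapsulated_proto is what selects between the IPv4 and IPv6 offload opcodes for tagged frames.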
+3 -5
drivers/net/phy/mdio_bus.c
··· 296 struct phy_driver *phydrv = to_phy_driver(drv); 297 struct phy_device *phydev = to_phy_device(dev); 298 299 - if ((!device_may_wakeup(phydev->dev.parent)) && 300 - (phydrv && phydrv->suspend)) 301 - ret = phydrv->suspend(phydev); 302 303 return ret; 304 } ··· 309 struct phy_driver *phydrv = to_phy_driver(drv); 310 struct phy_device *phydev = to_phy_device(dev); 311 312 - if ((!device_may_wakeup(phydev->dev.parent)) && 313 - (phydrv && phydrv->resume)) 314 ret = phydrv->resume(phydev); 315 316 return ret;
··· 296 struct phy_driver *phydrv = to_phy_driver(drv); 297 struct phy_device *phydev = to_phy_device(dev); 298 299 + if (drv && phydrv->suspend && !device_may_wakeup(phydev->dev.parent)) 300 + ret = phydrv->suspend(phydev); 301 302 return ret; 303 } ··· 310 struct phy_driver *phydrv = to_phy_driver(drv); 311 struct phy_device *phydev = to_phy_device(dev); 312 313 + if (drv && phydrv->resume && !device_may_wakeup(phydev->dev.parent)) 314 ret = phydrv->resume(phydev); 315 316 return ret;
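The reordering matters because to_phy_driver() is container_of(), plain pointer arithmetic that does not preserve NULL: applied to a NULL drv it yields NULL minus the member offset, in general a non-NULL garbage pointer, so the old `phydrv && ...` test could not catch an unbound device:

    /* Illustration of the old hazard:
     *   phydrv = to_phy_driver(NULL);  == (struct phy_driver *)
     *            ((char *)NULL - offsetof(struct phy_driver, driver))
     *   phydrv && phydrv->suspend      -> dereferences a bogus pointer
     * The new code tests drv itself before touching phydrv. */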
+12
drivers/net/phy/smsc.c
··· 81 .ack_interrupt = smsc_phy_ack_interrupt, 82 .config_intr = smsc_phy_config_intr, 83 84 .driver = { .owner = THIS_MODULE, } 85 }; 86 ··· 104 /* IRQ related */ 105 .ack_interrupt = smsc_phy_ack_interrupt, 106 .config_intr = smsc_phy_config_intr, 107 108 .driver = { .owner = THIS_MODULE, } 109 }; ··· 129 .ack_interrupt = smsc_phy_ack_interrupt, 130 .config_intr = smsc_phy_config_intr, 131 132 .driver = { .owner = THIS_MODULE, } 133 }; 134 ··· 152 /* IRQ related */ 153 .ack_interrupt = smsc_phy_ack_interrupt, 154 .config_intr = smsc_phy_config_intr, 155 156 .driver = { .owner = THIS_MODULE, } 157 };
··· 81 .ack_interrupt = smsc_phy_ack_interrupt, 82 .config_intr = smsc_phy_config_intr, 83 84 + .suspend = genphy_suspend, 85 + .resume = genphy_resume, 86 + 87 .driver = { .owner = THIS_MODULE, } 88 }; 89 ··· 101 /* IRQ related */ 102 .ack_interrupt = smsc_phy_ack_interrupt, 103 .config_intr = smsc_phy_config_intr, 104 + 105 + .suspend = genphy_suspend, 106 + .resume = genphy_resume, 107 108 .driver = { .owner = THIS_MODULE, } 109 }; ··· 123 .ack_interrupt = smsc_phy_ack_interrupt, 124 .config_intr = smsc_phy_config_intr, 125 126 + .suspend = genphy_suspend, 127 + .resume = genphy_resume, 128 + 129 .driver = { .owner = THIS_MODULE, } 130 }; 131 ··· 143 /* IRQ related */ 144 .ack_interrupt = smsc_phy_ack_interrupt, 145 .config_intr = smsc_phy_config_intr, 146 + 147 + .suspend = genphy_suspend, 148 + .resume = genphy_resume, 149 150 .driver = { .owner = THIS_MODULE, } 151 };
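genphy_suspend() and genphy_resume() are the new generic helpers from drivers/net/phy/phy_device.c; they just toggle the standard BMCR power-down bit, roughly:

    int genphy_suspend(struct phy_device *phydev)
    {
            int value;

            mutex_lock(&phydev->lock);
            value = phy_read(phydev, MII_BMCR);
            phy_write(phydev, MII_BMCR, value | BMCR_PDOWN);
            mutex_unlock(&phydev->lock);

            return 0;
    }
    /* genphy_resume() is the same read-modify-write clearing BMCR_PDOWN. */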
+50 -31
drivers/net/tg3.c
··· 7535 return err; 7536 } 7537 7538 static int tg3_open(struct net_device *dev) 7539 { 7540 struct tg3 *tp = netdev_priv(dev); 7541 int err; 7542 7543 netif_carrier_off(tp->dev); 7544 ··· 12981 struct net_device *dev; 12982 struct tg3 *tp; 12983 int err, pm_cap; 12984 - const char *fw_name = NULL; 12985 char str[40]; 12986 u64 dma_mask, persist_dma_mask; 12987 ··· 13137 tg3_init_bufmgr_config(tp); 13138 13139 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) 13140 - fw_name = FIRMWARE_TG3; 13141 13142 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { 13143 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; ··· 13150 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; 13151 } else { 13152 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG; 13153 - } 13154 - if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) { 13155 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) 13156 - fw_name = FIRMWARE_TG3TSO5; 13157 else 13158 - fw_name = FIRMWARE_TG3TSO; 13159 - } 13160 - 13161 - if (fw_name) { 13162 - const __be32 *fw_data; 13163 - 13164 - err = request_firmware(&tp->fw, fw_name, &tp->pdev->dev); 13165 - if (err) { 13166 - printk(KERN_ERR "tg3: Failed to load firmware \"%s\"\n", 13167 - fw_name); 13168 - goto err_out_iounmap; 13169 - } 13170 - 13171 - fw_data = (void *)tp->fw->data; 13172 - 13173 - /* Firmware blob starts with version numbers, followed by 13174 - start address and _full_ length including BSS sections 13175 - (which must be longer than the actual data, of course */ 13176 - 13177 - tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */ 13178 - if (tp->fw_len < (tp->fw->size - 12)) { 13179 - printk(KERN_ERR "tg3: bogus length %d in \"%s\"\n", 13180 - tp->fw_len, fw_name); 13181 - err = -EINVAL; 13182 - goto err_out_fw; 13183 - } 13184 } 13185 13186 /* TSO is on by default on chips that support hardware TSO.
··· 7535 return err; 7536 } 7537 7538 + static int tg3_request_firmware(struct tg3 *tp) 7539 + { 7540 + const __be32 *fw_data; 7541 + 7542 + if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) { 7543 + printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n", 7544 + tp->dev->name, tp->fw_needed); 7545 + return -ENOENT; 7546 + } 7547 + 7548 + fw_data = (void *)tp->fw->data; 7549 + 7550 + /* Firmware blob starts with version numbers, followed by 7551 + * start address and _full_ length including BSS sections 7552 + * (which must be longer than the actual data, of course 7553 + */ 7554 + 7555 + tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */ 7556 + if (tp->fw_len < (tp->fw->size - 12)) { 7557 + printk(KERN_ERR "%s: bogus length %d in \"%s\"\n", 7558 + tp->dev->name, tp->fw_len, tp->fw_needed); 7559 + release_firmware(tp->fw); 7560 + tp->fw = NULL; 7561 + return -EINVAL; 7562 + } 7563 + 7564 + /* We no longer need firmware; we have it. */ 7565 + tp->fw_needed = NULL; 7566 + return 0; 7567 + } 7568 + 7569 static int tg3_open(struct net_device *dev) 7570 { 7571 struct tg3 *tp = netdev_priv(dev); 7572 int err; 7573 + 7574 + if (tp->fw_needed) { 7575 + err = tg3_request_firmware(tp); 7576 + if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) { 7577 + if (err) 7578 + return err; 7579 + } else if (err) { 7580 + printk(KERN_WARNING "%s: TSO capability disabled.\n", 7581 + tp->dev->name); 7582 + tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; 7583 + } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) { 7584 + printk(KERN_NOTICE "%s: TSO capability restored.\n", 7585 + tp->dev->name); 7586 + tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 7587 + } 7588 + } 7589 7590 netif_carrier_off(tp->dev); 7591 ··· 12934 struct net_device *dev; 12935 struct tg3 *tp; 12936 int err, pm_cap; 12937 char str[40]; 12938 u64 dma_mask, persist_dma_mask; 12939 ··· 13091 tg3_init_bufmgr_config(tp); 13092 13093 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) 13094 + tp->fw_needed = FIRMWARE_TG3; 13095 13096 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { 13097 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; ··· 13104 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; 13105 } else { 13106 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG; 13107 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) 13108 + tp->fw_needed = FIRMWARE_TG3TSO5; 13109 else 13110 + tp->fw_needed = FIRMWARE_TG3TSO; 13111 } 13112 13113 /* TSO is on by default on chips that support hardware TSO.
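Moving request_firmware() from probe to open avoids failures when tg3 is probed before the firmware files (or the userspace helper) are reachable, e.g. from an initramfs. A sketch of the resulting lifecycle; the release point is an assumption based on the usual request/release pairing and is not visible in this hunk:

    /* probe:   tp->fw_needed = FIRMWARE_TG3...;    record the name only
     * open:    tg3_request_firmware(tp);           fetch once, keep tp->fw
     * remove:  if (tp->fw)
     *                  release_firmware(tp->fw);   assumed teardown pairing
     */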
+1
drivers/net/tg3.h
··· 2764 struct ethtool_coalesce coal; 2765 2766 /* firmware info */ 2767 const struct firmware *fw; 2768 u32 fw_len; /* includes BSS */ 2769 };
··· 2764 struct ethtool_coalesce coal; 2765 2766 /* firmware info */ 2767 + const char *fw_needed; 2768 const struct firmware *fw; 2769 u32 fw_len; /* includes BSS */ 2770 };
+18 -2
drivers/net/usb/mcs7830.c
··· 94 { 95 struct usb_device *xdev = dev->udev; 96 int ret; 97 98 ret = usb_control_msg(xdev, usb_rcvctrlpipe(xdev, 0), MCS7830_RD_BREQ, 99 - MCS7830_RD_BMREQ, 0x0000, index, data, 100 size, MCS7830_CTRL_TIMEOUT); 101 return ret; 102 } 103 ··· 113 { 114 struct usb_device *xdev = dev->udev; 115 int ret; 116 117 ret = usb_control_msg(xdev, usb_sndctrlpipe(xdev, 0), MCS7830_WR_BREQ, 118 - MCS7830_WR_BMREQ, 0x0000, index, data, 119 size, MCS7830_CTRL_TIMEOUT); 120 return ret; 121 } 122
··· 94 { 95 struct usb_device *xdev = dev->udev; 96 int ret; 97 + void *buffer; 98 + 99 + buffer = kmalloc(size, GFP_NOIO); 100 + if (buffer == NULL) 101 + return -ENOMEM; 102 103 ret = usb_control_msg(xdev, usb_rcvctrlpipe(xdev, 0), MCS7830_RD_BREQ, 104 + MCS7830_RD_BMREQ, 0x0000, index, buffer, 105 size, MCS7830_CTRL_TIMEOUT); 106 + memcpy(data, buffer, size); 107 + kfree(buffer); 108 + 109 return ret; 110 } 111 ··· 105 { 106 struct usb_device *xdev = dev->udev; 107 int ret; 108 + void *buffer; 109 + 110 + buffer = kmalloc(size, GFP_NOIO); 111 + if (buffer == NULL) 112 + return -ENOMEM; 113 + 114 + memcpy(buffer, data, size); 115 116 ret = usb_control_msg(xdev, usb_sndctrlpipe(xdev, 0), MCS7830_WR_BREQ, 117 + MCS7830_WR_BMREQ, 0x0000, index, buffer, 118 size, MCS7830_CTRL_TIMEOUT); 119 + kfree(buffer); 120 return ret; 121 } 122
+1 -1
drivers/net/via-velocity.c
··· 1302 static int velocity_init_td_ring(struct velocity_info *vptr) 1303 { 1304 dma_addr_t curr; 1305 - unsigned int j; 1306 1307 /* Init the TD ring entries */ 1308 for (j = 0; j < vptr->tx.numq; j++) {
··· 1302 static int velocity_init_td_ring(struct velocity_info *vptr) 1303 { 1304 dma_addr_t curr; 1305 + int j; 1306 1307 /* Init the TD ring entries */ 1308 for (j = 0; j < vptr->tx.numq; j++) {
+2 -1
drivers/net/virtio_net.c
··· 24 #include <linux/virtio.h> 25 #include <linux/virtio_net.h> 26 #include <linux/scatterlist.h> 27 28 static int napi_weight = 128; 29 module_param(napi_weight, int, 0444); ··· 34 module_param(gso, bool, 0444); 35 36 /* FIXME: MTU in config. */ 37 - #define MAX_PACKET_LEN (ETH_HLEN+ETH_DATA_LEN) 38 #define GOOD_COPY_LEN 128 39 40 struct virtnet_info
··· 24 #include <linux/virtio.h> 25 #include <linux/virtio_net.h> 26 #include <linux/scatterlist.h> 27 + #include <linux/if_vlan.h> 28 29 static int napi_weight = 128; 30 module_param(napi_weight, int, 0444); ··· 33 module_param(gso, bool, 0444); 34 35 /* FIXME: MTU in config. */ 36 + #define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) 37 #define GOOD_COPY_LEN 128 38 39 struct virtnet_info
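With the standard constants from <linux/if_ether.h> and <linux/if_vlan.h> this grows the receive buffer from 1514 to 1518 bytes, exactly one VLAN tag more:

    /* old: ETH_HLEN (14)                 + ETH_DATA_LEN (1500) = 1514
     * new: ETH_HLEN (14) + VLAN_HLEN (4) + ETH_DATA_LEN (1500) = 1518 */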
+1 -1
drivers/net/wimax/i2400m/control.c
··· 609 spin_lock_irqsave(&i2400m->rx_lock, flags); 610 ack_skb = i2400m->ack_skb; 611 if (ack_skb && !IS_ERR(ack_skb)) 612 - kfree(ack_skb); 613 i2400m->ack_skb = ERR_PTR(code); 614 spin_unlock_irqrestore(&i2400m->rx_lock, flags); 615 }
··· 609 spin_lock_irqsave(&i2400m->rx_lock, flags); 610 ack_skb = i2400m->ack_skb; 611 if (ack_skb && !IS_ERR(ack_skb)) 612 + kfree_skb(ack_skb); 613 i2400m->ack_skb = ERR_PTR(code); 614 spin_unlock_irqrestore(&i2400m->rx_lock, flags); 615 }
+6 -3
drivers/net/wimax/i2400m/usb-rx.c
··· 184 * NOTE: this function might realloc the skb (if it is too small), 185 * so always update with the one returned. 186 * ERR_PTR() is < 0 on error. 187 */ 188 static 189 struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb) ··· 245 if (printk_ratelimit()) 246 dev_err(dev, "RX: Can't reallocate skb to %d; " 247 "RX dropped\n", rx_size); 248 - kfree(rx_skb); 249 - result = 0; 250 goto out; /* drop it...*/ 251 } 252 kfree_skb(rx_skb); ··· 346 if (IS_ERR(rx_skb)) 347 goto out; 348 atomic_dec(&i2400mu->rx_pending_count); 349 - if (rx_skb->len == 0) { /* some ignorable condition */ 350 kfree_skb(rx_skb); 351 continue; 352 }
··· 184 * NOTE: this function might realloc the skb (if it is too small), 185 * so always update with the one returned. 186 * ERR_PTR() is < 0 on error. 187 + * Will return NULL if it cannot reallocate -- this can be 188 + * considered a transient retryable error. 189 */ 190 static 191 struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb) ··· 243 if (printk_ratelimit()) 244 dev_err(dev, "RX: Can't reallocate skb to %d; " 245 "RX dropped\n", rx_size); 246 + kfree_skb(rx_skb); 247 + rx_skb = NULL; 248 goto out; /* drop it...*/ 249 } 250 kfree_skb(rx_skb); ··· 344 if (IS_ERR(rx_skb)) 345 goto out; 346 atomic_dec(&i2400mu->rx_pending_count); 347 + if (rx_skb == NULL || rx_skb->len == 0) { 348 + /* some "ignorable" condition */ 349 kfree_skb(rx_skb); 350 continue; 351 }
+1 -1
drivers/net/wireless/ath9k/rc.c
··· 490 491 static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw) 492 { 493 - if (WLAN_RC_PHY_HT(phy) & !(capflag & WLAN_RC_HT_FLAG)) 494 return 0; 495 if (WLAN_RC_PHY_DS(phy) && !(capflag & WLAN_RC_DS_FLAG)) 496 return 0;
··· 490 491 static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw) 492 { 493 + if (WLAN_RC_PHY_HT(phy) && !(capflag & WLAN_RC_HT_FLAG)) 494 return 0; 495 if (WLAN_RC_PHY_DS(phy) && !(capflag & WLAN_RC_DS_FLAG)) 496 return 0;
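The one-character fix matters because WLAN_RC_PHY_HT() is not guaranteed to evaluate to exactly 0 or 1, while `!(...)` always is; bitwise AND between the two can then lose the truth value. A worked case with a hypothetical macro result of 2 and the HT flag clear:

    /*   2 &  !(0)  ==  2 & 1  ==  0   -> filter skipped, HT rate leaks through
     *   2 && !(0)  ==  1              -> rate correctly rejected             */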
+1 -1
drivers/net/wireless/ath9k/regd_common.h
··· 228 }; 229 230 #define REG_DOMAIN_2GHZ_MASK (REQ_MASK & \ 231 - (!(ADHOC_NO_11A | DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB))) 232 #define REG_DOMAIN_5GHZ_MASK REQ_MASK 233 234 static struct reg_dmn_pair_mapping regDomainPairs[] = {
··· 228 }; 229 230 #define REG_DOMAIN_2GHZ_MASK (REQ_MASK & \ 231 + (~(ADHOC_NO_11A | DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB))) 232 #define REG_DOMAIN_5GHZ_MASK REQ_MASK 233 234 static struct reg_dmn_pair_mapping regDomainPairs[] = {
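Here `!` collapsed the flag set to a boolean before the AND, wiping out the whole mask, while `~` gives the intended bitwise complement. With the ad-hoc flags collectively nonzero:

    /* old: REQ_MASK & !(ADHOC_NO_11A | ...)  ==  REQ_MASK & 0  ==  0
     * new: REQ_MASK & ~(ADHOC_NO_11A | ...)  ==  REQ_MASK with those
     *      three ad-hoc bits cleared                                 */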
+11 -3
drivers/net/wireless/iwlwifi/iwl-3945-rs.c
··· 638 s8 scale_action = 0; 639 unsigned long flags; 640 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 641 - u16 fc, rate_mask; 642 struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_r; 643 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 644 645 IWL_DEBUG_RATE("enter\n"); 646 647 /* Send management frames and broadcast/multicast data using lowest 648 * rate. */ ··· 655 is_multicast_ether_addr(hdr->addr1) || 656 !sta || !priv_sta) { 657 IWL_DEBUG_RATE("leave: No STA priv data to update!\n"); 658 - info->control.rates[0].idx = rate_lowest_index(sband, sta); 659 return; 660 } 661 662 - rate_mask = sta->supp_rates[sband->band]; 663 index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT - 1); 664 665 if (sband->band == IEEE80211_BAND_5GHZ)
··· 638 s8 scale_action = 0; 639 unsigned long flags; 640 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 641 + u16 fc; 642 + u16 rate_mask = 0; 643 struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_r; 644 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 645 646 IWL_DEBUG_RATE("enter\n"); 647 + 648 + if (sta) 649 + rate_mask = sta->supp_rates[sband->band]; 650 651 /* Send management frames and broadcast/multicast data using lowest 652 * rate. */ ··· 651 is_multicast_ether_addr(hdr->addr1) || 652 !sta || !priv_sta) { 653 IWL_DEBUG_RATE("leave: No STA priv data to update!\n"); 654 + if (!rate_mask) 655 + info->control.rates[0].idx = 656 + rate_lowest_index(sband, NULL); 657 + else 658 + info->control.rates[0].idx = 659 + rate_lowest_index(sband, sta); 660 return; 661 } 662 663 index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT - 1); 664 665 if (sband->band == IEEE80211_BAND_5GHZ)
+12 -2
drivers/net/wireless/iwlwifi/iwl-agn-rs.c
··· 944 } 945 946 /* See if there's a better rate or modulation mode to try. */ 947 - rs_rate_scale_perform(priv, hdr, sta, lq_sta); 948 out: 949 return; 950 } ··· 2102 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 2103 struct iwl_lq_sta *lq_sta = priv_sta; 2104 int rate_idx; 2105 2106 IWL_DEBUG_RATE_LIMIT("rate scale calculate new rate for skb\n"); 2107 2108 /* Send management frames and broadcast/multicast data using lowest 2109 * rate. */ 2110 if (!ieee80211_is_data(hdr->frame_control) || 2111 is_multicast_ether_addr(hdr->addr1) || !sta || !lq_sta) { 2112 - info->control.rates[0].idx = rate_lowest_index(sband, sta); 2113 return; 2114 } 2115
··· 944 } 945 946 /* See if there's a better rate or modulation mode to try. */ 947 + if (sta && sta->supp_rates[sband->band]) 948 + rs_rate_scale_perform(priv, hdr, sta, lq_sta); 949 out: 950 return; 951 } ··· 2101 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 2102 struct iwl_lq_sta *lq_sta = priv_sta; 2103 int rate_idx; 2104 + u64 mask_bit = 0; 2105 2106 IWL_DEBUG_RATE_LIMIT("rate scale calculate new rate for skb\n"); 2107 + 2108 + if (sta) 2109 + mask_bit = sta->supp_rates[sband->band]; 2110 2111 /* Send management frames and broadcast/multicast data using lowest 2112 * rate. */ 2113 if (!ieee80211_is_data(hdr->frame_control) || 2114 is_multicast_ether_addr(hdr->addr1) || !sta || !lq_sta) { 2115 + if (!mask_bit) 2116 + info->control.rates[0].idx = 2117 + rate_lowest_index(sband, NULL); 2118 + else 2119 + info->control.rates[0].idx = 2120 + rate_lowest_index(sband, sta); 2121 return; 2122 } 2123
+1 -1
drivers/net/wireless/iwlwifi/iwl-agn.c
··· 2482 dev_kfree_skb_any(skb); 2483 2484 IWL_DEBUG_MACDUMP("leave\n"); 2485 - return 0; 2486 } 2487 2488 static int iwl_mac_add_interface(struct ieee80211_hw *hw,
··· 2482 dev_kfree_skb_any(skb); 2483 2484 IWL_DEBUG_MACDUMP("leave\n"); 2485 + return NETDEV_TX_OK; 2486 } 2487 2488 static int iwl_mac_add_interface(struct ieee80211_hw *hw,
+1 -1
drivers/net/wireless/iwlwifi/iwl-hcmd.c
··· 224 IWL_ERROR("Error: Response NULL in '%s'\n", 225 get_cmd_string(cmd->id)); 226 ret = -EIO; 227 - goto out; 228 } 229 230 ret = 0;
··· 224 IWL_ERROR("Error: Response NULL in '%s'\n", 225 get_cmd_string(cmd->id)); 226 ret = -EIO; 227 + goto cancel; 228 } 229 230 ret = 0;
+2 -2
drivers/net/wireless/iwlwifi/iwl3945-base.c
··· 745 IWL_ERROR("Error: Response NULL in '%s'\n", 746 get_cmd_string(cmd->id)); 747 ret = -EIO; 748 - goto out; 749 } 750 751 ret = 0; ··· 6538 dev_kfree_skb_any(skb); 6539 6540 IWL_DEBUG_MAC80211("leave\n"); 6541 - return 0; 6542 } 6543 6544 static int iwl3945_mac_add_interface(struct ieee80211_hw *hw,
··· 745 IWL_ERROR("Error: Response NULL in '%s'\n", 746 get_cmd_string(cmd->id)); 747 ret = -EIO; 748 + goto cancel; 749 } 750 751 ret = 0; ··· 6538 dev_kfree_skb_any(skb); 6539 6540 IWL_DEBUG_MAC80211("leave\n"); 6541 + return NETDEV_TX_OK; 6542 } 6543 6544 static int iwl3945_mac_add_interface(struct ieee80211_hw *hw,
+45 -46
drivers/net/wireless/libertas/hostcmd.h
··· 32 u8 pktdelay_2ms;
33 /* reserved */
34 u8 reserved1;
35 - };
36
37 /* RxPD Descriptor */
38 struct rxpd {
··· 63 /* Pkt Priority */
64 u8 priority;
65 u8 reserved[3];
66 - };
67
68 struct cmd_header {
69 __le16 command;
··· 97 struct lbs_offset_value {
98 u32 offset;
99 u32 value;
100 - };
101
102 /* Define general data structure */
103 /* cmd_DS_GEN */
··· 107 __le16 seqnum;
108 __le16 result;
109 void *cmdresp[0];
110 - };
111
112 #define S_DS_GEN sizeof(struct cmd_ds_gen)
113
··· 163 * bump this up a bit.
164 */
165 uint8_t tlv[128];
166 - };
167
168 /*
169 * This scan handle Country Information IE(802.11d compliant)
··· 180 mrvlietypes_chanlistparamset_t ChanListParamSet;
181 mrvlietypes_ratesparamset_t OpRateSet;
182 #endif
183 - };
184
185 struct cmd_ds_802_11_scan_rsp {
186 struct cmd_header hdr;
··· 188 __le16 bssdescriptsize;
189 uint8_t nr_sets;
190 uint8_t bssdesc_and_tlvbuffer[0];
191 - };
192
193 struct cmd_ds_802_11_get_log {
194 struct cmd_header hdr;
··· 206 __le32 fcserror;
207 __le32 txframe;
208 __le32 wepundecryptable;
209 - };
210
211 struct cmd_ds_mac_control {
212 struct cmd_header hdr;
213 __le16 action;
214 u16 reserved;
215 - };
216
217 struct cmd_ds_mac_multicast_adr {
218 struct cmd_header hdr;
219 __le16 action;
220 __le16 nr_of_adrs;
221 u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE];
222 - };
223
224 struct cmd_ds_802_11_authenticate {
225 u8 macaddr[ETH_ALEN];
226 u8 authtype;
227 u8 reserved[10];
228 - };
229
230 struct cmd_ds_802_11_deauthenticate {
231 struct cmd_header hdr;
232
233 u8 macaddr[ETH_ALEN];
234 __le16 reasoncode;
235 - };
236
237 struct cmd_ds_802_11_associate {
238 u8 peerstaaddr[6];
··· 251
252 struct cmd_ds_802_11_associate_rsp {
253 struct ieeetypes_assocrsp assocRsp;
254 - };
255
256 struct cmd_ds_802_11_set_wep {
257 struct cmd_header hdr;
··· 265 /* 40, 128bit or TXWEP */
266 uint8_t keytype[4];
267 uint8_t keymaterial[4][16];
268 - };
269
270 struct cmd_ds_802_3_get_stat {
271 __le32 xmitok;
··· 274 __le32 rcverror;
275 __le32 rcvnobuffer;
276 __le32 rcvcrcerror;
277 - };
278
279 struct cmd_ds_802_11_get_stat {
280 __le32 txfragmentcnt;
··· 294 __le32 txbeacon;
295 __le32 rxbeacon;
296 __le32 wepundecryptable;
297 - };
298
299 struct cmd_ds_802_11_snmp_mib {
300 struct cmd_header hdr;
··· 303 __le16 oid;
304 __le16 bufsize;
305 u8 value[128];
306 - };
307
308 struct cmd_ds_mac_reg_map {
309 __le16 buffersize;
310 u8 regmap[128];
311 __le16 reserved;
312 - };
313
314 struct cmd_ds_bbp_reg_map {
315 __le16 buffersize;
316 u8 regmap[128];
317 __le16 reserved;
318 - };
319
320 struct cmd_ds_rf_reg_map {
321 __le16 buffersize;
322 u8 regmap[64];
323 __le16 reserved;
324 - };
325
326 struct cmd_ds_mac_reg_access {
327 __le16 action;
328 __le16 offset;
329 __le32 value;
330 - };
331
332 struct cmd_ds_bbp_reg_access {
333 __le16 action;
334 __le16 offset;
335 u8 value;
336 u8 reserved[3];
337 - };
338
339 struct cmd_ds_rf_reg_access {
340 __le16 action;
341 __le16 offset;
342 u8 value;
343 u8 reserved[3];
344 - };
345
346 struct cmd_ds_802_11_radio_control {
347 struct cmd_header hdr;
348
349 __le16 action;
350 __le16 control;
351 - };
352
353 struct cmd_ds_802_11_beacon_control {
354 __le16 action;
355 __le16 beacon_enable;
356 __le16 beacon_period;
357 - };
358
359 struct cmd_ds_802_11_sleep_params {
360 struct cmd_header hdr;
··· 379
380 /* reserved field, should be set to zero */
381 __le16 reserved;
382 - };
383
384 struct cmd_ds_802_11_inactivity_timeout {
385 struct cmd_header hdr;
··· 389
390 /* Inactivity timeout in msec */
391 __le16 timeout;
392 - };
393
394 struct cmd_ds_802_11_rf_channel {
395 struct cmd_header hdr;
··· 399 __le16 rftype; /* unused */
400 __le16 reserved; /* unused */
401 u8 channellist[32]; /* unused */
402 - };
403
404 struct cmd_ds_802_11_rssi {
405 /* weighting factor */
··· 408 __le16 reserved_0;
409 __le16 reserved_1;
410 __le16 reserved_2;
411 - };
412
413 struct cmd_ds_802_11_rssi_rsp {
414 __le16 SNR;
415 __le16 noisefloor;
416 __le16 avgSNR;
417 __le16 avgnoisefloor;
418 - };
419
420 struct cmd_ds_802_11_mac_address {
421 struct cmd_header hdr;
422
423 __le16 action;
424 u8 macadd[ETH_ALEN];
425 - };
426
427 struct cmd_ds_802_11_rf_tx_power {
428 struct cmd_header hdr;
··· 431 __le16 curlevel;
432 s8 maxlevel;
433 s8 minlevel;
434 - };
435
436 struct cmd_ds_802_11_rf_antenna {
437 __le16 action;
··· 439 /* Number of antennas or 0xffff(diversity) */
440 __le16 antennamode;
441
442 - };
443
444 struct cmd_ds_802_11_monitor_mode {
445 __le16 action;
446 __le16 mode;
447 - };
448
449 struct cmd_ds_set_boot2_ver {
450 struct cmd_header hdr;
451
452 __le16 action;
453 __le16 version;
454 - };
455
456 struct cmd_ds_802_11_fw_wake_method {
457 struct cmd_header hdr;
458
459 __le16 action;
460 __le16 method;
461 - };
462
463 struct cmd_ds_802_11_sleep_period {
464 struct cmd_header hdr;
465
466 __le16 action;
467 __le16 period;
468 - };
469
470 struct cmd_ds_802_11_ps_mode {
471 __le16 action;
··· 473 __le16 multipledtim;
474 __le16 reserved;
475 __le16 locallisteninterval;
476 - };
477
478 struct cmd_confirm_sleep {
479 struct cmd_header hdr;
··· 483 __le16 multipledtim;
484 __le16 reserved;
485 __le16 locallisteninterval;
486 - };
487
488 struct cmd_ds_802_11_data_rate {
489 struct cmd_header hdr;
··· 491 __le16 action;
492 __le16 reserved;
493 u8 rates[MAX_RATES];
494 - };
495
496 struct cmd_ds_802_11_rate_adapt_rateset {
497 struct cmd_header hdr;
498 __le16 action;
499 __le16 enablehwauto;
500 __le16 bitmap;
501 - };
502
503 struct cmd_ds_802_11_ad_hoc_start {
504 struct cmd_header hdr;
··· 520
521 u8 pad[3];
522 u8 bssid[ETH_ALEN];
523 - };
524
525 struct adhoc_bssdesc {
526 u8 bssid[ETH_ALEN];
··· 578
579 /* key material of size keylen */
580 u8 key[32];
581 - };
582
583 #define MAX_WOL_RULES 16
584
··· 590 __le16 reserve;
591 __be32 sig_mask;
592 __be32 signature;
593 - };
594
595 struct wol_config {
596 uint8_t action;
··· 598 uint8_t no_rules_in_cmd;
599 uint8_t result;
600 struct host_wol_rule rule[MAX_WOL_RULES];
601 - };
602 -
603
604 struct cmd_ds_host_sleep {
605 struct cmd_header hdr;
··· 32 u8 pktdelay_2ms;
33 /* reserved */
34 u8 reserved1;
35 + } __attribute__ ((packed));
36
37 /* RxPD Descriptor */
38 struct rxpd {
··· 63 /* Pkt Priority */
64 u8 priority;
65 u8 reserved[3];
66 + } __attribute__ ((packed));
67
68 struct cmd_header {
69 __le16 command;
··· 97 struct lbs_offset_value {
98 u32 offset;
99 u32 value;
100 + } __attribute__ ((packed));
101
102 /* Define general data structure */
103 /* cmd_DS_GEN */
··· 107 __le16 seqnum;
108 __le16 result;
109 void *cmdresp[0];
110 + } __attribute__ ((packed));
111
112 #define S_DS_GEN sizeof(struct cmd_ds_gen)
113
··· 163 * bump this up a bit.
164 */
165 uint8_t tlv[128];
166 + } __attribute__ ((packed));
167
168 /*
169 * This scan handle Country Information IE(802.11d compliant)
··· 180 mrvlietypes_chanlistparamset_t ChanListParamSet;
181 mrvlietypes_ratesparamset_t OpRateSet;
182 #endif
183 + } __attribute__ ((packed));
184
185 struct cmd_ds_802_11_scan_rsp {
186 struct cmd_header hdr;
··· 188 __le16 bssdescriptsize;
189 uint8_t nr_sets;
190 uint8_t bssdesc_and_tlvbuffer[0];
191 + } __attribute__ ((packed));
192
193 struct cmd_ds_802_11_get_log {
194 struct cmd_header hdr;
··· 206 __le32 fcserror;
207 __le32 txframe;
208 __le32 wepundecryptable;
209 + } __attribute__ ((packed));
210
211 struct cmd_ds_mac_control {
212 struct cmd_header hdr;
213 __le16 action;
214 u16 reserved;
215 + } __attribute__ ((packed));
216
217 struct cmd_ds_mac_multicast_adr {
218 struct cmd_header hdr;
219 __le16 action;
220 __le16 nr_of_adrs;
221 u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE];
222 + } __attribute__ ((packed));
223
224 struct cmd_ds_802_11_authenticate {
225 u8 macaddr[ETH_ALEN];
226 u8 authtype;
227 u8 reserved[10];
228 + } __attribute__ ((packed));
229
230 struct cmd_ds_802_11_deauthenticate {
231 struct cmd_header hdr;
232
233 u8 macaddr[ETH_ALEN];
234 __le16 reasoncode;
235 + } __attribute__ ((packed));
236
237 struct cmd_ds_802_11_associate {
238 u8 peerstaaddr[6];
··· 251
252 struct cmd_ds_802_11_associate_rsp {
253 struct ieeetypes_assocrsp assocRsp;
254 + } __attribute__ ((packed));
255
256 struct cmd_ds_802_11_set_wep {
257 struct cmd_header hdr;
··· 265 /* 40, 128bit or TXWEP */
266 uint8_t keytype[4];
267 uint8_t keymaterial[4][16];
268 + } __attribute__ ((packed));
269
270 struct cmd_ds_802_3_get_stat {
271 __le32 xmitok;
··· 274 __le32 rcverror;
275 __le32 rcvnobuffer;
276 __le32 rcvcrcerror;
277 + } __attribute__ ((packed));
278
279 struct cmd_ds_802_11_get_stat {
280 __le32 txfragmentcnt;
··· 294 __le32 txbeacon;
295 __le32 rxbeacon;
296 __le32 wepundecryptable;
297 + } __attribute__ ((packed));
298
299 struct cmd_ds_802_11_snmp_mib {
300 struct cmd_header hdr;
··· 303 __le16 oid;
304 __le16 bufsize;
305 u8 value[128];
306 + } __attribute__ ((packed));
307
308 struct cmd_ds_mac_reg_map {
309 __le16 buffersize;
310 u8 regmap[128];
311 __le16 reserved;
312 + } __attribute__ ((packed));
313
314 struct cmd_ds_bbp_reg_map {
315 __le16 buffersize;
316 u8 regmap[128];
317 __le16 reserved;
318 + } __attribute__ ((packed));
319
320 struct cmd_ds_rf_reg_map {
321 __le16 buffersize;
322 u8 regmap[64];
323 __le16 reserved;
324 + } __attribute__ ((packed));
325
326 struct cmd_ds_mac_reg_access {
327 __le16 action;
328 __le16 offset;
329 __le32 value;
330 + } __attribute__ ((packed));
331
332 struct cmd_ds_bbp_reg_access {
333 __le16 action;
334 __le16 offset;
335 u8 value;
336 u8 reserved[3];
337 + } __attribute__ ((packed));
338
339 struct cmd_ds_rf_reg_access {
340 __le16 action;
341 __le16 offset;
342 u8 value;
343 u8 reserved[3];
344 + } __attribute__ ((packed));
345
346 struct cmd_ds_802_11_radio_control {
347 struct cmd_header hdr;
348
349 __le16 action;
350 __le16 control;
351 + } __attribute__ ((packed));
352
353 struct cmd_ds_802_11_beacon_control {
354 __le16 action;
355 __le16 beacon_enable;
356 __le16 beacon_period;
357 + } __attribute__ ((packed));
358
359 struct cmd_ds_802_11_sleep_params {
360 struct cmd_header hdr;
··· 379
380 /* reserved field, should be set to zero */
381 __le16 reserved;
382 + } __attribute__ ((packed));
383
384 struct cmd_ds_802_11_inactivity_timeout {
385 struct cmd_header hdr;
··· 389
390 /* Inactivity timeout in msec */
391 __le16 timeout;
392 + } __attribute__ ((packed));
393
394 struct cmd_ds_802_11_rf_channel {
395 struct cmd_header hdr;
··· 399 __le16 rftype; /* unused */
400 __le16 reserved; /* unused */
401 u8 channellist[32]; /* unused */
402 + } __attribute__ ((packed));
403
404 struct cmd_ds_802_11_rssi {
405 /* weighting factor */
··· 408 __le16 reserved_0;
409 __le16 reserved_1;
410 __le16 reserved_2;
411 + } __attribute__ ((packed));
412
413 struct cmd_ds_802_11_rssi_rsp {
414 __le16 SNR;
415 __le16 noisefloor;
416 __le16 avgSNR;
417 __le16 avgnoisefloor;
418 + } __attribute__ ((packed));
419
420 struct cmd_ds_802_11_mac_address {
421 struct cmd_header hdr;
422
423 __le16 action;
424 u8 macadd[ETH_ALEN];
425 + } __attribute__ ((packed));
426
427 struct cmd_ds_802_11_rf_tx_power {
428 struct cmd_header hdr;
··· 431 __le16 curlevel;
432 s8 maxlevel;
433 s8 minlevel;
434 + } __attribute__ ((packed));
435
436 struct cmd_ds_802_11_rf_antenna {
437 __le16 action;
··· 439 /* Number of antennas or 0xffff(diversity) */
440 __le16 antennamode;
441
442 + } __attribute__ ((packed));
443
444 struct cmd_ds_802_11_monitor_mode {
445 __le16 action;
446 __le16 mode;
447 + } __attribute__ ((packed));
448
449 struct cmd_ds_set_boot2_ver {
450 struct cmd_header hdr;
451
452 __le16 action;
453 __le16 version;
454 + } __attribute__ ((packed));
455
456 struct cmd_ds_802_11_fw_wake_method {
457 struct cmd_header hdr;
458
459 __le16 action;
460 __le16 method;
461 + } __attribute__ ((packed));
462
463 struct cmd_ds_802_11_sleep_period {
464 struct cmd_header hdr;
465
466 __le16 action;
467 __le16 period;
468 + } __attribute__ ((packed));
469
470 struct cmd_ds_802_11_ps_mode {
471 __le16 action;
··· 473 __le16 multipledtim;
474 __le16 reserved;
475 __le16 locallisteninterval;
476 + } __attribute__ ((packed));
477
478 struct cmd_confirm_sleep {
479 struct cmd_header hdr;
··· 483 __le16 multipledtim;
484 __le16 reserved;
485 __le16 locallisteninterval;
486 + } __attribute__ ((packed));
487
488 struct cmd_ds_802_11_data_rate {
489 struct cmd_header hdr;
··· 491 __le16 action;
492 __le16 reserved;
493 u8 rates[MAX_RATES];
494 + } __attribute__ ((packed));
495
496 struct cmd_ds_802_11_rate_adapt_rateset {
497 struct cmd_header hdr;
498 __le16 action;
499 __le16 enablehwauto;
500 __le16 bitmap;
501 + } __attribute__ ((packed));
502
503 struct cmd_ds_802_11_ad_hoc_start {
504 struct cmd_header hdr;
··· 520
521 u8 pad[3];
522 u8 bssid[ETH_ALEN];
523 + } __attribute__ ((packed));
524
525 struct adhoc_bssdesc {
526 u8 bssid[ETH_ALEN];
··· 578
579 /* key material of size keylen */
580 u8 key[32];
581 + } __attribute__ ((packed));
582
583 #define MAX_WOL_RULES 16
584
··· 590 __le16 reserve;
591 __be32 sig_mask;
592 __be32 signature;
593 + } __attribute__ ((packed));
594
595 struct wol_config {
596 uint8_t action;
··· 598 uint8_t no_rules_in_cmd;
599 uint8_t result;
600 struct host_wol_rule rule[MAX_WOL_RULES];
601 + } __attribute__ ((packed));
602
603 struct cmd_ds_host_sleep {
604 struct cmd_header hdr;
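These structures are exchanged with the firmware byte-for-byte, so the compiler must not insert alignment padding between fields. A minimal illustration with a hypothetical struct on a typical ABI:

    struct example {                     /* hypothetical, for illustration */
            u8     action;
            __le16 value;
    } __attribute__ ((packed));          /* sizeof == 3, no pad byte       */
    /* without the attribute: value is aligned to offset 2, sizeof == 4,
     * and the firmware would read one byte of padding as real data */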
+14 -18
drivers/net/wireless/orinoco/orinoco.c
··· 1673 s = "UNKNOWN"; 1674 } 1675 1676 - printk(KERN_INFO "%s: New link status: %s (%04x)\n", 1677 dev->name, s, status); 1678 } 1679 ··· 5068 struct orinoco_private *priv = netdev_priv(dev); 5069 u8 *buf; 5070 unsigned long flags; 5071 - int err = 0; 5072 5073 /* cut off at IEEE80211_MAX_DATA_LEN */ 5074 if ((wrqu->data.length > IEEE80211_MAX_DATA_LEN) || 5075 (wrqu->data.length && (extra == NULL))) 5076 return -EINVAL; 5077 5078 - if (orinoco_lock(priv, &flags) != 0) 5079 - return -EBUSY; 5080 - 5081 if (wrqu->data.length) { 5082 buf = kmalloc(wrqu->data.length, GFP_KERNEL); 5083 - if (buf == NULL) { 5084 - err = -ENOMEM; 5085 - goto out; 5086 - } 5087 5088 memcpy(buf, extra, wrqu->data.length); 5089 - kfree(priv->wpa_ie); 5090 - priv->wpa_ie = buf; 5091 - priv->wpa_ie_len = wrqu->data.length; 5092 - } else { 5093 - kfree(priv->wpa_ie); 5094 - priv->wpa_ie = NULL; 5095 - priv->wpa_ie_len = 0; 5096 } 5097 5098 if (priv->wpa_ie) { 5099 /* Looks like wl_lkm wants to check the auth alg, and ··· 5100 */ 5101 } 5102 5103 - out: 5104 orinoco_unlock(priv, &flags); 5105 - return err; 5106 } 5107 5108 static int orinoco_ioctl_get_genie(struct net_device *dev,
··· 1673 s = "UNKNOWN"; 1674 } 1675 1676 + printk(KERN_DEBUG "%s: New link status: %s (%04x)\n", 1677 dev->name, s, status); 1678 } 1679 ··· 5068 struct orinoco_private *priv = netdev_priv(dev); 5069 u8 *buf; 5070 unsigned long flags; 5071 5072 /* cut off at IEEE80211_MAX_DATA_LEN */ 5073 if ((wrqu->data.length > IEEE80211_MAX_DATA_LEN) || 5074 (wrqu->data.length && (extra == NULL))) 5075 return -EINVAL; 5076 5077 if (wrqu->data.length) { 5078 buf = kmalloc(wrqu->data.length, GFP_KERNEL); 5079 + if (buf == NULL) 5080 + return -ENOMEM; 5081 5082 memcpy(buf, extra, wrqu->data.length); 5083 + } else 5084 + buf = NULL; 5085 + 5086 + if (orinoco_lock(priv, &flags) != 0) { 5087 + kfree(buf); 5088 + return -EBUSY; 5089 } 5090 + 5091 + kfree(priv->wpa_ie); 5092 + priv->wpa_ie = buf; 5093 + priv->wpa_ie_len = wrqu->data.length; 5094 5095 if (priv->wpa_ie) { 5096 /* Looks like wl_lkm wants to check the auth alg, and ··· 5103 */ 5104 } 5105 5106 orinoco_unlock(priv, &flags); 5107 + return 0; 5108 } 5109 5110 static int orinoco_ioctl_get_genie(struct net_device *dev,
+22 -8
drivers/net/wireless/p54/p54common.c
··· 451 } 452 if (err) 453 goto err; 454 - 455 - } 456 case PDR_PRISM_ZIF_TX_IQ_CALIBRATION: 457 priv->iq_autocal = kmalloc(data_len, GFP_KERNEL); 458 if (!priv->iq_autocal) { ··· 745 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry); 746 struct p54_hdr *entry_hdr; 747 struct p54_tx_data *entry_data; 748 - int pad = 0; 749 750 range = (void *)info->rate_driver_data; 751 if (range->start_addr != addr) { ··· 768 __skb_unlink(entry, &priv->tx_queue); 769 spin_unlock_irqrestore(&priv->tx_queue.lock, flags); 770 771 entry_hdr = (struct p54_hdr *) entry->data; 772 entry_data = (struct p54_tx_data *) entry_hdr->data; 773 priv->tx_stats[entry_data->hw_queue].len--; ··· 815 info->status.ack_signal = p54_rssi_to_dbm(dev, 816 (int)payload->ack_rssi); 817 818 - if (entry_data->key_type == P54_CRYPTO_TKIPMICHAEL) { 819 u8 *iv = (u8 *)(entry_data->align + pad + 820 - entry_data->crypt_offset); 821 822 /* Restore the original TKIP IV. */ 823 iv[2] = iv[0]; 824 iv[0] = iv[1]; 825 iv[1] = (iv[0] | 0x20) & 0x7f; /* WEPSeed - 8.3.2.2 */ 826 } 827 skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data)); 828 ieee80211_tx_status_irqsafe(dev, entry); 829 goto out; ··· 1161 1162 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, 1163 sizeof(struct p54_hdr) + sizeof(*tim), 1164 - P54_CONTROL_TYPE_TIM, GFP_KERNEL); 1165 if (!skb) 1166 return -ENOMEM; 1167 ··· 1624 1625 err: 1626 printk(KERN_ERR "%s: frequency change failed\n", wiphy_name(dev->wiphy)); 1627 - kfree_skb(skb); 1628 return -EINVAL; 1629 } 1630 ··· 2091 algo = P54_CRYPTO_AESCCMP; 2092 break; 2093 default: 2094 - return -EINVAL; 2095 } 2096 } 2097
··· 451 } 452 if (err) 453 goto err; 454 + } 455 + break; 456 case PDR_PRISM_ZIF_TX_IQ_CALIBRATION: 457 priv->iq_autocal = kmalloc(data_len, GFP_KERNEL); 458 if (!priv->iq_autocal) { ··· 745 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry); 746 struct p54_hdr *entry_hdr; 747 struct p54_tx_data *entry_data; 748 + unsigned int pad = 0, frame_len; 749 750 range = (void *)info->rate_driver_data; 751 if (range->start_addr != addr) { ··· 768 __skb_unlink(entry, &priv->tx_queue); 769 spin_unlock_irqrestore(&priv->tx_queue.lock, flags); 770 771 + frame_len = entry->len; 772 entry_hdr = (struct p54_hdr *) entry->data; 773 entry_data = (struct p54_tx_data *) entry_hdr->data; 774 priv->tx_stats[entry_data->hw_queue].len--; ··· 814 info->status.ack_signal = p54_rssi_to_dbm(dev, 815 (int)payload->ack_rssi); 816 817 + /* Undo all changes to the frame. */ 818 + switch (entry_data->key_type) { 819 + case P54_CRYPTO_TKIPMICHAEL: { 820 u8 *iv = (u8 *)(entry_data->align + pad + 821 + entry_data->crypt_offset); 822 823 /* Restore the original TKIP IV. */ 824 iv[2] = iv[0]; 825 iv[0] = iv[1]; 826 iv[1] = (iv[0] | 0x20) & 0x7f; /* WEPSeed - 8.3.2.2 */ 827 + 828 + frame_len -= 12; /* remove TKIP_MMIC + TKIP_ICV */ 829 + break; 830 + } 831 + case P54_CRYPTO_AESCCMP: 832 + frame_len -= 8; /* remove CCMP_MIC */ 833 + break; 834 + case P54_CRYPTO_WEP: 835 + frame_len -= 4; /* remove WEP_ICV */ 836 + break; 837 } 838 + skb_trim(entry, frame_len); 839 skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data)); 840 ieee80211_tx_status_irqsafe(dev, entry); 841 goto out; ··· 1147 1148 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, 1149 sizeof(struct p54_hdr) + sizeof(*tim), 1150 + P54_CONTROL_TYPE_TIM, GFP_ATOMIC); 1151 if (!skb) 1152 return -ENOMEM; 1153 ··· 1610 1611 err: 1612 printk(KERN_ERR "%s: frequency change failed\n", wiphy_name(dev->wiphy)); 1613 + p54_free_skb(dev, skb); 1614 return -EINVAL; 1615 } 1616 ··· 2077 algo = P54_CRYPTO_AESCCMP; 2078 break; 2079 default: 2080 + return -EOPNOTSUPP; 2081 } 2082 } 2083
+18 -23
drivers/net/wireless/p54/p54usb.c
··· 144 struct sk_buff *skb = urb->context; 145 struct ieee80211_hw *dev = (struct ieee80211_hw *) 146 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); 147 - struct p54u_priv *priv = dev->priv; 148 149 - skb_pull(skb, priv->common.tx_hdr_len); 150 - if (FREE_AFTER_TX(skb)) 151 - p54_free_skb(dev, skb); 152 } 153 154 static void p54u_tx_dummy_cb(struct urb *urb) { } ··· 227 p54u_tx_dummy_cb, dev); 228 usb_fill_bulk_urb(data_urb, priv->udev, 229 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), 230 - skb->data, skb->len, p54u_tx_cb, skb); 231 232 usb_anchor_urb(addr_urb, &priv->submitted); 233 err = usb_submit_urb(addr_urb, GFP_ATOMIC); ··· 239 goto out; 240 } 241 242 - usb_anchor_urb(addr_urb, &priv->submitted); 243 err = usb_submit_urb(data_urb, GFP_ATOMIC); 244 if (err) 245 usb_unanchor_urb(data_urb); ··· 269 { 270 struct p54u_priv *priv = dev->priv; 271 struct urb *data_urb; 272 - struct lm87_tx_hdr *hdr; 273 - __le32 checksum; 274 - __le32 addr = ((struct p54_hdr *)skb->data)->req_id; 275 276 data_urb = usb_alloc_urb(0, GFP_ATOMIC); 277 if (!data_urb) 278 return; 279 280 - checksum = p54u_lm87_chksum((__le32 *)skb->data, skb->len); 281 - hdr = (struct lm87_tx_hdr *)skb_push(skb, sizeof(*hdr)); 282 - hdr->chksum = checksum; 283 - hdr->device_addr = addr; 284 285 usb_fill_bulk_urb(data_urb, priv->udev, 286 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), 287 - skb->data, skb->len, p54u_tx_cb, skb); 288 data_urb->transfer_flags |= URB_ZERO_PACKET; 289 290 usb_anchor_urb(data_urb, &priv->submitted); 291 if (usb_submit_urb(data_urb, GFP_ATOMIC)) { 292 usb_unanchor_urb(data_urb); 293 - skb_pull(skb, sizeof(*hdr)); 294 p54_free_skb(dev, skb); 295 } 296 usb_free_urb(data_urb); ··· 296 { 297 struct p54u_priv *priv = dev->priv; 298 struct urb *int_urb, *data_urb; 299 - struct net2280_tx_hdr *hdr; 300 struct net2280_reg_write *reg; 301 int err = 0; 302 - __le32 addr = ((struct p54_hdr *) skb->data)->req_id; 303 - __le16 len = cpu_to_le16(skb->len); 304 305 reg = kmalloc(sizeof(*reg), GFP_ATOMIC); 306 if (!reg) ··· 321 reg->addr = cpu_to_le32(P54U_DEV_BASE); 322 reg->val = cpu_to_le32(ISL38XX_DEV_INT_DATA); 323 324 - hdr = (void *)skb_push(skb, sizeof(*hdr)); 325 memset(hdr, 0, sizeof(*hdr)); 326 - hdr->len = len; 327 - hdr->device_addr = addr; 328 329 usb_fill_bulk_urb(int_urb, priv->udev, 330 usb_sndbulkpipe(priv->udev, P54U_PIPE_DEV), reg, sizeof(*reg), ··· 334 * free what's inside the transfer_buffer after the callback routine 335 * has completed. 336 */ 337 - int_urb->transfer_flags |= URB_FREE_BUFFER; 338 339 usb_fill_bulk_urb(data_urb, priv->udev, 340 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), 341 - skb->data, skb->len, p54u_tx_cb, skb); 342 343 usb_anchor_urb(int_urb, &priv->submitted); 344 err = usb_submit_urb(int_urb, GFP_ATOMIC);
··· 144 struct sk_buff *skb = urb->context; 145 struct ieee80211_hw *dev = (struct ieee80211_hw *) 146 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); 147 148 + p54_free_skb(dev, skb); 149 } 150 151 static void p54u_tx_dummy_cb(struct urb *urb) { } ··· 230 p54u_tx_dummy_cb, dev); 231 usb_fill_bulk_urb(data_urb, priv->udev, 232 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), 233 + skb->data, skb->len, FREE_AFTER_TX(skb) ? 234 + p54u_tx_cb : p54u_tx_dummy_cb, skb); 235 + addr_urb->transfer_flags |= URB_ZERO_PACKET; 236 + data_urb->transfer_flags |= URB_ZERO_PACKET; 237 238 usb_anchor_urb(addr_urb, &priv->submitted); 239 err = usb_submit_urb(addr_urb, GFP_ATOMIC); ··· 239 goto out; 240 } 241 242 + usb_anchor_urb(data_urb, &priv->submitted); 243 err = usb_submit_urb(data_urb, GFP_ATOMIC); 244 if (err) 245 usb_unanchor_urb(data_urb); ··· 269 { 270 struct p54u_priv *priv = dev->priv; 271 struct urb *data_urb; 272 + struct lm87_tx_hdr *hdr = (void *)skb->data - sizeof(*hdr); 273 274 data_urb = usb_alloc_urb(0, GFP_ATOMIC); 275 if (!data_urb) 276 return; 277 278 + hdr->chksum = p54u_lm87_chksum((__le32 *)skb->data, skb->len); 279 + hdr->device_addr = ((struct p54_hdr *)skb->data)->req_id; 280 281 usb_fill_bulk_urb(data_urb, priv->udev, 282 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), 283 + hdr, skb->len + sizeof(*hdr), FREE_AFTER_TX(skb) ? 284 + p54u_tx_cb : p54u_tx_dummy_cb, skb); 285 data_urb->transfer_flags |= URB_ZERO_PACKET; 286 287 usb_anchor_urb(data_urb, &priv->submitted); 288 if (usb_submit_urb(data_urb, GFP_ATOMIC)) { 289 usb_unanchor_urb(data_urb); 290 p54_free_skb(dev, skb); 291 } 292 usb_free_urb(data_urb); ··· 300 { 301 struct p54u_priv *priv = dev->priv; 302 struct urb *int_urb, *data_urb; 303 + struct net2280_tx_hdr *hdr = (void *)skb->data - sizeof(*hdr); 304 struct net2280_reg_write *reg; 305 int err = 0; 306 307 reg = kmalloc(sizeof(*reg), GFP_ATOMIC); 308 if (!reg) ··· 327 reg->addr = cpu_to_le32(P54U_DEV_BASE); 328 reg->val = cpu_to_le32(ISL38XX_DEV_INT_DATA); 329 330 memset(hdr, 0, sizeof(*hdr)); 331 + hdr->len = cpu_to_le16(skb->len); 332 + hdr->device_addr = ((struct p54_hdr *) skb->data)->req_id; 333 334 usb_fill_bulk_urb(int_urb, priv->udev, 335 usb_sndbulkpipe(priv->udev, P54U_PIPE_DEV), reg, sizeof(*reg), ··· 341 * free what's inside the transfer_buffer after the callback routine 342 * has completed. 343 */ 344 + int_urb->transfer_flags |= URB_FREE_BUFFER | URB_ZERO_PACKET; 345 346 usb_fill_bulk_urb(data_urb, priv->udev, 347 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), 348 + hdr, skb->len + sizeof(*hdr), FREE_AFTER_TX(skb) ? 349 + p54u_tx_cb : p54u_tx_dummy_cb, skb); 350 + data_urb->transfer_flags |= URB_ZERO_PACKET; 351 352 usb_anchor_urb(int_urb, &priv->submitted); 353 err = usb_submit_urb(int_urb, GFP_ATOMIC);
-2
drivers/net/wireless/rndis_wlan.c
··· 1649 char *end_buf, 1650 struct ndis_80211_bssid_ex *bssid) 1651 { 1652 - #ifdef DEBUG 1653 struct usbnet *usbdev = netdev_priv(dev); 1654 - #endif 1655 u8 *ie; 1656 char *current_val; 1657 int bssid_len, ie_len, i;
··· 1649 char *end_buf, 1650 struct ndis_80211_bssid_ex *bssid) 1651 { 1652 struct usbnet *usbdev = netdev_priv(dev); 1653 u8 *ie; 1654 char *current_val; 1655 int bssid_len, ie_len, i;
+2 -1
drivers/net/wireless/rt2x00/rt2x00queue.c
··· 154 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 155 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); 156 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data; 157 struct ieee80211_rate *rate = 158 ieee80211_get_tx_rate(rt2x00dev->hw, tx_info); 159 const struct rt2x00_rate *hwrate; ··· 314 * When preamble is enabled we should set the 315 * preamble bit for the signal. 316 */ 317 - if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 318 txdesc->signal |= 0x08; 319 } 320 }
··· 154 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 155 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); 156 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data; 157 + struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0]; 158 struct ieee80211_rate *rate = 159 ieee80211_get_tx_rate(rt2x00dev->hw, tx_info); 160 const struct rt2x00_rate *hwrate; ··· 313 * When preamble is enabled we should set the 314 * preamble bit for the signal. 315 */ 316 + if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 317 txdesc->signal |= 0x08; 318 } 319 }
+1 -1
drivers/net/wireless/rt2x00/rt2x00rfkill.c
··· 162 163 void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev) 164 { 165 - if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->flags)) 166 return; 167 168 cancel_delayed_work_sync(&rt2x00dev->rfkill_work);
··· 162 163 void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev) 164 { 165 + if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state)) 166 return; 167 168 cancel_delayed_work_sync(&rt2x00dev->rfkill_work);
+1
drivers/net/wireless/rtl818x/rtl8187_dev.c
··· 273 274 usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, ep), 275 buf, skb->len, rtl8187_tx_cb, skb); 276 usb_anchor_urb(urb, &priv->anchored); 277 rc = usb_submit_urb(urb, GFP_ATOMIC); 278 if (rc < 0) {
··· 273 274 usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, ep), 275 buf, skb->len, rtl8187_tx_cb, skb); 276 + urb->transfer_flags |= URB_ZERO_PACKET; 277 usb_anchor_urb(urb, &priv->anchored); 278 rc = usb_submit_urb(urb, GFP_ATOMIC); 279 if (rc < 0) {
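USB bulk transfers are terminated by a short packet; when skb->len is an exact multiple of the endpoint's wMaxPacketSize (512 bytes at high speed), no short packet ever occurs and the device keeps waiting for more data. URB_ZERO_PACKET makes usbcore append an explicit zero-length packet in that case:

    /* e.g. a 1024-byte frame on a 512-byte bulk endpoint:
     *   without URB_ZERO_PACKET: 512 + 512           -> no end-of-transfer
     *   with    URB_ZERO_PACKET: 512 + 512 + 0 bytes -> device sees the end */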
+1
drivers/net/wireless/zd1211rw/zd_usb.c
··· 84 { USB_DEVICE(0x0586, 0x340a), .driver_info = DEVICE_ZD1211B }, 85 { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B }, 86 { USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B }, 87 /* "Driverless" devices that need ejecting */ 88 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, 89 { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },
··· 84 { USB_DEVICE(0x0586, 0x340a), .driver_info = DEVICE_ZD1211B }, 85 { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B }, 86 { USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B }, 87 + { USB_DEVICE(0x0df6, 0x0036), .driver_info = DEVICE_ZD1211B }, 88 /* "Driverless" devices that need ejecting */ 89 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, 90 { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },
+6 -2
drivers/s390/net/lcs.c
··· 70 static void lcs_tasklet(unsigned long); 71 static void lcs_start_kernel_thread(struct work_struct *); 72 static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *); 73 static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *); 74 static int lcs_recovery(void *ptr); 75 76 /** ··· 1287 lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD); 1288 return 0; 1289 } 1290 /** 1291 * function called by net device to 1292 * handle multicast address relevant things ··· 1296 static void 1297 lcs_set_multicast_list(struct net_device *dev) 1298 { 1299 struct lcs_card *card; 1300 1301 LCS_DBF_TEXT(4, trace, "setmulti"); ··· 1304 1305 if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD)) 1306 schedule_work(&card->kernel_thread_starter); 1307 - } 1308 - 1309 #endif /* CONFIG_IP_MULTICAST */ 1310 1311 static long 1312 lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb)
··· 70 static void lcs_tasklet(unsigned long); 71 static void lcs_start_kernel_thread(struct work_struct *); 72 static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *); 73 + #ifdef CONFIG_IP_MULTICAST 74 static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *); 75 + #endif /* CONFIG_IP_MULTICAST */ 76 static int lcs_recovery(void *ptr); 77 78 /** ··· 1285 lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD); 1286 return 0; 1287 } 1288 + #endif /* CONFIG_IP_MULTICAST */ 1289 + 1290 /** 1291 * function called by net device to 1292 * handle multicast address relevant things ··· 1292 static void 1293 lcs_set_multicast_list(struct net_device *dev) 1294 { 1295 + #ifdef CONFIG_IP_MULTICAST 1296 struct lcs_card *card; 1297 1298 LCS_DBF_TEXT(4, trace, "setmulti"); ··· 1299 1300 if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD)) 1301 schedule_work(&card->kernel_thread_starter); 1302 #endif /* CONFIG_IP_MULTICAST */ 1303 + } 1304 1305 static long 1306 lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb)
+4 -6
include/linux/if_frad.h
··· 26 27 #include <linux/if.h> 28 29 - #if defined(CONFIG_DLCI) || defined(CONFIG_DLCI_MODULE) 30 - 31 /* Structures and constants associated with the DLCI device driver */ 32 33 struct dlci_add ··· 125 126 #ifdef __KERNEL__ 127 128 /* these are the fields of an RFC 1490 header */ 129 struct frhdr 130 { ··· 190 int buffer; /* current buffer for S508 firmware */ 191 }; 192 193 - #endif /* __KERNEL__ */ 194 - 195 #endif /* CONFIG_DLCI || CONFIG_DLCI_MODULE */ 196 197 - #ifdef __KERNEL__ 198 extern void dlci_ioctl_set(int (*hook)(unsigned int, void __user *)); 199 - #endif 200 201 #endif
··· 26 27 #include <linux/if.h> 28 29 /* Structures and constants associated with the DLCI device driver */ 30 31 struct dlci_add ··· 127 128 #ifdef __KERNEL__ 129 130 + #if defined(CONFIG_DLCI) || defined(CONFIG_DLCI_MODULE) 131 + 132 /* these are the fields of an RFC 1490 header */ 133 struct frhdr 134 { ··· 190 int buffer; /* current buffer for S508 firmware */ 191 }; 192 193 #endif /* CONFIG_DLCI || CONFIG_DLCI_MODULE */ 194 195 extern void dlci_ioctl_set(int (*hook)(unsigned int, void __user *)); 196 + 197 + #endif /* __KERNEL__ */ 198 199 #endif
+3 -1
include/linux/usb/usbnet.h
··· 197 #define devdbg(usbnet, fmt, arg...) \ 198 printk(KERN_DEBUG "%s: " fmt "\n" , (usbnet)->net->name , ## arg) 199 #else 200 - #define devdbg(usbnet, fmt, arg...) do {} while(0) 201 #endif 202 203 #define deverr(usbnet, fmt, arg...) \
··· 197 #define devdbg(usbnet, fmt, arg...) \ 198 printk(KERN_DEBUG "%s: " fmt "\n" , (usbnet)->net->name , ## arg) 199 #else 200 + #define devdbg(usbnet, fmt, arg...) \ 201 + ({ if (0) printk(KERN_DEBUG "%s: " fmt "\n" , (usbnet)->net->name , \ 202 + ## arg); 0; }) 203 #endif 204 205 #define deverr(usbnet, fmt, arg...) \
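The `if (0)` form keeps the arguments visible to the compiler, so printf-format mismatches are diagnosed even in non-debug builds, while the dead branch is optimized away; the old empty do/while silently swallowed them. For instance, with a hypothetical call site:

    devdbg(dev, "rx len %d", skb);   /* now warns: %d given a pointer,
                                        even when the debug define is off */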
-1
include/net/mac80211.h
··· 322 * @control: union for control data 323 * @status: union for status data 324 * @driver_data: array of driver_data pointers 325 - * @retry_count: number of retries 326 * @ampdu_ack_len: number of aggregated frames. 327 * relevant only if IEEE80211_TX_STATUS_AMPDU was set. 328 * @ampdu_ack_map: block ack bit map for the aggregation.
··· 322 * @control: union for control data 323 * @status: union for status data 324 * @driver_data: array of driver_data pointers 325 * @ampdu_ack_len: number of aggregated frames. 326 * relevant only if IEEE80211_TX_STATUS_AMPDU was set. 327 * @ampdu_ack_map: block ack bit map for the aggregation.
+1 -1
include/net/sctp/checksum.h
··· 79 80 static inline __be32 sctp_end_cksum(__be32 crc32) 81 { 82 - return ~crc32; 83 }
··· 79 80 static inline __be32 sctp_end_cksum(__be32 crc32) 81 { 82 + return (__force __be32)~cpu_to_le32((__force u32)crc32); 83 }
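CRC32-C as used on the wire is defined with little-endian byte order. On little-endian hosts cpu_to_le32() is the identity, so nothing changes there; on big-endian hosts it byte-swaps the accumulated value around the final complement, which is what was missing:

    /* LE host: cpu_to_le32(x) == x         -> result identical to before
     * BE host: cpu_to_le32(x) == swab32(x) -> checksum bytes now land on
     *          the wire in the little-endian order CRC32-C expects       */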
+8
init/Kconfig
··· 633 Unless you want to work with an experimental feature 634 say N here. 635 636 config BLK_DEV_INITRD 637 bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support" 638 depends on BROKEN || !FRV
··· 633 Unless you want to work with an experimental feature 634 say N here. 635 636 + config NET_NS 637 + bool "Network namespace" 638 + default n 639 + depends on NAMESPACES && EXPERIMENTAL && NET 640 + help 641 + Allow user space to create what appear to be multiple instances 642 + of the network stack. 643 + 644 config BLK_DEV_INITRD 645 bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support" 646 depends on BROKEN || !FRV
+2 -2
net/8021q/vlan_core.c
··· 62 { 63 return vlan_dev_info(dev)->real_dev; 64 } 65 - EXPORT_SYMBOL_GPL(vlan_dev_real_dev); 66 67 u16 vlan_dev_vlan_id(const struct net_device *dev) 68 { 69 return vlan_dev_info(dev)->vlan_id; 70 } 71 - EXPORT_SYMBOL_GPL(vlan_dev_vlan_id); 72 73 static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp, 74 unsigned int vlan_tci, struct sk_buff *skb)
··· 62 { 63 return vlan_dev_info(dev)->real_dev; 64 } 65 + EXPORT_SYMBOL(vlan_dev_real_dev); 66 67 u16 vlan_dev_vlan_id(const struct net_device *dev) 68 { 69 return vlan_dev_info(dev)->vlan_id; 70 } 71 + EXPORT_SYMBOL(vlan_dev_vlan_id); 72 73 static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp, 74 unsigned int vlan_tci, struct sk_buff *skb)
+1 -1
net/9p/client.c
··· 618 return ERR_PTR(-ENOMEM); 619 620 ret = p9_idpool_get(clnt->fidpool); 621 - if (fid->fid < 0) { 622 ret = -ENOSPC; 623 goto error; 624 }
··· 618 return ERR_PTR(-ENOMEM); 619 620 ret = p9_idpool_get(clnt->fidpool); 621 + if (ret < 0) { 622 ret = -ENOSPC; 623 goto error; 624 }
-8
net/Kconfig
··· 24 25 menu "Networking options" 26 27 - config NET_NS 28 - bool "Network namespace support" 29 - default n 30 - depends on EXPERIMENTAL && NAMESPACES 31 - help 32 - Allow user space to create what appear to be multiple instances 33 - of the network stack. 34 - 35 config COMPAT_NET_DEV_OPS 36 def_bool y 37
··· 24 25 menu "Networking options" 26 27 config COMPAT_NET_DEV_OPS 28 def_bool y 29
+14 -1
net/core/dev.c
··· 1534 skb->mac_len = skb->network_header - skb->mac_header; 1535 __skb_pull(skb, skb->mac_len); 1536 1537 - if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) { 1538 if (skb_header_cloned(skb) && 1539 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 1540 return ERR_PTR(err); ··· 2536 2537 if (!pskb_may_pull(skb, ETH_HLEN)) { 2538 napi_reuse_skb(napi, skb); 2539 goto out; 2540 } 2541
··· 1534 skb->mac_len = skb->network_header - skb->mac_header; 1535 __skb_pull(skb, skb->mac_len); 1536 1537 + if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { 1538 + struct net_device *dev = skb->dev; 1539 + struct ethtool_drvinfo info = {}; 1540 + 1541 + if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) 1542 + dev->ethtool_ops->get_drvinfo(dev, &info); 1543 + 1544 + WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d " 1545 + "ip_summed=%d", 1546 + info.driver, dev ? dev->features : 0L, 1547 + skb->sk ? skb->sk->sk_route_caps : 0L, 1548 + skb->len, skb->data_len, skb->ip_summed); 1549 + 1550 if (skb_header_cloned(skb) && 1551 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 1552 return ERR_PTR(err); ··· 2524 2525 if (!pskb_may_pull(skb, ETH_HLEN)) { 2526 napi_reuse_skb(napi, skb); 2527 + skb = NULL; 2528 goto out; 2529 } 2530
+1 -1
net/core/net_namespace.c
··· 341 rv = register_pernet_operations(first_device, ops); 342 if (rv < 0) 343 ida_remove(&net_generic_ids, *id); 344 - mutex_unlock(&net_mutex); 345 out: 346 return rv; 347 } 348 EXPORT_SYMBOL_GPL(register_pernet_gen_subsys);
··· 341 rv = register_pernet_operations(first_device, ops); 342 if (rv < 0) 343 ida_remove(&net_generic_ids, *id); 344 out: 345 + mutex_unlock(&net_mutex); 346 return rv; 347 } 348 EXPORT_SYMBOL_GPL(register_pernet_gen_subsys);
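The unlock moves under the `out:` label because the function's earlier error path (a `goto out` when id allocation fails, not visible in this hunk) previously returned with net_mutex still held:

    /* old shape:                          new shape:
     *   mutex_lock(&net_mutex);             mutex_lock(&net_mutex);
     *   if (id alloc fails)                 if (id alloc fails)
     *           goto out;  <- lock leaked           goto out;
     *   ...                                 ...
     *   mutex_unlock(&net_mutex);         out:
     * out:                                  mutex_unlock(&net_mutex);
     *   return rv;                          return rv;                 */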
+34 -36
net/core/skbuff.c
··· 73 static void sock_pipe_buf_release(struct pipe_inode_info *pipe, 74 struct pipe_buffer *buf) 75 { 76 - struct sk_buff *skb = (struct sk_buff *) buf->private; 77 - 78 - kfree_skb(skb); 79 } 80 81 static void sock_pipe_buf_get(struct pipe_inode_info *pipe, 82 struct pipe_buffer *buf) 83 { 84 - struct sk_buff *skb = (struct sk_buff *) buf->private; 85 - 86 - skb_get(skb); 87 } 88 89 static int sock_pipe_buf_steal(struct pipe_inode_info *pipe, ··· 1330 */ 1331 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) 1332 { 1333 - struct sk_buff *skb = (struct sk_buff *) spd->partial[i].private; 1334 1335 - kfree_skb(skb); 1336 } 1337 1338 /* ··· 1350 */ 1351 static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page, 1352 unsigned int len, unsigned int offset, 1353 - struct sk_buff *skb) 1354 { 1355 if (unlikely(spd->nr_pages == PIPE_BUFFERS)) 1356 return 1; 1357 1358 spd->pages[spd->nr_pages] = page; 1359 spd->partial[spd->nr_pages].len = len; 1360 spd->partial[spd->nr_pages].offset = offset; 1361 - spd->partial[spd->nr_pages].private = (unsigned long) skb_get(skb); 1362 spd->nr_pages++; 1363 return 0; 1364 } 1365 ··· 1382 static inline int __splice_segment(struct page *page, unsigned int poff, 1383 unsigned int plen, unsigned int *off, 1384 unsigned int *len, struct sk_buff *skb, 1385 - struct splice_pipe_desc *spd) 1386 { 1387 if (!*len) 1388 return 1; ··· 1405 /* the linear region may spread across several pages */ 1406 flen = min_t(unsigned int, flen, PAGE_SIZE - poff); 1407 1408 - if (spd_fill_page(spd, page, flen, poff, skb)) 1409 return 1; 1410 1411 __segment_seek(&page, &poff, &plen, flen); ··· 1432 if (__splice_segment(virt_to_page(skb->data), 1433 (unsigned long) skb->data & (PAGE_SIZE - 1), 1434 skb_headlen(skb), 1435 - offset, len, skb, spd)) 1436 return 1; 1437 1438 /* ··· 1442 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 1443 1444 if (__splice_segment(f->page, f->page_offset, f->size, 1445 - offset, len, skb, spd)) 1446 return 1; 1447 } 1448 ··· 1455 * the frag list, if such a thing exists. We'd probably need to recurse to 1456 * handle that cleanly. 1457 */ 1458 - int skb_splice_bits(struct sk_buff *__skb, unsigned int offset, 1459 struct pipe_inode_info *pipe, unsigned int tlen, 1460 unsigned int flags) 1461 { ··· 1468 .ops = &sock_pipe_buf_ops, 1469 .spd_release = sock_spd_release, 1470 }; 1471 - struct sk_buff *skb; 1472 - 1473 - /* 1474 - * I'd love to avoid the clone here, but tcp_read_sock() 1475 - * ignores reference counts and unconditonally kills the sk_buff 1476 - * on return from the actor. 1477 - */ 1478 - skb = skb_clone(__skb, GFP_KERNEL); 1479 - if (unlikely(!skb)) 1480 - return -ENOMEM; 1481 1482 /* 1483 * __skb_splice_bits() only fails if the output has no room left, ··· 1491 } 1492 1493 done: 1494 - /* 1495 - * drop our reference to the clone, the pipe consumption will 1496 - * drop the rest. 1497 - */ 1498 - kfree_skb(skb); 1499 - 1500 if (spd.nr_pages) { 1501 int ret; 1502 - struct sock *sk = __skb->sk; 1503 1504 /* 1505 * Drop the socket lock, otherwise we have reverse ··· 2585 struct sk_buff *nskb; 2586 unsigned int headroom; 2587 unsigned int hlen = p->data - skb_mac_header(p); 2588 2589 - if (hlen + p->len + skb->len >= 65536) 2590 return -E2BIG; 2591 2592 if (skb_shinfo(p)->frag_list) ··· 2649 2650 done: 2651 NAPI_GRO_CB(p)->count++; 2652 - p->data_len += skb->len; 2653 - p->truesize += skb->len; 2654 - p->len += skb->len; 2655 2656 NAPI_GRO_CB(skb)->same_flow = 1; 2657 return 0;
··· 73 static void sock_pipe_buf_release(struct pipe_inode_info *pipe, 74 struct pipe_buffer *buf) 75 { 76 + put_page(buf->page); 77 } 78 79 static void sock_pipe_buf_get(struct pipe_inode_info *pipe, 80 struct pipe_buffer *buf) 81 { 82 + get_page(buf->page); 83 } 84 85 static int sock_pipe_buf_steal(struct pipe_inode_info *pipe, ··· 1334 */ 1335 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) 1336 { 1337 + put_page(spd->pages[i]); 1338 + } 1339 1340 + static inline struct page *linear_to_page(struct page *page, unsigned int len, 1341 + unsigned int offset) 1342 + { 1343 + struct page *p = alloc_pages(GFP_KERNEL, 0); 1344 + 1345 + if (!p) 1346 + return NULL; 1347 + memcpy(page_address(p) + offset, page_address(page) + offset, len); 1348 + 1349 + return p; 1350 } 1351 1352 /* ··· 1344 */ 1345 static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page, 1346 unsigned int len, unsigned int offset, 1347 + struct sk_buff *skb, int linear) 1348 { 1349 if (unlikely(spd->nr_pages == PIPE_BUFFERS)) 1350 return 1; 1351 1352 + if (linear) { 1353 + page = linear_to_page(page, len, offset); 1354 + if (!page) 1355 + return 1; 1356 + } else 1357 + get_page(page); 1358 + 1359 spd->pages[spd->nr_pages] = page; 1360 spd->partial[spd->nr_pages].len = len; 1361 spd->partial[spd->nr_pages].offset = offset; 1362 spd->nr_pages++; 1363 + 1364 return 0; 1365 } 1366 ··· 1369 static inline int __splice_segment(struct page *page, unsigned int poff, 1370 unsigned int plen, unsigned int *off, 1371 unsigned int *len, struct sk_buff *skb, 1372 + struct splice_pipe_desc *spd, int linear) 1373 { 1374 if (!*len) 1375 return 1; ··· 1392 /* the linear region may spread across several pages */ 1393 flen = min_t(unsigned int, flen, PAGE_SIZE - poff); 1394 1395 + if (spd_fill_page(spd, page, flen, poff, skb, linear)) 1396 return 1; 1397 1398 __segment_seek(&page, &poff, &plen, flen); ··· 1419 if (__splice_segment(virt_to_page(skb->data), 1420 (unsigned long) skb->data & (PAGE_SIZE - 1), 1421 skb_headlen(skb), 1422 + offset, len, skb, spd, 1)) 1423 return 1; 1424 1425 /* ··· 1429 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 1430 1431 if (__splice_segment(f->page, f->page_offset, f->size, 1432 + offset, len, skb, spd, 0)) 1433 return 1; 1434 } 1435 ··· 1442 * the frag list, if such a thing exists. We'd probably need to recurse to 1443 * handle that cleanly. 1444 */ 1445 + int skb_splice_bits(struct sk_buff *skb, unsigned int offset, 1446 struct pipe_inode_info *pipe, unsigned int tlen, 1447 unsigned int flags) 1448 { ··· 1455 .ops = &sock_pipe_buf_ops, 1456 .spd_release = sock_spd_release, 1457 }; 1458 1459 /* 1460 * __skb_splice_bits() only fails if the output has no room left, ··· 1488 } 1489 1490 done: 1491 if (spd.nr_pages) { 1492 + struct sock *sk = skb->sk; 1493 int ret; 1494 1495 /* 1496 * Drop the socket lock, otherwise we have reverse ··· 2588 struct sk_buff *nskb; 2589 unsigned int headroom; 2590 unsigned int hlen = p->data - skb_mac_header(p); 2591 + unsigned int len = skb->len; 2592 2593 + if (hlen + p->len + len >= 65536) 2594 return -E2BIG; 2595 2596 if (skb_shinfo(p)->frag_list) ··· 2651 2652 done: 2653 NAPI_GRO_CB(p)->count++; 2654 + p->data_len += len; 2655 + p->truesize += len; 2656 + p->len += len; 2657 2658 NAPI_GRO_CB(skb)->same_flow = 1; 2659 return 0;
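The skbuff.c change above moves the splice path from skb clones (each pipe buffer used to pin a cloned sk_buff via buf->private) to plain page references: frag pages are pinned with get_page(), and the linear header area is copied into a freshly allocated page by linear_to_page() so the pipe never points into skb memory that tcp_read_sock() may free. A minimal userspace sketch of the consumer this path serves — splicing an input fd through a pipe to a file — follows; the output path and buffer size are illustrative, and with a connected TCP socket as input this exercises skb_splice_bits() directly.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int pfd[2];
    int in = STDIN_FILENO; /* a connected TCP socket here hits skb_splice_bits() */
    int out = open("/tmp/splice-out", O_WRONLY | O_CREAT | O_TRUNC, 0644);

    if (out < 0 || pipe(pfd) < 0) {
        perror("setup");
        return 1;
    }
    for (;;) {
        /* fd -> pipe: pipe buffers now hold page refs, not skb clones */
        ssize_t n = splice(in, NULL, pfd[1], NULL, 65536, SPLICE_F_MOVE);
        if (n <= 0)
            break;
        /* pipe -> file: consuming the buffers drops those page refs */
        while (n > 0) {
            ssize_t m = splice(pfd[0], NULL, out, NULL, n, SPLICE_F_MOVE);
            if (m <= 0)
                return 1;
            n -= m;
        }
    }
    close(out);
    return 0;
}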
+7
net/ipv6/af_inet6.c
··· 797 unsigned int nlen; 798 int flush = 1; 799 int proto; 800 801 if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) 802 goto out; ··· 809 810 rcu_read_lock(); 811 proto = ipv6_gso_pull_exthdrs(skb, iph->nexthdr); 812 IPV6_GRO_CB(skb)->proto = proto; 813 ops = rcu_dereference(inet6_protos[proto]); 814 if (!ops || !ops->gro_receive) ··· 841 842 NAPI_GRO_CB(skb)->flush |= flush; 843 844 pp = ops->gro_receive(head, skb); 845 846 out_unlock: 847 rcu_read_unlock();
··· 797 unsigned int nlen; 798 int flush = 1; 799 int proto; 800 + __wsum csum; 801 802 if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) 803 goto out; ··· 808 809 rcu_read_lock(); 810 proto = ipv6_gso_pull_exthdrs(skb, iph->nexthdr); 811 + iph = ipv6_hdr(skb); 812 IPV6_GRO_CB(skb)->proto = proto; 813 ops = rcu_dereference(inet6_protos[proto]); 814 if (!ops || !ops->gro_receive) ··· 839 840 NAPI_GRO_CB(skb)->flush |= flush; 841 842 + csum = skb->csum; 843 + skb_postpull_rcsum(skb, iph, skb_network_header_len(skb)); 844 + 845 pp = ops->gro_receive(head, skb); 846 + 847 + skb->csum = csum; 848 849 out_unlock: 850 rcu_read_unlock();
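Two fixes ride in this af_inet6.c hunk: iph is re-read after ipv6_gso_pull_exthdrs() because pulling extension headers may move the header area, and a CHECKSUM_COMPLETE value is compensated for the pulled bytes (skb_postpull_rcsum) around the gro_receive call, with skb->csum saved and restored. The compensation itself is one's-complement subtraction; a self-contained sketch of that arithmetic (function names here are mine, not the kernel's):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* 16-bit one's-complement sum over a buffer (per RFC 1071). */
static uint32_t csum_partial(const uint8_t *buf, size_t len)
{
    uint32_t sum = 0;
    size_t i;

    for (i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)buf[i] << 8 | buf[i + 1];
    if (len & 1)
        sum += (uint32_t)buf[len - 1] << 8;
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return sum;
}

/* One's-complement subtraction: remove the pulled bytes' contribution. */
static uint32_t csum_sub(uint32_t whole, uint32_t pulled)
{
    uint32_t res = whole + (~pulled & 0xffff);

    while (res >> 16)
        res = (res & 0xffff) + (res >> 16);
    return res;
}

int main(void)
{
    uint8_t pkt[] = { 0x60, 0x00, 0x00, 0x00, 0x11, 0x22, 0x33, 0x44 };
    size_t hdr = 4; /* pretend a 4-byte header is pulled off the front */
    uint32_t adjusted = csum_sub(csum_partial(pkt, sizeof(pkt)),
                                 csum_partial(pkt, hdr));

    /* matches a fresh sum over what remains after the pull */
    printf("%04x == %04x\n", adjusted,
           csum_partial(pkt + hdr, sizeof(pkt) - hdr));
    return 0;
}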
+1
net/key/af_key.c
··· 1285 ext_hdrs[SADB_X_EXT_NAT_T_DPORT-1]; 1286 natt->encap_dport = n_port->sadb_x_nat_t_port_port; 1287 } 1288 } 1289 1290 err = xfrm_init_state(x);
··· 1285 ext_hdrs[SADB_X_EXT_NAT_T_DPORT-1]; 1286 natt->encap_dport = n_port->sadb_x_nat_t_port_port; 1287 } 1288 + memset(&natt->encap_oa, 0, sizeof(natt->encap_oa)); 1289 } 1290 1291 err = xfrm_init_state(x);
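The af_key fix zeroes natt->encap_oa, which was left holding whatever was previously in that kernel memory when userspace supplied NAT-T ports without an OA address, so stale data could later surface through the xfrm state. It is the usual "zero what you may not fill" rule for structures with optional fields; a trivial userspace analogy (the struct and its fields are hypothetical):

#include <string.h>

struct nat_params {
    unsigned short sport, dport;
    unsigned char  oa[16]; /* optional; may never be written by the caller */
};

void nat_params_init(struct nat_params *p,
                     unsigned short sport, unsigned short dport)
{
    /* Zero the whole struct first so optional fields such as oa[]
     * never carry stale memory if nothing fills them in later. */
    memset(p, 0, sizeof(*p));
    p->sport = sport;
    p->dport = dport;
}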
+2 -2
net/mac80211/mlme.c
··· 620 if (use_short_slot != bss_conf->use_short_slot) { 621 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG 622 if (net_ratelimit()) { 623 - printk(KERN_DEBUG "%s: switched to %s slot" 624 - " (BSSID=%s)\n", 625 sdata->dev->name, 626 use_short_slot ? "short" : "long", 627 ifsta->bssid);
··· 620 if (use_short_slot != bss_conf->use_short_slot) { 621 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG 622 if (net_ratelimit()) { 623 + printk(KERN_DEBUG "%s: switched to %s slot time" 624 + " (BSSID=%pM)\n", 625 sdata->dev->name, 626 use_short_slot ? "short" : "long", 627 ifsta->bssid);
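Besides saying "slot time" instead of just "slot", the message now uses %pM, the kernel printk extension that renders a 6-byte MAC address as colon-separated hex, rather than handing ifsta->bssid to a plain %s. %pM exists only in the kernel's vsnprintf; a userspace equivalent would be a small helper along these lines (a hedged sketch, names mine):

#include <stdio.h>

/* Userspace stand-in for the kernel's %pM: aa:bb:cc:dd:ee:ff */
static void format_mac(char out[18], const unsigned char addr[6])
{
    snprintf(out, 18, "%02x:%02x:%02x:%02x:%02x:%02x",
             addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
}

int main(void)
{
    unsigned char bssid[6] = { 0x00, 0x1b, 0x2f, 0xaa, 0xbb, 0xcc };
    char buf[18];

    format_mac(buf, bssid);
    printf("switched to short slot time (BSSID=%s)\n", buf);
    return 0;
}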
-1
net/mac80211/sta_info.h
··· 195 * @tx_packets: number of RX/TX MSDUs 196 * @tx_bytes: number of bytes transmitted to this STA 197 * @tx_fragments: number of transmitted MPDUs 198 - * @last_txrate: description of the last used transmit rate 199 * @tid_seq: per-TID sequence numbers for sending to this STA 200 * @ampdu_mlme: A-MPDU state machine state 201 * @timer_to_tid: identity mapping to ID timers
··· 195 * @tx_packets: number of RX/TX MSDUs 196 * @tx_bytes: number of bytes transmitted to this STA 197 * @tx_fragments: number of transmitted MPDUs 198 * @tid_seq: per-TID sequence numbers for sending to this STA 199 * @ampdu_mlme: A-MPDU state machine state 200 * @timer_to_tid: identity mapping to ID timers
+4 -2
net/mac80211/tx.c
··· 1307 if (is_multicast_ether_addr(hdr->addr3)) 1308 memcpy(hdr->addr1, hdr->addr3, ETH_ALEN); 1309 else 1310 - if (mesh_nexthop_lookup(skb, osdata)) 1311 - return 0; 1312 if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0) 1313 IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh, 1314 fwded_frames);
··· 1307 if (is_multicast_ether_addr(hdr->addr3)) 1308 memcpy(hdr->addr1, hdr->addr3, ETH_ALEN); 1309 else 1310 + if (mesh_nexthop_lookup(skb, osdata)) { 1311 + dev_put(odev); 1312 + return 0; 1313 + } 1314 if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0) 1315 IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh, 1316 fwded_frames);
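The tx.c fix closes a device reference leak: the function had taken a reference on odev earlier, and the early return on a failed mesh_nexthop_lookup() skipped the matching dev_put(). The robust shape for this is a single exit path that undoes every acquisition; a generic, self-contained sketch of that pattern (the refcounted object is illustrative, not the mac80211 code):

/* Keep get/put balanced across early exits with one exit label. */
struct object { int refcnt; };

static void object_get(struct object *o) { o->refcnt++; }
static void object_put(struct object *o) { o->refcnt--; }

static int lookup_fails(void) { return 1; /* stand-in for a real check */ }

int transmit(struct object *dev)
{
    int ret = 0;

    object_get(dev); /* acquired: must be released on every path below */

    if (lookup_fails()) {
        ret = 0;     /* frame dropped, not an error... */
        goto out;    /* ...but the reference still goes back */
    }

    /* ... actual transmit work ... */
out:
    object_put(dev);
    return ret;
}

int main(void)
{
    struct object dev = { .refcnt = 0 };

    transmit(&dev);
    return dev.refcnt; /* 0 if the references balanced */
}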
+3
net/netfilter/nf_conntrack_netlink.c
··· 831 if (!parse_nat_setup) { 832 #ifdef CONFIG_MODULES 833 rcu_read_unlock(); 834 nfnl_unlock(); 835 if (request_module("nf-nat-ipv4") < 0) { 836 nfnl_lock(); 837 rcu_read_lock(); 838 return -EOPNOTSUPP; 839 } 840 nfnl_lock(); 841 rcu_read_lock(); 842 if (nfnetlink_parse_nat_setup_hook) 843 return -EAGAIN;
··· 831 if (!parse_nat_setup) { 832 #ifdef CONFIG_MODULES 833 rcu_read_unlock(); 834 + spin_unlock_bh(&nf_conntrack_lock); 835 nfnl_unlock(); 836 if (request_module("nf-nat-ipv4") < 0) { 837 nfnl_lock(); 838 + spin_lock_bh(&nf_conntrack_lock); 839 rcu_read_lock(); 840 return -EOPNOTSUPP; 841 } 842 nfnl_lock(); 843 + spin_lock_bh(&nf_conntrack_lock); 844 rcu_read_lock(); 845 if (nfnetlink_parse_nat_setup_hook) 846 return -EAGAIN;
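request_module() can sleep, and this caller holds nf_conntrack_lock (a spinlock) in addition to the nfnl mutex and an RCU read lock, so the fix drops all three before loading the module and retakes them afterwards, releasing in the reverse of the acquisition order and reacquiring in the original order. A pthread analogy of that drop/reacquire discipline around a blocking call (lock names and the loader are illustrative):

#include <pthread.h>

static pthread_mutex_t subsys_lock = PTHREAD_MUTEX_INITIALIZER; /* ~nfnl */
static pthread_mutex_t table_lock  = PTHREAD_MUTEX_INITIALIZER; /* ~conntrack */

static int load_helper(void) { return 0; /* stands in for request_module() */ }

int ensure_helper(void)
{
    int err;

    pthread_mutex_lock(&subsys_lock);
    pthread_mutex_lock(&table_lock);

    /* About to block: release in reverse order of acquisition. */
    pthread_mutex_unlock(&table_lock);
    pthread_mutex_unlock(&subsys_lock);

    err = load_helper(); /* may sleep; no locks may be held here */

    /* Reacquire in the original order to preserve the lock hierarchy. */
    pthread_mutex_lock(&subsys_lock);
    pthread_mutex_lock(&table_lock);

    /* State must be revalidated here: the world may have changed while
     * unlocked (the kernel code returns -EAGAIN so the caller retries). */

    pthread_mutex_unlock(&table_lock);
    pthread_mutex_unlock(&subsys_lock);
    return err;
}

int main(void) { return ensure_helper(); }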
+13
net/sctp/input.c
··· 249 */ 250 sctp_bh_lock_sock(sk); 251 252 if (sock_owned_by_user(sk)) { 253 SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG); 254 sctp_add_backlog(sk, skb);
··· 249 */ 250 sctp_bh_lock_sock(sk); 251 252 + if (sk != rcvr->sk) { 253 + /* Our cached sk is different from the rcvr->sk. This is 254 + * because migrate()/accept() may have moved the association 255 + * to a new socket and released all the sockets. So now we 256 + * are holding a lock on the old socket while the user may 257 + * be doing something with the new socket. Switch our view 258 + * of the current sk. 259 + */ 260 + sctp_bh_unlock_sock(sk); 261 + sk = rcvr->sk; 262 + sctp_bh_lock_sock(sk); 263 + } 264 + 265 if (sock_owned_by_user(sk)) { 266 SCTP_INC_STATS_BH(SCTP_MIB_IN_PKT_BACKLOG); 267 sctp_add_backlog(sk, skb);
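The input.c race: between looking the association up and taking the socket lock, accept() or peeloff can migrate it to a new socket, leaving the caller locked on a stale one. The fix rechecks rcvr->sk after locking and retargets. The generic lock-revalidate-retarget shape, as a pthread sketch (types are illustrative; real code would also need the rcvr->sk load to be properly ordered):

#include <pthread.h>

struct sock { pthread_mutex_t lock; };
struct receiver { struct sock *sk; /* may be switched concurrently */ };

/* Lock the receiver's *current* socket even if it migrated while we
 * held only a stale cached pointer. Caller unlocks when done. */
struct sock *lock_current_sk(struct receiver *rcvr)
{
    struct sock *sk = rcvr->sk; /* cached, possibly already stale */

    pthread_mutex_lock(&sk->lock);
    while (sk != rcvr->sk) {
        /* Migrated under us: drop the old lock and chase the new sk. */
        pthread_mutex_unlock(&sk->lock);
        sk = rcvr->sk;
        pthread_mutex_lock(&sk->lock);
    }
    return sk;
}

int main(void)
{
    struct sock s = { .lock = PTHREAD_MUTEX_INITIALIZER };
    struct receiver r = { .sk = &s };

    pthread_mutex_unlock(&lock_current_sk(&r)->lock);
    return 0;
}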
+4 -3
net/sctp/output.c
··· 324 switch (chunk->chunk_hdr->type) { 325 case SCTP_CID_DATA: 326 retval = sctp_packet_append_data(packet, chunk); 327 /* Disallow SACK bundling after DATA. */ 328 packet->has_sack = 1; 329 /* Disallow AUTH bundling after DATA */ 330 packet->has_auth = 1; 331 /* Let it be known that packet has DATA in it */ 332 packet->has_data = 1; 333 - if (SCTP_XMIT_OK != retval) 334 - goto finish; 335 break; 336 case SCTP_CID_COOKIE_ECHO: 337 packet->has_cookie_echo = 1; ··· 472 } else 473 chunk->resent = 1; 474 475 - chunk->sent_at = jiffies; 476 has_data = 1; 477 } 478
··· 324 switch (chunk->chunk_hdr->type) { 325 case SCTP_CID_DATA: 326 retval = sctp_packet_append_data(packet, chunk); 327 + if (SCTP_XMIT_OK != retval) 328 + goto finish; 329 /* Disallow SACK bundling after DATA. */ 330 packet->has_sack = 1; 331 /* Disallow AUTH bundling after DATA */ 332 packet->has_auth = 1; 333 /* Let it be known that packet has DATA in it */ 334 packet->has_data = 1; 335 + /* timestamp the chunk for rtx purposes */ 336 + chunk->sent_at = jiffies; 337 break; 338 case SCTP_CID_COOKIE_ECHO: 339 packet->has_cookie_echo = 1; ··· 470 } else 471 chunk->resent = 1; 472 473 has_data = 1; 474 475
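Both output.c hunks are ordering fixes: a failed sctp_packet_append_data() now bails out before any bundling flags are set, and sent_at is stamped at the moment the DATA chunk actually joins a packet rather than later on the generic transmit path, so retransmission timing sees when the data was really bundled. The check-before-mark shape, reduced to a self-contained sketch (structs and names are mine):

#include <stddef.h>
#include <time.h>

struct chunk  { time_t sent_at; };
struct packet { int has_data; size_t used, size; };

static int try_append(struct packet *pkt, const struct chunk *ch)
{
    (void)ch;
    if (pkt->used >= pkt->size)
        return -1; /* no room: nothing was added */
    pkt->used++;
    return 0;
}

int packet_append_data(struct packet *pkt, struct chunk *ch)
{
    if (try_append(pkt, ch))
        return -1;             /* failed: leave packet state untouched */

    pkt->has_data = 1;         /* only now is the flag actually true */
    ch->sent_at = time(NULL);  /* stamp when the chunk really joins a packet */
    return 0;
}

int main(void)
{
    struct packet p = { .size = 1 };
    struct chunk c = { 0 };

    return packet_append_data(&p, &c);
}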
+1 -2
net/sctp/outqueue.c
··· 929 } 930 931 /* Finally, transmit new packets. */ 932 - start_timer = 0; 933 while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { 934 /* RFC 2960 6.5 Every DATA chunk MUST carry a valid 935 * stream identifier. ··· 1027 list_add_tail(&chunk->transmitted_list, 1028 &transport->transmitted); 1029 1030 - sctp_transport_reset_timers(transport, start_timer-1); 1031 1032 q->empty = 0; 1033
··· 929 } 930 931 /* Finally, transmit new packets. */ 932 while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { 933 /* RFC 2960 6.5 Every DATA chunk MUST carry a valid 934 * stream identifier. ··· 1028 list_add_tail(&chunk->transmitted_list, 1029 &transport->transmitted); 1030 1031 + sctp_transport_reset_timers(transport, 0); 1032 1033 q->empty = 0; 1034
+117 -11
net/wireless/reg.c
··· 421 return 0; 422 } 423 424 /* Converts a country IE to a regulatory domain. A regulatory domain 425 * structure has a lot of information which the IE doesn't yet have, 426 * so for the other values we use upper max values as we will intersect ··· 563 564 /* This time around we fill in the rd */ 565 while (country_ie_len >= 3) { 566 struct ieee80211_country_ie_triplet *triplet = 567 (struct ieee80211_country_ie_triplet *) country_ie; 568 struct ieee80211_reg_rule *reg_rule = NULL; ··· 585 586 reg_rule->flags = flags; 587 588 /* The +10 is since the regulatory domain expects 589 * the actual band edge, not the center of freq for 590 * its start and end freqs, assuming 20 MHz bandwidth on ··· 611 triplet->chans.first_channel) - 10); 612 freq_range->end_freq_khz = 613 MHZ_TO_KHZ(ieee80211_channel_to_frequency( 614 - triplet->chans.first_channel + 615 - triplet->chans.num_channels) + 10); 616 617 /* Large arbitrary values, we intersect later */ 618 /* Increment this if we ever support >= 40 MHz channels ··· 790 * this value to the maximum allowed bandwidth. 791 * @reg_rule: the regulatory rule which we have for this frequency 792 * 793 - * Use this function to get the regulatory rule for a specific frequency. 794 */ 795 static int freq_reg_info(u32 center_freq, u32 *bandwidth, 796 const struct ieee80211_reg_rule **reg_rule) 797 { 798 int i; 799 u32 max_bandwidth = 0; 800 801 if (!cfg80211_regdomain) ··· 820 rr = &cfg80211_regdomain->reg_rules[i]; 821 fr = &rr->freq_range; 822 pr = &rr->power_rule; 823 max_bandwidth = freq_max_bandwidth(fr, center_freq); 824 if (max_bandwidth && *bandwidth <= max_bandwidth) { 825 *reg_rule = rr; 826 *bandwidth = max_bandwidth; ··· 836 } 837 } 838 839 return !max_bandwidth; 840 } 841 842 - static void handle_channel(struct ieee80211_channel *chan) 843 { 844 int r; 845 - u32 flags = chan->orig_flags; 846 u32 max_bandwidth = 0; 847 const struct ieee80211_reg_rule *reg_rule = NULL; 848 const struct ieee80211_power_rule *power_rule = NULL; 849 850 r = freq_reg_info(MHZ_TO_KHZ(chan->center_freq), 851 &max_bandwidth, &reg_rule); 852 853 if (r) { 854 - flags |= IEEE80211_CHAN_DISABLED; 855 - chan->flags = flags; 856 return; 857 } 858 ··· 910 chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp); 911 } 912 913 - static void handle_band(struct ieee80211_supported_band *sband) 914 { 915 - int i; 916 917 for (i = 0; i < sband->n_channels; i++) 918 - handle_channel(&sband->channels[i]); 919 } 920 921 static bool ignore_reg_update(struct wiphy *wiphy, enum reg_set_by setby) ··· 946 enum ieee80211_band band; 947 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 948 if (wiphy->bands[band]) 949 - handle_band(wiphy->bands[band]); 950 if (wiphy->reg_notifier) 951 wiphy->reg_notifier(wiphy, setby); 952 }
··· 421 return 0; 422 } 423 424 + /** 425 + * freq_in_rule_band - tells us if a frequency is in a frequency band 426 + * @freq_range: frequency rule we want to query 427 + * @freq_khz: frequency we are inquiring about 428 + * 429 + * This lets us know if a specific frequency rule is or is not relevant to 430 + * a specific frequency's band. Bands are device specific and artificial 431 + * definitions (the "2.4 GHz band" and the "5 GHz band"), however it is 432 + * safe for now to assume that a frequency rule should not be part of a 433 + * frequency's band if the start freq or end freq are off by more than 2 GHz. 434 + * This resolution can be lowered and should be considered as we add 435 + * regulatory rule support for other "bands". 436 + **/ 437 + static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range, 438 + u32 freq_khz) 439 + { 440 + #define ONE_GHZ_IN_KHZ 1000000 441 + if (abs(freq_khz - freq_range->start_freq_khz) <= (2 * ONE_GHZ_IN_KHZ)) 442 + return true; 443 + if (abs(freq_khz - freq_range->end_freq_khz) <= (2 * ONE_GHZ_IN_KHZ)) 444 + return true; 445 + return false; 446 + #undef ONE_GHZ_IN_KHZ 447 + } 448 + 449 /* Converts a country IE to a regulatory domain. A regulatory domain 450 * structure has a lot of information which the IE doesn't yet have, 451 * so for the other values we use upper max values as we will intersect ··· 538 539 /* This time around we fill in the rd */ 540 while (country_ie_len >= 3) { 541 + int end_channel = 0; 542 struct ieee80211_country_ie_triplet *triplet = 543 (struct ieee80211_country_ie_triplet *) country_ie; 544 struct ieee80211_reg_rule *reg_rule = NULL; ··· 559 560 reg_rule->flags = flags; 561 562 + /* 2 GHz */ 563 + if (triplet->chans.first_channel <= 14) 564 + end_channel = triplet->chans.first_channel + 565 + triplet->chans.num_channels; 566 + else 567 + /* 568 + * 5 GHz -- For example in country IEs if the first 569 + * channel given is 36 and the number of channels is 4 570 + * then the individual channel numbers defined for the 571 + * 5 GHz PHY by these parameters are: 36, 40, 44, and 48 572 + * and not 36, 37, 38, 39. 573 + * 574 + * See: http://tinyurl.com/11d-clarification 575 + */ 576 + end_channel = triplet->chans.first_channel + 577 + (4 * (triplet->chans.num_channels - 1)); 578 + 579 /* The +10 is since the regulatory domain expects 580 * the actual band edge, not the center of freq for 581 * its start and end freqs, assuming 20 MHz bandwidth on ··· 568 triplet->chans.first_channel) - 10); 569 freq_range->end_freq_khz = 570 MHZ_TO_KHZ(ieee80211_channel_to_frequency( 571 + end_channel) + 10); 572 573 /* Large arbitrary values, we intersect later */ 574 /* Increment this if we ever support >= 40 MHz channels ··· 748 * this value to the maximum allowed bandwidth. 749 * @reg_rule: the regulatory rule which we have for this frequency 750 * 751 + * Use this function to get the regulatory rule for a specific frequency on 752 + * a given wireless device. If the device has a specific regulatory domain 753 + * it wants to follow we respect that unless a country IE has been received 754 + * and processed already. 755 + * 756 + * Returns 0 if it was able to find a valid regulatory rule which does 757 + * apply to the given center_freq otherwise it returns non-zero. It will 758 + * also return -ERANGE if we determine the given center_freq does not even have 759 + * a regulatory rule for a frequency range in the center_freq's band. 
See 760 + * freq_in_rule_band() for our current definition of a band -- this is purely 761 + * subjective and right now it's 802.11 specific. 762 */ 763 static int freq_reg_info(u32 center_freq, u32 *bandwidth, 764 const struct ieee80211_reg_rule **reg_rule) 765 { 766 int i; 767 + bool band_rule_found = false; 768 u32 max_bandwidth = 0; 769 770 if (!cfg80211_regdomain) ··· 767 rr = &cfg80211_regdomain->reg_rules[i]; 768 fr = &rr->freq_range; 769 pr = &rr->power_rule; 770 + 771 + /* We only need to know if one frequency rule was 772 + * in center_freq's band, that's enough, so let's 773 + * not overwrite it once found */ 774 + if (!band_rule_found) 775 + band_rule_found = freq_in_rule_band(fr, center_freq); 776 + 777 max_bandwidth = freq_max_bandwidth(fr, center_freq); 778 + 779 if (max_bandwidth && *bandwidth <= max_bandwidth) { 780 *reg_rule = rr; 781 *bandwidth = max_bandwidth; ··· 775 } 776 } 777 778 + if (!band_rule_found) 779 + return -ERANGE; 780 + 781 return !max_bandwidth; 782 } 783 784 + static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band, 785 + unsigned int chan_idx) 786 { 787 int r; 788 + u32 flags; 789 u32 max_bandwidth = 0; 790 const struct ieee80211_reg_rule *reg_rule = NULL; 791 const struct ieee80211_power_rule *power_rule = NULL; 792 + struct ieee80211_supported_band *sband; 793 + struct ieee80211_channel *chan; 794 + 795 + sband = wiphy->bands[band]; 796 + BUG_ON(chan_idx >= sband->n_channels); 797 + chan = &sband->channels[chan_idx]; 798 + 799 + flags = chan->orig_flags; 800 801 r = freq_reg_info(MHZ_TO_KHZ(chan->center_freq), 802 &max_bandwidth, &reg_rule); 803 804 if (r) { 805 + /* This means no regulatory rule was found in the country IE 806 + * with a frequency range on the center_freq's band, since 807 + * IEEE-802.11 allows for a country IE to have a subset of the 808 + * regulatory information provided in a country we ignore 809 + * disabling the channel unless at least one reg rule was 810 + * found on the center_freq's band.
For details see this 811 + * clarification: 812 + * 813 + * http://tinyurl.com/11d-clarification 814 + */ 815 + if (r == -ERANGE && 816 + last_request->initiator == REGDOM_SET_BY_COUNTRY_IE) { 817 + #ifdef CONFIG_CFG80211_REG_DEBUG 818 + printk(KERN_DEBUG "cfg80211: Leaving channel %d MHz " 819 + "intact on %s - no rule found in band on " 820 + "Country IE\n", 821 + chan->center_freq, wiphy_name(wiphy)); 822 + #endif 823 + } else { 824 + /* In this case we know the country IE has at least one reg rule 825 + * for the band so we respect its band definitions */ 826 + #ifdef CONFIG_CFG80211_REG_DEBUG 827 + if (last_request->initiator == REGDOM_SET_BY_COUNTRY_IE) 828 + printk(KERN_DEBUG "cfg80211: Disabling " 829 + "channel %d MHz on %s due to " 830 + "Country IE\n", 831 + chan->center_freq, wiphy_name(wiphy)); 832 + #endif 833 + flags |= IEEE80211_CHAN_DISABLED; 834 + chan->flags = flags; 835 + } 836 return; 837 } 838 ··· 808 chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp); 809 } 810 811 + static void handle_band(struct wiphy *wiphy, enum ieee80211_band band) 812 { 813 + unsigned int i; 814 + struct ieee80211_supported_band *sband; 815 + 816 + BUG_ON(!wiphy->bands[band]); 817 + sband = wiphy->bands[band]; 818 819 for (i = 0; i < sband->n_channels; i++) 820 + handle_channel(wiphy, band, i); 821 } 822 823 static bool ignore_reg_update(struct wiphy *wiphy, enum reg_set_by setby) ··· 840 enum ieee80211_band band; 841 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 842 if (wiphy->bands[band]) 843 + handle_band(wiphy, band); 844 if (wiphy->reg_notifier) 845 wiphy->reg_notifier(wiphy, setby); 846 }
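The country-IE parsing above hinges on the 5 GHz channel numbering quirk spelled out in its comment: triplets count channels in strides of four (36, 40, 44, 48, ...), and band edges come from the 20 MHz channel width, hence the +/-10 MHz around the first and last channel centers. A self-contained sketch of the arithmetic, using the standard 802.11 channel-to-frequency mapping (the helper name is mine):

#include <stdio.h>

/* Standard 802.11 channel number -> center frequency in MHz. */
static int channel_to_freq(int chan)
{
    if (chan == 14)
        return 2484;
    if (chan < 14)
        return 2407 + chan * 5;  /* 2.4 GHz band */
    return 5000 + chan * 5;      /* 5 GHz band: channels 36, 40, ... */
}

int main(void)
{
    int first_channel = 36, num_channels = 4; /* example from the comment */
    int end_channel;

    if (first_channel <= 14) /* 2.4 GHz triplet: consecutive numbering */
        end_channel = first_channel + num_channels;
    else                     /* 5 GHz triplet: stride of 4 per channel */
        end_channel = first_channel + 4 * (num_channels - 1);

    /* +/-10 MHz turns 20 MHz-wide channel centers into band edges. */
    printf("channels %d..%d => %d..%d MHz\n",
           first_channel, end_channel,
           channel_to_freq(first_channel) - 10,
           channel_to_freq(end_channel) + 10);
    return 0;
}

For first_channel 36 and num_channels 4 this prints channels 36..48 => 5170..5250 MHz, matching the 36/40/44/48 example in the comment above.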
+2 -9
net/xfrm/xfrm_user.c
··· 1914 } 1915 #endif 1916 1917 - /* For the xfrm_usersa_info cases we have to work around some 32-bit vs. 1918 - * 64-bit compatibility issues. On 32-bit the structure is 220 bytes, but 1919 - * for 64-bit it gets padded out to 224 bytes. Those bytes are just 1920 - * padding and don't have any content we care about. Therefore as long 1921 - * as we have enough bytes for the content we can make both cases work. 1922 - */ 1923 - 1924 #define XMSGSIZE(type) sizeof(struct type) 1925 1926 static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = { 1927 - [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = 220, /* see above */ 1928 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id), 1929 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id), 1930 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info), ··· 1927 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire), 1928 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire), 1929 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info), 1930 - [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = 220, /* see above */ 1931 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire), 1932 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush), 1933 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
··· 1914 } 1915 #endif 1916 1917 #define XMSGSIZE(type) sizeof(struct type) 1918 1919 static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = { 1920 + [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info), 1921 [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id), 1922 [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id), 1923 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info), ··· 1934 [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire), 1935 [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire), 1936 [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info), 1937 + [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info), 1938 [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire), 1939 [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush), 1940 [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
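The hardcoded 220s existed because struct xfrm_usersa_info sized differently across ABIs — 220 bytes on 32-bit, padded to 224 on 64-bit — so the table had to carry the common minimum by hand; this hunk returns to sizeof() via XMSGSIZE() now that the minimum no longer needs to be special-cased. The effect being worked around is ordinary tail padding from alignment; a small demonstration with a hypothetical struct:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: the u64 member forces 8-byte alignment on LP64,
 * so the struct is padded to 16 bytes there, while a 32-bit ABI that
 * aligns u64 to 4 bytes (e.g. classic i386) packs it into 12. */
struct demo {
    uint64_t big;
    uint32_t small;
};

int main(void)
{
    printf("sizeof(struct demo) = %zu\n", sizeof(struct demo));
    return 0;
}

Compiled -m64 vs -m32 on x86 this typically prints 16 and 12 respectively — the same class of mismatch as the 224-vs-220 xfrm_usersa_info case.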