Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (27 commits)
sfc: Change falcon_probe_board() to fail for unsupported boards
sfc: Always close net device at the end of a disabling reset
sfc: Wait at most 10ms for the MC to finish reading out MAC statistics
sctp: Fix oops when sending queued ASCONF chunks
sctp: fix to calc the INIT/INIT-ACK chunk length correctly is set
sctp: per_cpu variables should be in bh_disabled section
sctp: fix potential reference of a freed pointer
sctp: avoid irq lock inversion while call sk->sk_data_ready()
Revert "tcp: bind() fix when many ports are bound"
net/usb: add sierra_net.c driver
cdc_ether: fix autosuspend for mbm devices
bluetooth: handle l2cap_create_connless_pdu() errors
gianfar: Wait for both RX and TX to stop
ipheth: potential null dereferences on error path
smc91c92_cs: spin_unlock_irqrestore before calling smc_interrupt()
drivers/usb/net/kaweth.c: add device "Allied Telesyn AT-USB10 USB Ethernet Adapter"
bnx2: Update version to 2.0.9.
bnx2: Prevent "scheduling while atomic" warning with cnic, bonding and vlan.
bnx2: Fix lost MSI-X problem on 5709 NICs.
cxgb3: Wait longer for control packets on initialization
...

+1312 -188
+27 -21
drivers/net/bnx2.c
···
58 58 #include "bnx2_fw.h"
59 59
60 60 #define DRV_MODULE_NAME "bnx2"
61 - #define DRV_MODULE_VERSION "2.0.8"
62 - #define DRV_MODULE_RELDATE "Feb 15, 2010"
61 + #define DRV_MODULE_VERSION "2.0.9"
62 + #define DRV_MODULE_RELDATE "April 27, 2010"
63 63 #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw"
64 64 #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
65 65 #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw"
···
651 651 }
652 652
653 653 static void
654 - bnx2_netif_stop(struct bnx2 *bp)
654 + bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
655 655 {
656 - bnx2_cnic_stop(bp);
656 + if (stop_cnic)
657 + bnx2_cnic_stop(bp);
657 658 if (netif_running(bp->dev)) {
658 659 int i;
659 660
···
672 671 }
673 672
674 673 static void
675 - bnx2_netif_start(struct bnx2 *bp)
674 + bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
676 675 {
677 676 if (atomic_dec_and_test(&bp->intr_sem)) {
678 677 if (netif_running(bp->dev)) {
679 678 netif_tx_wake_all_queues(bp->dev);
680 679 bnx2_napi_enable(bp);
681 680 bnx2_enable_int(bp);
682 - bnx2_cnic_start(bp);
681 + if (start_cnic)
682 + bnx2_cnic_start(bp);
683 683 }
684 684 }
685 685 }
···
4761 4759 rc = bnx2_alloc_bad_rbuf(bp);
4762 4760 }
4763 4761
4764 - if (bp->flags & BNX2_FLAG_USING_MSIX)
4762 + if (bp->flags & BNX2_FLAG_USING_MSIX) {
4765 4763 bnx2_setup_msix_tbl(bp);
4764 + /* Prevent MSIX table reads and write from timing out */
4765 + REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
4766 + BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4767 + }
4766 4768
4767 4769 return rc;
4768 4770 }
···
6279 6273 return;
6280 6274 }
6281 6275
6282 - bnx2_netif_stop(bp);
6276 + bnx2_netif_stop(bp, true);
6283 6277
6284 6278 bnx2_init_nic(bp, 1);
6285 6279
6286 6280 atomic_set(&bp->intr_sem, 1);
6287 - bnx2_netif_start(bp);
6281 + bnx2_netif_start(bp, true);
6288 6282 rtnl_unlock();
6289 6283 }
6290 6284
···
6326 6320 struct bnx2 *bp = netdev_priv(dev);
6327 6321
6328 6322 if (netif_running(dev))
6329 - bnx2_netif_stop(bp);
6323 + bnx2_netif_stop(bp, false);
6330 6324
6331 6325 bp->vlgrp = vlgrp;
6332 6326
···
6337 6331 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
6338 6332 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
6339 6333
6340 - bnx2_netif_start(bp);
6334 + bnx2_netif_start(bp, false);
6341 6335 }
6342 6336 #endif
6343 6337
···
7057 7051 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7058 7052
7059 7053 if (netif_running(bp->dev)) {
7060 - bnx2_netif_stop(bp);
7054 + bnx2_netif_stop(bp, true);
7061 7055 bnx2_init_nic(bp, 0);
7062 - bnx2_netif_start(bp);
7056 + bnx2_netif_start(bp, true);
7063 7057 }
7064 7058
7065 7059 return 0;
···
7089 7083 /* Reset will erase chipset stats; save them */
7090 7084 bnx2_save_stats(bp);
7091 7085
7092 - bnx2_netif_stop(bp);
7086 + bnx2_netif_stop(bp, true);
7093 7087 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7094 7088 bnx2_free_skbs(bp);
7095 7089 bnx2_free_mem(bp);
···
7117 7111 bnx2_setup_cnic_irq_info(bp);
7118 7112 mutex_unlock(&bp->cnic_lock);
7119 7113 #endif
7120 - bnx2_netif_start(bp);
7114 + bnx2_netif_start(bp, true);
7121 7115 }
7122 7116 return 0;
7123 7117 }
···
7370 7364 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7371 7365 int i;
7372 7366
7373 - bnx2_netif_stop(bp);
7367 + bnx2_netif_stop(bp, true);
7374 7368 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7375 7369 bnx2_free_skbs(bp);
7376 7370
···
7389 7383 bnx2_shutdown_chip(bp);
7390 7384 else {
7391 7385 bnx2_init_nic(bp, 1);
7392 - bnx2_netif_start(bp);
7386 + bnx2_netif_start(bp, true);
7393 7387 }
7394 7388
7395 7389 /* wait for link up */
···
8383 8377 return 0;
8384 8378
8385 8379 flush_scheduled_work();
8386 - bnx2_netif_stop(bp);
8380 + bnx2_netif_stop(bp, true);
8387 8381 netif_device_detach(dev);
8388 8382 del_timer_sync(&bp->timer);
8389 8383 bnx2_shutdown_chip(bp);
···
8405 8399 bnx2_set_power_state(bp, PCI_D0);
8406 8400 netif_device_attach(dev);
8407 8401 bnx2_init_nic(bp, 1);
8408 - bnx2_netif_start(bp);
8402 + bnx2_netif_start(bp, true);
8409 8403 return 0;
8410 8404 }
8411 8405
···
8432 8426 }
8433 8427
8434 8428 if (netif_running(dev)) {
8435 - bnx2_netif_stop(bp);
8429 + bnx2_netif_stop(bp, true);
8436 8430 del_timer_sync(&bp->timer);
8437 8431 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8438 8432 }
···
8489 8483
8490 8484 rtnl_lock();
8491 8485 if (netif_running(dev))
8492 - bnx2_netif_start(bp);
8486 + bnx2_netif_start(bp, true);
8493 8487
8494 8488 netif_device_attach(dev);
8495 8489 rtnl_unlock();
+1 -1
drivers/net/cxgb3/cxgb3_main.c
···
439 439 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
440 440 unsigned long n)
441 441 {
442 - int attempts = 5;
442 + int attempts = 10;
443 443
444 444 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
445 445 if (!--attempts)
+9 -11
drivers/net/e1000e/82571.c
···
336 336 struct e1000_hw *hw = &adapter->hw;
337 337 static int global_quad_port_a; /* global port a indication */
338 338 struct pci_dev *pdev = adapter->pdev;
339 - u16 eeprom_data = 0;
340 339 int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1;
341 340 s32 rc;
342 341
···
386 387 if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)
387 388 adapter->flags &= ~FLAG_HAS_WOL;
388 389 break;
389 -
390 390 case e1000_82573:
391 + case e1000_82574:
392 + case e1000_82583:
393 + /* Disable ASPM L0s due to hardware errata */
394 + e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L0S);
395 +
391 396 if (pdev->device == E1000_DEV_ID_82573L) {
392 - if (e1000_read_nvm(&adapter->hw, NVM_INIT_3GIO_3, 1,
393 - &eeprom_data) < 0)
394 - break;
395 - if (!(eeprom_data & NVM_WORD1A_ASPM_MASK)) {
396 - adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
397 - adapter->max_hw_frame_size = DEFAULT_JUMBO;
398 - }
397 + adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
398 + adapter->max_hw_frame_size = DEFAULT_JUMBO;
399 399 }
400 400 break;
401 401 default:
···
1790 1792 | FLAG_RESET_OVERWRITES_LAA /* errata */
1791 1793 | FLAG_TARC_SPEED_MODE_BIT /* errata */
1792 1794 | FLAG_APME_CHECK_PORT_B,
1795 + .flags2 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */
1793 1796 .pba = 38,
1794 1797 .max_hw_frame_size = DEFAULT_JUMBO,
1795 1798 .get_variants = e1000_get_variants_82571,
···
1808 1809 | FLAG_RX_CSUM_ENABLED
1809 1810 | FLAG_HAS_CTRLEXT_ON_LOAD
1810 1811 | FLAG_TARC_SPEED_MODE_BIT, /* errata */
1812 + .flags2 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */
1811 1813 .pba = 38,
1812 1814 .max_hw_frame_size = DEFAULT_JUMBO,
1813 1815 .get_variants = e1000_get_variants_82571,
···
1820 1820 struct e1000_info e1000_82573_info = {
1821 1821 .mac = e1000_82573,
1822 1822 .flags = FLAG_HAS_HW_VLAN_FILTER
1823 - | FLAG_HAS_JUMBO_FRAMES
1824 1823 | FLAG_HAS_WOL
1825 1824 | FLAG_APME_IN_CTRL3
1826 1825 | FLAG_RX_CSUM_ENABLED
1827 1826 | FLAG_HAS_SMART_POWER_DOWN
1828 1827 | FLAG_HAS_AMT
1829 - | FLAG_HAS_ERT
1830 1828 | FLAG_HAS_SWSM_ON_LOAD,
1831 1829 .pba = 20,
1832 1830 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
+4 -1
drivers/net/e1000e/e1000.h
···
37 37 #include <linux/io.h>
38 38 #include <linux/netdevice.h>
39 39 #include <linux/pci.h>
40 + #include <linux/pci-aspm.h>
40 41
41 42 #include "hw.h"
42 43
···
375 374 struct e1000_info {
376 375 enum e1000_mac_type mac;
377 376 unsigned int flags;
378 - unsigned int flags2;
377 + unsigned int flags2;
379 378 u32 pba;
380 379 u32 max_hw_frame_size;
381 380 s32 (*get_variants)(struct e1000_adapter *);
···
422 421 #define FLAG2_CRC_STRIPPING (1 << 0)
423 422 #define FLAG2_HAS_PHY_WAKEUP (1 << 1)
424 423 #define FLAG2_IS_DISCARDING (1 << 2)
424 + #define FLAG2_DISABLE_ASPM_L1 (1 << 3)
425 425
426 426 #define E1000_RX_DESC_PS(R, i) \
427 427 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
···
463 461 extern bool e1000e_has_link(struct e1000_adapter *adapter);
464 462 extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
465 463 extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
464 + extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
466 465
467 466 extern unsigned int copybreak;
468 467
+42 -28
drivers/net/e1000e/netdev.c
···
4283 4283 return -EINVAL;
4284 4284 }
4285 4285
4286 + /* 82573 Errata 17 */
4287 + if (((adapter->hw.mac.type == e1000_82573) ||
4288 + (adapter->hw.mac.type == e1000_82574)) &&
4289 + (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
4290 + adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
4291 + e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
4292 + }
4293 +
4286 4294 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
4287 4295 msleep(1);
4288 4296 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
···
4613 4605 }
4614 4606 }
4615 4607
4616 - static void e1000e_disable_l1aspm(struct pci_dev *pdev)
4608 + #ifdef CONFIG_PCIEASPM
4609 + static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
4610 + {
4611 + pci_disable_link_state(pdev, state);
4612 + }
4613 + #else
4614 + static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
4617 4615 {
4618 4616 int pos;
4619 - u16 val;
4617 + u16 reg16;
4620 4618
4621 4619 /*
4622 - * 82573 workaround - disable L1 ASPM on mobile chipsets
4623 - *
4624 - * L1 ASPM on various mobile (ich7) chipsets do not behave properly
4625 - * resulting in lost data or garbage information on the pci-e link
4626 - * level. This could result in (false) bad EEPROM checksum errors,
4627 - * long ping times (up to 2s) or even a system freeze/hang.
4628 - *
4629 - * Unfortunately this feature saves about 1W power consumption when
4630 - * active.
4620 + * Both device and parent should have the same ASPM setting.
4621 + * Disable ASPM in downstream component first and then upstream.
4631 4622 */
4632 - pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
4633 - pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &val);
4634 - if (val & 0x2) {
4635 - dev_warn(&pdev->dev, "Disabling L1 ASPM\n");
4636 - val &= ~0x2;
4637 - pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, val);
4638 - }
4623 + pos = pci_pcie_cap(pdev);
4624 + pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
4625 + reg16 &= ~state;
4626 + pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
4627 +
4628 + pos = pci_pcie_cap(pdev->bus->self);
4629 + pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
4630 + reg16 &= ~state;
4631 + pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
4632 + }
4633 + #endif
4634 + void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
4635 + {
4636 + dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
4637 + (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
4638 + (state & PCIE_LINK_STATE_L1) ? "L1" : "");
4639 +
4640 + __e1000e_disable_aspm(pdev, state);
4639 4641 }
4640 4642
4641 4643 #ifdef CONFIG_PM
···
4671 4653 pci_set_power_state(pdev, PCI_D0);
4672 4654 pci_restore_state(pdev);
4673 4655 pci_save_state(pdev);
4674 - e1000e_disable_l1aspm(pdev);
4656 + if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
4657 + e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
4675 4658
4676 4659 err = pci_enable_device_mem(pdev);
4677 4660 if (err) {
···
4814 4795 int err;
4815 4796 pci_ers_result_t result;
4816 4797
4817 - e1000e_disable_l1aspm(pdev);
4798 + if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
4799 + e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
4818 4800 err = pci_enable_device_mem(pdev);
4819 4801 if (err) {
4820 4802 dev_err(&pdev->dev,
···
4909 4889 dev_warn(&adapter->pdev->dev,
4910 4890 "Warning: detected DSPD enabled in EEPROM\n");
4911 4891 }
4912 -
4913 - ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf);
4914 - if (!ret_val && (le16_to_cpu(buf) & (3 << 2))) {
4915 - /* ASPM enable */
4916 - dev_warn(&adapter->pdev->dev,
4917 - "Warning: detected ASPM enabled in EEPROM\n");
4918 - }
4919 4892 }
4920 4893
4921 4894 static const struct net_device_ops e1000e_netdev_ops = {
···
4957 4944 u16 eeprom_data = 0;
4958 4945 u16 eeprom_apme_mask = E1000_EEPROM_APME;
4959 4946
4960 - e1000e_disable_l1aspm(pdev);
4947 + if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
4948 + e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
4961 4949
4962 4950 err = pci_enable_device_mem(pdev);
4963 4951 if (err)
+3 -3
drivers/net/gianfar.c
···
1511 1511 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1512 1512 gfar_write(&regs->dmactrl, tempval);
1513 1513
1514 - while (!(gfar_read(&regs->ievent) &
1515 - (IEVENT_GRSC | IEVENT_GTSC)))
1516 - cpu_relax();
1514 + spin_event_timeout(((gfar_read(&regs->ievent) &
1515 + (IEVENT_GRSC | IEVENT_GTSC)) ==
1516 + (IEVENT_GRSC | IEVENT_GTSC)), -1, 0);
1517 1517 }
1518 1518 }
1519 1519
+48 -14
drivers/net/ixgbe/ixgbe_82599.c
···
39 39 #define IXGBE_82599_MC_TBL_SIZE 128
40 40 #define IXGBE_82599_VFT_TBL_SIZE 128
41 41
42 + void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
43 + void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
42 44 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
43 45 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
44 46 ixgbe_link_speed speed,
···
71 69 if (hw->phy.multispeed_fiber) {
72 70 /* Set up dual speed SFP+ support */
73 71 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
72 + mac->ops.disable_tx_laser =
73 + &ixgbe_disable_tx_laser_multispeed_fiber;
74 + mac->ops.enable_tx_laser =
75 + &ixgbe_enable_tx_laser_multispeed_fiber;
74 76 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
75 77 } else {
78 + mac->ops.disable_tx_laser = NULL;
79 + mac->ops.enable_tx_laser = NULL;
76 80 mac->ops.flap_tx_laser = NULL;
77 81 if ((mac->ops.get_media_type(hw) ==
78 82 ixgbe_media_type_backplane) &&
···
423 415 return status;
424 416 }
425 417
418 + /**
419 + * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
420 + * @hw: pointer to hardware structure
421 + *
422 + * The base drivers may require better control over SFP+ module
423 + * PHY states. This includes selectively shutting down the Tx
424 + * laser on the PHY, effectively halting physical link.
425 + **/
426 + void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
427 + {
428 + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
429 +
430 + /* Disable tx laser; allow 100us to go dark per spec */
431 + esdp_reg |= IXGBE_ESDP_SDP3;
432 + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
433 + IXGBE_WRITE_FLUSH(hw);
434 + udelay(100);
435 + }
436 +
437 + /**
438 + * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
439 + * @hw: pointer to hardware structure
440 + *
441 + * The base drivers may require better control over SFP+ module
442 + * PHY states. This includes selectively turning on the Tx
443 + * laser on the PHY, effectively starting physical link.
444 + **/
445 + void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
446 + {
447 + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
448 +
449 + /* Enable tx laser; allow 100ms to light up */
450 + esdp_reg &= ~IXGBE_ESDP_SDP3;
451 + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
452 + IXGBE_WRITE_FLUSH(hw);
453 + msleep(100);
454 + }
455 +
426 456 /**
427 457 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
428 458 * @hw: pointer to hardware structure
···
475 429 **/
476 430 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
477 431 {
478 - u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
479 -
480 432 hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n");
481 433
482 434 if (hw->mac.autotry_restart) {
483 - /* Disable tx laser; allow 100us to go dark per spec */
484 - esdp_reg |= IXGBE_ESDP_SDP3;
485 - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
486 - IXGBE_WRITE_FLUSH(hw);
487 - udelay(100);
488 -
489 - /* Enable tx laser; allow 100ms to light up */
490 - esdp_reg &= ~IXGBE_ESDP_SDP3;
491 - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
492 - IXGBE_WRITE_FLUSH(hw);
493 - msleep(100);
494 -
435 + ixgbe_disable_tx_laser_multispeed_fiber(hw);
436 + ixgbe_enable_tx_laser_multispeed_fiber(hw);
495 437 hw->mac.autotry_restart = false;
496 438 }
497 439 }
+12 -10
drivers/net/ixgbe/ixgbe_main.c
···
2982 2982 else
2983 2983 ixgbe_configure_msi_and_legacy(adapter);
2984 2984
2985 + /* enable the optics */
2986 + if (hw->phy.multispeed_fiber)
2987 + hw->mac.ops.enable_tx_laser(hw);
2988 +
2985 2989 clear_bit(__IXGBE_DOWN, &adapter->state);
2986 2990 ixgbe_napi_enable_all(adapter);
2987 2991
···
3246 3242
3247 3243 /* signal that we are down to the interrupt handler */
3248 3244 set_bit(__IXGBE_DOWN, &adapter->state);
3245 +
3246 + /* power down the optics */
3247 + if (hw->phy.multispeed_fiber)
3248 + hw->mac.ops.disable_tx_laser(hw);
3249 3249
3250 3250 /* disable receive for all VFs and wait one second */
3251 3251 if (adapter->num_vfs) {
···
6261 6253 goto err_eeprom;
6262 6254 }
6263 6255
6256 + /* power down the optics */
6257 + if (hw->phy.multispeed_fiber)
6258 + hw->mac.ops.disable_tx_laser(hw);
6259 +
6264 6260 init_timer(&adapter->watchdog_timer);
6265 6261 adapter->watchdog_timer.function = &ixgbe_watchdog;
6266 6262 adapter->watchdog_timer.data = (unsigned long)adapter;
···
6412 6400 del_timer_sync(&adapter->sfp_timer);
6413 6401 cancel_work_sync(&adapter->watchdog_task);
6414 6402 cancel_work_sync(&adapter->sfp_task);
6415 - if (adapter->hw.phy.multispeed_fiber) {
6416 - struct ixgbe_hw *hw = &adapter->hw;
6417 - /*
6418 - * Restart clause 37 autoneg, disable and re-enable
6419 - * the tx laser, to clear & alert the link partner
6420 - * that it needs to restart autotry
6421 - */
6422 - hw->mac.autotry_restart = true;
6423 - hw->mac.ops.flap_tx_laser(hw);
6424 - }
6425 6403 cancel_work_sync(&adapter->multispeed_fiber_task);
6426 6404 cancel_work_sync(&adapter->sfp_config_module_task);
6427 6405 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+2
drivers/net/ixgbe/ixgbe_type.h
···
2398 2398 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
2399 2399
2400 2400 /* Link */
2401 + void (*disable_tx_laser)(struct ixgbe_hw *);
2402 + void (*enable_tx_laser)(struct ixgbe_hw *);
2401 2403 void (*flap_tx_laser)(struct ixgbe_hw *);
2402 2404 s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
2403 2405 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
+21 -10
drivers/net/pcmcia/smc91c92_cs.c
···
1804 1804 SMC_SELECT_BANK(1);
1805 1805 media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1;
1806 1806
1807 + SMC_SELECT_BANK(saved_bank);
1808 + spin_unlock_irqrestore(&smc->lock, flags);
1809 +
1807 1810 /* Check for pending interrupt with watchdog flag set: with
1808 1811 this, we can limp along even if the interrupt is blocked */
1809 1812 if (smc->watchdog++ && ((i>>8) & i)) {
1810 1813 if (!smc->fast_poll)
1811 1814 printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
1815 + local_irq_save(flags);
1812 1816 smc_interrupt(dev->irq, dev);
1817 + local_irq_restore(flags);
1813 1818 smc->fast_poll = HZ;
1814 1819 }
1815 1820 if (smc->fast_poll) {
1816 1821 smc->fast_poll--;
1817 1822 smc->media.expires = jiffies + HZ/100;
1818 1823 add_timer(&smc->media);
1819 - SMC_SELECT_BANK(saved_bank);
1820 - spin_unlock_irqrestore(&smc->lock, flags);
1821 1824 return;
1822 1825 }
1826 +
1827 + spin_lock_irqsave(&smc->lock, flags);
1828 +
1829 + saved_bank = inw(ioaddr + BANK_SELECT);
1823 1830
1824 1831 if (smc->cfg & CFG_MII_SELECT) {
1825 1832 if (smc->mii_if.phy_id < 0)
···
1985 1978 unsigned int ioaddr = dev->base_addr;
1986 1979 u16 saved_bank = inw(ioaddr + BANK_SELECT);
1987 1980 int ret;
1981 + unsigned long flags;
1988 1982
1989 - spin_lock_irq(&smc->lock);
1983 + spin_lock_irqsave(&smc->lock, flags);
1990 1984 SMC_SELECT_BANK(3);
1991 1985 if (smc->cfg & CFG_MII_SELECT)
1992 1986 ret = mii_ethtool_gset(&smc->mii_if, ecmd);
1993 1987 else
1994 1988 ret = smc_netdev_get_ecmd(dev, ecmd);
1995 1989 SMC_SELECT_BANK(saved_bank);
1996 - spin_unlock_irq(&smc->lock);
1990 + spin_unlock_irqrestore(&smc->lock, flags);
1997 1991 return ret;
1998 1992 }
1999 1993
···
2004 1996 unsigned int ioaddr = dev->base_addr;
2005 1997 u16 saved_bank = inw(ioaddr + BANK_SELECT);
2006 1998 int ret;
1999 + unsigned long flags;
2007 2000
2008 - spin_lock_irq(&smc->lock);
2001 + spin_lock_irqsave(&smc->lock, flags);
2009 2002 SMC_SELECT_BANK(3);
2010 2003 if (smc->cfg & CFG_MII_SELECT)
2011 2004 ret = mii_ethtool_sset(&smc->mii_if, ecmd);
2012 2005 else
2013 2006 ret = smc_netdev_set_ecmd(dev, ecmd);
2014 2007 SMC_SELECT_BANK(saved_bank);
2015 - spin_unlock_irq(&smc->lock);
2008 + spin_unlock_irqrestore(&smc->lock, flags);
2016 2009 return ret;
2017 2010 }
2018 2011
···
2023 2014 unsigned int ioaddr = dev->base_addr;
2024 2015 u16 saved_bank = inw(ioaddr + BANK_SELECT);
2025 2016 u32 ret;
2017 + unsigned long flags;
2026 2018
2027 - spin_lock_irq(&smc->lock);
2019 + spin_lock_irqsave(&smc->lock, flags);
2028 2020 SMC_SELECT_BANK(3);
2029 2021 ret = smc_link_ok(dev);
2030 2022 SMC_SELECT_BANK(saved_bank);
2031 - spin_unlock_irq(&smc->lock);
2023 + spin_unlock_irqrestore(&smc->lock, flags);
2032 2024 return ret;
2033 2025 }
2034 2026
···
2066 2056 int rc = 0;
2067 2057 u16 saved_bank;
2068 2058 unsigned int ioaddr = dev->base_addr;
2059 + unsigned long flags;
2069 2060
2070 2061 if (!netif_running(dev))
2071 2062 return -EINVAL;
2072 2063
2073 - spin_lock_irq(&smc->lock);
2064 + spin_lock_irqsave(&smc->lock, flags);
2074 2065 saved_bank = inw(ioaddr + BANK_SELECT);
2075 2066 SMC_SELECT_BANK(3);
2076 2067 rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL);
2077 2068 SMC_SELECT_BANK(saved_bank);
2078 - spin_unlock_irq(&smc->lock);
2069 + spin_unlock_irqrestore(&smc->lock, flags);
2079 2070 return rc;
2080 2071 }
2081 2072
+18 -14
drivers/net/r8169.c
···
2759 2759 {
2760 2760 iounmap(ioaddr);
2761 2761 pci_release_regions(pdev);
2762 + pci_clear_mwi(pdev);
2762 2763 pci_disable_device(pdev);
2763 2764 free_netdev(dev);
2764 2765 }
···
2826 2825 spin_lock_irq(&tp->lock);
2827 2826
2828 2827 RTL_W8(Cfg9346, Cfg9346_Unlock);
2828 +
2829 2829 RTL_W32(MAC4, high);
2830 + RTL_R32(MAC4);
2831 +
2830 2832 RTL_W32(MAC0, low);
2833 + RTL_R32(MAC0);
2834 +
2831 2835 RTL_W8(Cfg9346, Cfg9346_Lock);
2832 2836
2833 2837 spin_unlock_irq(&tp->lock);
···
3020 3014 goto err_out_free_dev_1;
3021 3015 }
3022 3016
3023 - rc = pci_set_mwi(pdev);
3024 - if (rc < 0)
3025 - goto err_out_disable_2;
3017 + if (pci_set_mwi(pdev) < 0)
3018 + netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
3026 3019
3027 3020 /* make sure PCI base addr 1 is MMIO */
3028 3021 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
···
3029 3024 "region #%d not an MMIO resource, aborting\n",
3030 3025 region);
3031 3026 rc = -ENODEV;
3032 - goto err_out_mwi_3;
3027 + goto err_out_mwi_2;
3033 3028 }
3034 3029
3035 3030 /* check for weird/broken PCI region reporting */
···
3037 3032 netif_err(tp, probe, dev,
3038 3033 "Invalid PCI region size(s), aborting\n");
3039 3034 rc = -ENODEV;
3040 - goto err_out_mwi_3;
3035 + goto err_out_mwi_2;
3041 3036 }
3042 3037
3043 3038 rc = pci_request_regions(pdev, MODULENAME);
3044 3039 if (rc < 0) {
3045 3040 netif_err(tp, probe, dev, "could not request regions\n");
3046 - goto err_out_mwi_3;
3041 + goto err_out_mwi_2;
3047 3042 }
3048 3043
3049 3044 tp->cp_cmd = PCIMulRW | RxChkSum;
···
3056 3051 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3057 3052 if (rc < 0) {
3058 3053 netif_err(tp, probe, dev, "DMA configuration failed\n");
3059 - goto err_out_free_res_4;
3054 + goto err_out_free_res_3;
3060 3055 }
3061 3056 }
3062 3057
···
3065 3060 if (!ioaddr) {
3066 3061 netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
3067 3062 rc = -EIO;
3068 - goto err_out_free_res_4;
3063 + goto err_out_free_res_3;
3069 3064 }
3070 3065
3071 3066 tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
···
3107 3102 if (i == ARRAY_SIZE(rtl_chip_info)) {
3108 3103 dev_err(&pdev->dev,
3109 3104 "driver bug, MAC version not found in rtl_chip_info\n");
3110 - goto err_out_msi_5;
3105 + goto err_out_msi_4;
3111 3106 }
3112 3107 tp->chipset = i;
3113 3108
···
3172 3167
3173 3168 rc = register_netdev(dev);
3174 3169 if (rc < 0)
3175 - goto err_out_msi_5;
3170 + goto err_out_msi_4;
3176 3171
3177 3172 pci_set_drvdata(pdev, dev);
3178 3173
···
3195 3190 out:
3196 3191 return rc;
3197 3192
3198 - err_out_msi_5:
3193 + err_out_msi_4:
3199 3194 rtl_disable_msi(pdev, tp);
3200 3195 iounmap(ioaddr);
3201 - err_out_free_res_4:
3196 + err_out_free_res_3:
3202 3197 pci_release_regions(pdev);
3203 - err_out_mwi_3:
3198 + err_out_mwi_2:
3204 3199 pci_clear_mwi(pdev);
3205 - err_out_disable_2:
3206 3200 pci_disable_device(pdev);
3207 3201 err_out_free_dev_1:
3208 3202 free_netdev(dev);
+2 -2
drivers/net/sfc/efx.c
···
1861 1861 }
1862 1862
1863 1863 if (disabled) {
1864 + dev_close(efx->net_dev);
1864 1865 EFX_ERR(efx, "has been disabled\n");
1865 1866 efx->state = STATE_DISABLED;
1866 1867 } else {
···
1885 1884 }
1886 1885
1887 1886 rtnl_lock();
1888 - if (efx_reset(efx, efx->reset_pending))
1889 - dev_close(efx->net_dev);
1887 + (void)efx_reset(efx, efx->reset_pending);
1890 1888 rtnl_unlock();
1891 1889 }
1892 1890
+3 -1
drivers/net/sfc/falcon.c
···
1320 1320
1321 1321 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
1322 1322
1323 - falcon_probe_board(efx, board_rev);
1323 + rc = falcon_probe_board(efx, board_rev);
1324 + if (rc)
1325 + goto fail2;
1324 1326
1325 1327 kfree(nvconfig);
1326 1328 return 0;
+3 -10
drivers/net/sfc/falcon_boards.c
···
728 728 },
729 729 };
730 730
731 - static const struct falcon_board_type falcon_dummy_board = {
732 - .init = efx_port_dummy_op_int,
733 - .init_phy = efx_port_dummy_op_void,
734 - .fini = efx_port_dummy_op_void,
735 - .set_id_led = efx_port_dummy_op_set_id_led,
736 - .monitor = efx_port_dummy_op_int,
737 - };
738 -
739 - void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
731 + int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
740 732 {
741 733 struct falcon_board *board = falcon_board(efx);
742 734 u8 type_id = FALCON_BOARD_TYPE(revision_info);
···
746 754 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
747 755 ? board->type->ref_model : board->type->gen_type,
748 756 'A' + board->major, board->minor);
757 + return 0;
749 758 } else {
750 759 EFX_ERR(efx, "unknown board type %d\n", type_id);
751 - board->type = &falcon_dummy_board;
760 + return -ENODEV;
752 761 }
753 762 }
+1 -1
drivers/net/sfc/nic.h
···
156 156 **************************************************************************
157 157 */
158 158
159 - extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info);
159 + extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
160 160
161 161 /* TX data path */
162 162 extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
+11 -2
drivers/net/sfc/siena.c
···
456 456
457 457 static void siena_update_nic_stats(struct efx_nic *efx)
458 458 {
459 - while (siena_try_update_nic_stats(efx) == -EAGAIN)
460 - cpu_relax();
459 + int retry;
460 +
461 + /* If we're unlucky enough to read statistics wduring the DMA, wait
462 + * up to 10ms for it to finish (typically takes <500us) */
463 + for (retry = 0; retry < 100; ++retry) {
464 + if (siena_try_update_nic_stats(efx) == 0)
465 + return;
466 + udelay(100);
467 + }
468 +
469 + /* Use the old values instead */
461 470 }
462 471
463 472 static void siena_start_nic_stats(struct efx_nic *efx)
+1
drivers/net/tg3.c
···
8633 8633 pci_disable_msi(tp->pdev);
8634 8634
8635 8635 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8636 + tp->napi[0].irq_vec = tp->pdev->irq;
8636 8637
8637 8638 err = tg3_request_irq(tp, 0);
8638 8639 if (err)
+10
drivers/net/usb/Kconfig
···
397 397
398 398 For more information: http://giagio.com/wiki/moin.cgi/iPhoneEthernetDriver
399 399
400 + config USB_SIERRA_NET
401 + tristate "USB-to-WWAN Driver for Sierra Wireless modems"
402 + depends on USB_USBNET
403 + default y
404 + help
405 + Choose this option if you have a Sierra Wireless USB-to-WWAN device.
406 +
407 + To compile this driver as a module, choose M here: the
408 + module will be called sierra_net.
409 +
400 410 endmenu
+1
drivers/net/usb/Makefile
···
24 24 obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o
25 25 obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o
26 26 obj-$(CONFIG_USB_IPHETH) += ipheth.o
27 + obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
27 28
+1
drivers/net/usb/cdc_ether.c
···
431 431 .bind = cdc_bind,
432 432 .unbind = usbnet_cdc_unbind,
433 433 .status = cdc_status,
434 + .manage_power = cdc_manage_power,
434 435 };
435 436
436 437 /*-------------------------------------------------------------------------*/
+8 -7
drivers/net/usb/ipheth.c
···
122 122
123 123 tx_urb = usb_alloc_urb(0, GFP_KERNEL);
124 124 if (tx_urb == NULL)
125 - goto error;
125 + goto error_nomem;
126 126
127 127 rx_urb = usb_alloc_urb(0, GFP_KERNEL);
128 128 if (rx_urb == NULL)
129 - goto error;
129 + goto free_tx_urb;
130 130
131 131 tx_buf = usb_buffer_alloc(iphone->udev,
132 132 IPHETH_BUF_SIZE,
133 133 GFP_KERNEL,
134 134 &tx_urb->transfer_dma);
135 135 if (tx_buf == NULL)
136 - goto error;
136 + goto free_rx_urb;
137 137
138 138 rx_buf = usb_buffer_alloc(iphone->udev,
139 139 IPHETH_BUF_SIZE,
140 140 GFP_KERNEL,
141 141 &rx_urb->transfer_dma);
142 142 if (rx_buf == NULL)
143 - goto error;
143 + goto free_tx_buf;
144 144
145 145
146 146 iphone->tx_urb = tx_urb;
···
149 149 iphone->rx_buf = rx_buf;
150 150 return 0;
151 151
152 - error:
153 - usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, rx_buf,
154 - rx_urb->transfer_dma);
152 + free_tx_buf:
155 153 usb_buffer_free(iphone->udev, IPHETH_BUF_SIZE, tx_buf,
156 154 tx_urb->transfer_dma);
155 + free_rx_urb:
157 156 usb_free_urb(rx_urb);
157 + free_tx_urb:
158 158 usb_free_urb(tx_urb);
159 + error_nomem:
159 160 return -ENOMEM;
160 161 }
161 162
+1
drivers/net/usb/kaweth.c
···
145 145 { USB_DEVICE(0x0707, 0x0100) }, /* SMC 2202USB */
146 146 { USB_DEVICE(0x07aa, 0x0001) }, /* Correga K.K. */
147 147 { USB_DEVICE(0x07b8, 0x4000) }, /* D-Link DU-E10 */
148 + { USB_DEVICE(0x07c9, 0xb010) }, /* Allied Telesyn AT-USB10 USB Ethernet Adapter */
148 149 { USB_DEVICE(0x0846, 0x1001) }, /* NetGear EA-101 */
149 150 { USB_DEVICE(0x0846, 0x1002) }, /* NetGear EA-101 */
150 151 { USB_DEVICE(0x085a, 0x0008) }, /* PortGear Ethernet Adapter */
+1001
drivers/net/usb/sierra_net.c
··· 1 + /* 2 + * USB-to-WWAN Driver for Sierra Wireless modems 3 + * 4 + * Copyright (C) 2008, 2009, 2010 Paxton Smith, Matthew Safar, Rory Filer 5 + * <linux@sierrawireless.com> 6 + * 7 + * Portions of this based on the cdc_ether driver by David Brownell (2003-2005) 8 + * and Ole Andre Vadla Ravnas (ActiveSync) (2006). 9 + * 10 + * IMPORTANT DISCLAIMER: This driver is not commercially supported by 11 + * Sierra Wireless. Use at your own risk. 12 + * 13 + * This program is free software; you can redistribute it and/or modify 14 + * it under the terms of the GNU General Public License as published by 15 + * the Free Software Foundation; either version 2 of the License, or 16 + * (at your option) any later version. 17 + * 18 + * This program is distributed in the hope that it will be useful, 19 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 20 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 + * GNU General Public License for more details. 22 + * 23 + * You should have received a copy of the GNU General Public License 24 + * along with this program; if not, write to the Free Software 25 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 26 + */ 27 + 28 + #define DRIVER_VERSION "v.2.0" 29 + #define DRIVER_AUTHOR "Paxton Smith, Matthew Safar, Rory Filer" 30 + #define DRIVER_DESC "USB-to-WWAN Driver for Sierra Wireless modems" 31 + static const char driver_name[] = "sierra_net"; 32 + 33 + /* if defined debug messages enabled */ 34 + /*#define DEBUG*/ 35 + 36 + #include <linux/module.h> 37 + #include <linux/etherdevice.h> 38 + #include <linux/ethtool.h> 39 + #include <linux/mii.h> 40 + #include <linux/sched.h> 41 + #include <linux/timer.h> 42 + #include <linux/usb.h> 43 + #include <linux/usb/cdc.h> 44 + #include <net/ip.h> 45 + #include <net/udp.h> 46 + #include <asm/unaligned.h> 47 + #include <linux/usb/usbnet.h> 48 + 49 + #define SWI_USB_REQUEST_GET_FW_ATTR 0x06 50 + #define SWI_GET_FW_ATTR_MASK 0x08 51 + 52 + /* atomic counter partially included in MAC address to make sure 2 devices 53 + * do not end up with the same MAC - concept breaks in case of > 255 ifaces 54 + */ 55 + static atomic_t iface_counter = ATOMIC_INIT(0); 56 + 57 + /* 58 + * SYNC Timer Delay definition used to set the expiry time 59 + */ 60 + #define SIERRA_NET_SYNCDELAY (2*HZ) 61 + 62 + /* Max. MTU supported. The modem buffers are limited to 1500 */ 63 + #define SIERRA_NET_MAX_SUPPORTED_MTU 1500 64 + 65 + /* The SIERRA_NET_USBCTL_BUF_LEN defines a buffer size allocated for control 66 + * message reception ... and thus the max. received packet. 
67 + * (May be the cause for parse_hip returning -EINVAL) 68 + */ 69 + #define SIERRA_NET_USBCTL_BUF_LEN 1024 70 + 71 + /* list of interface numbers - used for constructing interface lists */ 72 + struct sierra_net_iface_info { 73 + const u32 infolen; /* number of interface numbers on list */ 74 + const u8 *ifaceinfo; /* pointer to the array holding the numbers */ 75 + }; 76 + 77 + struct sierra_net_info_data { 78 + u16 rx_urb_size; 79 + struct sierra_net_iface_info whitelist; 80 + }; 81 + 82 + /* Private data structure */ 83 + struct sierra_net_data { 84 + 85 + u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */ 86 + 87 + u16 link_up; /* air link up or down */ 88 + u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */ 89 + 90 + u8 sync_msg[4]; /* SYNC message */ 91 + u8 shdwn_msg[4]; /* Shutdown message */ 92 + 93 + /* Backpointer to the container */ 94 + struct usbnet *usbnet; 95 + 96 + u8 ifnum; /* interface number */ 97 + 98 + /* Bit masks, must be a power of 2 */ 99 + #define SIERRA_NET_EVENT_RESP_AVAIL 0x01 100 + #define SIERRA_NET_TIMER_EXPIRY 0x02 101 + unsigned long kevent_flags; 102 + struct work_struct sierra_net_kevent; 103 + struct timer_list sync_timer; /* For retrying SYNC sequence */ 104 + }; 105 + 106 + struct param { 107 + int is_present; 108 + union { 109 + void *ptr; 110 + u32 dword; 111 + u16 word; 112 + u8 byte; 113 + }; 114 + }; 115 + 116 + /* HIP message type */ 117 + #define SIERRA_NET_HIP_EXTENDEDID 0x7F 118 + #define SIERRA_NET_HIP_HSYNC_ID 0x60 /* Modem -> host */ 119 + #define SIERRA_NET_HIP_RESTART_ID 0x62 /* Modem -> host */ 120 + #define SIERRA_NET_HIP_MSYNC_ID 0x20 /* Host -> modem */ 121 + #define SIERRA_NET_HIP_SHUTD_ID 0x26 /* Host -> modem */ 122 + 123 + #define SIERRA_NET_HIP_EXT_IP_IN_ID 0x0202 124 + #define SIERRA_NET_HIP_EXT_IP_OUT_ID 0x0002 125 + 126 + /* 3G UMTS Link Sense Indication definitions */ 127 + #define SIERRA_NET_HIP_LSI_UMTSID 0x78 128 + 129 + /* Reverse Channel Grant Indication HIP message */ 130 + #define SIERRA_NET_HIP_RCGI 0x64 131 + 132 + /* LSI Protocol types */ 133 + #define SIERRA_NET_PROTOCOL_UMTS 0x01 134 + /* LSI Coverage */ 135 + #define SIERRA_NET_COVERAGE_NONE 0x00 136 + #define SIERRA_NET_COVERAGE_NOPACKET 0x01 137 + 138 + /* LSI Session */ 139 + #define SIERRA_NET_SESSION_IDLE 0x00 140 + /* LSI Link types */ 141 + #define SIERRA_NET_AS_LINK_TYPE_IPv4 0x00 142 + 143 + struct lsi_umts { 144 + u8 protocol; 145 + u8 unused1; 146 + __be16 length; 147 + /* eventually use a union for the rest - assume umts for now */ 148 + u8 coverage; 149 + u8 unused2[41]; 150 + u8 session_state; 151 + u8 unused3[33]; 152 + u8 link_type; 153 + u8 pdp_addr_len; /* NW-supplied PDP address len */ 154 + u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian)) */ 155 + u8 unused4[23]; 156 + u8 dns1_addr_len; /* NW-supplied 1st DNS address len (bigendian) */ 157 + u8 dns1_addr[16]; /* NW-supplied 1st DNS address */ 158 + u8 dns2_addr_len; /* NW-supplied 2nd DNS address len */ 159 + u8 dns2_addr[16]; /* NW-supplied 2nd DNS address (bigendian)*/ 160 + u8 wins1_addr_len; /* NW-supplied 1st Wins address len */ 161 + u8 wins1_addr[16]; /* NW-supplied 1st Wins address (bigendian)*/ 162 + u8 wins2_addr_len; /* NW-supplied 2nd Wins address len */ 163 + u8 wins2_addr[16]; /* NW-supplied 2nd Wins address (bigendian) */ 164 + u8 unused5[4]; 165 + u8 gw_addr_len; /* NW-supplied GW address len */ 166 + u8 gw_addr[16]; /* NW-supplied GW address (bigendian) */ 167 + u8 reserved[8]; 168 + } __attribute__ ((packed)); 169 + 170 + #define 
SIERRA_NET_LSI_COMMON_LEN 4 171 + #define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts)) 172 + #define SIERRA_NET_LSI_UMTS_STATUS_LEN \ 173 + (SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN) 174 + 175 + /* Forward definitions */ 176 + static void sierra_sync_timer(unsigned long syncdata); 177 + static int sierra_net_change_mtu(struct net_device *net, int new_mtu); 178 + 179 + /* Our own net device operations structure */ 180 + static const struct net_device_ops sierra_net_device_ops = { 181 + .ndo_open = usbnet_open, 182 + .ndo_stop = usbnet_stop, 183 + .ndo_start_xmit = usbnet_start_xmit, 184 + .ndo_tx_timeout = usbnet_tx_timeout, 185 + .ndo_change_mtu = sierra_net_change_mtu, 186 + .ndo_set_mac_address = eth_mac_addr, 187 + .ndo_validate_addr = eth_validate_addr, 188 + }; 189 + 190 + /* get private data associated with passed in usbnet device */ 191 + static inline struct sierra_net_data *sierra_net_get_private(struct usbnet *dev) 192 + { 193 + return (struct sierra_net_data *)dev->data[0]; 194 + } 195 + 196 + /* set private data associated with passed in usbnet device */ 197 + static inline void sierra_net_set_private(struct usbnet *dev, 198 + struct sierra_net_data *priv) 199 + { 200 + dev->data[0] = (unsigned long)priv; 201 + } 202 + 203 + /* is packet IPv4 */ 204 + static inline int is_ip(struct sk_buff *skb) 205 + { 206 + return (skb->protocol == cpu_to_be16(ETH_P_IP)); 207 + } 208 + 209 + /* 210 + * check passed in packet and make sure that: 211 + * - it is linear (no scatter/gather) 212 + * - it is ethernet (mac_header properly set) 213 + */ 214 + static int check_ethip_packet(struct sk_buff *skb, struct usbnet *dev) 215 + { 216 + skb_reset_mac_header(skb); /* ethernet header */ 217 + 218 + if (skb_is_nonlinear(skb)) { 219 + netdev_err(dev->net, "Non linear buffer-dropping\n"); 220 + return 0; 221 + } 222 + 223 + if (!pskb_may_pull(skb, ETH_HLEN)) 224 + return 0; 225 + skb->protocol = eth_hdr(skb)->h_proto; 226 + 227 + return 1; 228 + } 229 + 230 + static const u8 *save16bit(struct param *p, const u8 *datap) 231 + { 232 + p->is_present = 1; 233 + p->word = get_unaligned_be16(datap); 234 + return datap + sizeof(p->word); 235 + } 236 + 237 + static const u8 *save8bit(struct param *p, const u8 *datap) 238 + { 239 + p->is_present = 1; 240 + p->byte = *datap; 241 + return datap + sizeof(p->byte); 242 + } 243 + 244 + /*----------------------------------------------------------------------------* 245 + * BEGIN HIP * 246 + *----------------------------------------------------------------------------*/ 247 + /* HIP header */ 248 + #define SIERRA_NET_HIP_HDR_LEN 4 249 + /* Extended HIP header */ 250 + #define SIERRA_NET_HIP_EXT_HDR_LEN 6 251 + 252 + struct hip_hdr { 253 + int hdrlen; 254 + struct param payload_len; 255 + struct param msgid; 256 + struct param msgspecific; 257 + struct param extmsgid; 258 + }; 259 + 260 + static int parse_hip(const u8 *buf, const u32 buflen, struct hip_hdr *hh) 261 + { 262 + const u8 *curp = buf; 263 + int padded; 264 + 265 + if (buflen < SIERRA_NET_HIP_HDR_LEN) 266 + return -EPROTO; 267 + 268 + curp = save16bit(&hh->payload_len, curp); 269 + curp = save8bit(&hh->msgid, curp); 270 + curp = save8bit(&hh->msgspecific, curp); 271 + 272 + padded = hh->msgid.byte & 0x80; 273 + hh->msgid.byte &= 0x7F; /* 7 bits */ 274 + 275 + hh->extmsgid.is_present = (hh->msgid.byte == SIERRA_NET_HIP_EXTENDEDID); 276 + if (hh->extmsgid.is_present) { 277 + if (buflen < SIERRA_NET_HIP_EXT_HDR_LEN) 278 + return -EPROTO; 279 + 280 + hh->payload_len.word &= 0x3FFF; /* 14 bits 
*/ 281 + 282 + curp = save16bit(&hh->extmsgid, curp); 283 + hh->extmsgid.word &= 0x03FF; /* 10 bits */ 284 + 285 + hh->hdrlen = SIERRA_NET_HIP_EXT_HDR_LEN; 286 + } else { 287 + hh->payload_len.word &= 0x07FF; /* 11 bits */ 288 + hh->hdrlen = SIERRA_NET_HIP_HDR_LEN; 289 + } 290 + 291 + if (padded) { 292 + hh->hdrlen++; 293 + hh->payload_len.word--; 294 + } 295 + 296 + /* if real packet shorter than the claimed length */ 297 + if (buflen < (hh->hdrlen + hh->payload_len.word)) 298 + return -EINVAL; 299 + 300 + return 0; 301 + } 302 + 303 + static void build_hip(u8 *buf, const u16 payloadlen, 304 + struct sierra_net_data *priv) 305 + { 306 + /* the following doesn't have the full functionality. We 307 + * currently build only one kind of header, so it is faster this way 308 + */ 309 + put_unaligned_be16(payloadlen, buf); 310 + memcpy(buf+2, priv->tx_hdr_template, sizeof(priv->tx_hdr_template)); 311 + } 312 + /*----------------------------------------------------------------------------* 313 + * END HIP * 314 + *----------------------------------------------------------------------------*/ 315 + 316 + static int sierra_net_send_cmd(struct usbnet *dev, 317 + u8 *cmd, int cmdlen, const char * cmd_name) 318 + { 319 + struct sierra_net_data *priv = sierra_net_get_private(dev); 320 + int status; 321 + 322 + status = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 323 + USB_CDC_SEND_ENCAPSULATED_COMMAND, 324 + USB_DIR_OUT|USB_TYPE_CLASS|USB_RECIP_INTERFACE, 0, 325 + priv->ifnum, cmd, cmdlen, USB_CTRL_SET_TIMEOUT); 326 + 327 + if (status != cmdlen && status != -ENODEV) 328 + netdev_err(dev->net, "Submit %s failed %d\n", cmd_name, status); 329 + 330 + return status; 331 + } 332 + 333 + static int sierra_net_send_sync(struct usbnet *dev) 334 + { 335 + int status; 336 + struct sierra_net_data *priv = sierra_net_get_private(dev); 337 + 338 + dev_dbg(&dev->udev->dev, "%s", __func__); 339 + 340 + status = sierra_net_send_cmd(dev, priv->sync_msg, 341 + sizeof(priv->sync_msg), "SYNC"); 342 + 343 + return status; 344 + } 345 + 346 + static void sierra_net_set_ctx_index(struct sierra_net_data *priv, u8 ctx_ix) 347 + { 348 + dev_dbg(&(priv->usbnet->udev->dev), "%s %d", __func__, ctx_ix); 349 + priv->tx_hdr_template[0] = 0x3F; 350 + priv->tx_hdr_template[1] = ctx_ix; 351 + *((u16 *)&priv->tx_hdr_template[2]) = 352 + cpu_to_be16(SIERRA_NET_HIP_EXT_IP_OUT_ID); 353 + } 354 + 355 + static inline int sierra_net_is_valid_addrlen(u8 len) 356 + { 357 + return (len == sizeof(struct in_addr)); 358 + } 359 + 360 + static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen) 361 + { 362 + struct lsi_umts *lsi = (struct lsi_umts *)data; 363 + 364 + if (datalen < sizeof(struct lsi_umts)) { 365 + netdev_err(dev->net, "%s: Data length %d, exp %Zu\n", 366 + __func__, datalen, 367 + sizeof(struct lsi_umts)); 368 + return -1; 369 + } 370 + 371 + if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) { 372 + netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n", 373 + __func__, be16_to_cpu(lsi->length), 374 + (u32)SIERRA_NET_LSI_UMTS_STATUS_LEN); 375 + return -1; 376 + } 377 + 378 + /* Validate the protocol - only support UMTS for now */ 379 + if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) { 380 + netdev_err(dev->net, "Protocol unsupported, 0x%02x\n", 381 + lsi->protocol); 382 + return -1; 383 + } 384 + 385 + /* Validate the link type */ 386 + if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) { 387 + netdev_err(dev->net, "Link type unsupported: 0x%02x\n", 388 + lsi->link_type); 389 + return 
-1; 390 + } 391 + 392 + /* Validate the coverage */ 393 + if (lsi->coverage == SIERRA_NET_COVERAGE_NONE 394 + || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) { 395 + netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage); 396 + return 0; 397 + } 398 + 399 + /* Validate the session state */ 400 + if (lsi->session_state == SIERRA_NET_SESSION_IDLE) { 401 + netdev_err(dev->net, "Session idle, 0x%02x\n", 402 + lsi->session_state); 403 + return 0; 404 + } 405 + 406 + /* Set link_sense true */ 407 + return 1; 408 + } 409 + 410 + static void sierra_net_handle_lsi(struct usbnet *dev, char *data, 411 + struct hip_hdr *hh) 412 + { 413 + struct sierra_net_data *priv = sierra_net_get_private(dev); 414 + int link_up; 415 + 416 + link_up = sierra_net_parse_lsi(dev, data + hh->hdrlen, 417 + hh->payload_len.word); 418 + if (link_up < 0) { 419 + netdev_err(dev->net, "Invalid LSI\n"); 420 + return; 421 + } 422 + if (link_up) { 423 + sierra_net_set_ctx_index(priv, hh->msgspecific.byte); 424 + priv->link_up = 1; 425 + netif_carrier_on(dev->net); 426 + } else { 427 + priv->link_up = 0; 428 + netif_carrier_off(dev->net); 429 + } 430 + } 431 + 432 + static void sierra_net_dosync(struct usbnet *dev) 433 + { 434 + int status; 435 + struct sierra_net_data *priv = sierra_net_get_private(dev); 436 + 437 + dev_dbg(&dev->udev->dev, "%s", __func__); 438 + 439 + /* tell modem we are ready */ 440 + status = sierra_net_send_sync(dev); 441 + if (status < 0) 442 + netdev_err(dev->net, 443 + "Send SYNC failed, status %d\n", status); 444 + status = sierra_net_send_sync(dev); 445 + if (status < 0) 446 + netdev_err(dev->net, 447 + "Send SYNC failed, status %d\n", status); 448 + 449 + /* Now, start a timer and make sure we get the Restart Indication */ 450 + priv->sync_timer.function = sierra_sync_timer; 451 + priv->sync_timer.data = (unsigned long) dev; 452 + priv->sync_timer.expires = jiffies + SIERRA_NET_SYNCDELAY; 453 + add_timer(&priv->sync_timer); 454 + } 455 + 456 + static void sierra_net_kevent(struct work_struct *work) 457 + { 458 + struct sierra_net_data *priv = 459 + container_of(work, struct sierra_net_data, sierra_net_kevent); 460 + struct usbnet *dev = priv->usbnet; 461 + int len; 462 + int err; 463 + u8 *buf; 464 + u8 ifnum; 465 + 466 + if (test_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags)) { 467 + clear_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags); 468 + 469 + /* Query the modem for the LSI message */ 470 + buf = kzalloc(SIERRA_NET_USBCTL_BUF_LEN, GFP_KERNEL); 471 + if (!buf) { 472 + netdev_err(dev->net, 473 + "failed to allocate buf for LS msg\n"); 474 + return; 475 + } 476 + ifnum = priv->ifnum; 477 + len = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), 478 + USB_CDC_GET_ENCAPSULATED_RESPONSE, 479 + USB_DIR_IN|USB_TYPE_CLASS|USB_RECIP_INTERFACE, 480 + 0, ifnum, buf, SIERRA_NET_USBCTL_BUF_LEN, 481 + USB_CTRL_SET_TIMEOUT); 482 + 483 + if (len < 0) { 484 + netdev_err(dev->net, 485 + "usb_control_msg failed, status %d\n", len); 486 + } else { 487 + struct hip_hdr hh; 488 + 489 + dev_dbg(&dev->udev->dev, "%s: Received status message," 490 + " %04x bytes", __func__, len); 491 + 492 + err = parse_hip(buf, len, &hh); 493 + if (err) { 494 + netdev_err(dev->net, "%s: Bad packet," 495 + " parse result %d\n", __func__, err); 496 + kfree(buf); 497 + return; 498 + } 499 + 500 + /* Validate packet length */ 501 + if (len != hh.hdrlen + hh.payload_len.word) { 502 + netdev_err(dev->net, "%s: Bad packet, received" 503 + " %d, expected %d\n", __func__, len, 504 + hh.hdrlen + 
hh.payload_len.word); 505 + kfree(buf); 506 + return; 507 + } 508 + 509 + /* Switch on received message types */ 510 + switch (hh.msgid.byte) { 511 + case SIERRA_NET_HIP_LSI_UMTSID: 512 + dev_dbg(&dev->udev->dev, "LSI for ctx:%d", 513 + hh.msgspecific.byte); 514 + sierra_net_handle_lsi(dev, buf, &hh); 515 + break; 516 + case SIERRA_NET_HIP_RESTART_ID: 517 + dev_dbg(&dev->udev->dev, "Restart reported: %d," 518 + " stopping sync timer", 519 + hh.msgspecific.byte); 520 + /* Got sync resp - stop timer & clear mask */ 521 + del_timer_sync(&priv->sync_timer); 522 + clear_bit(SIERRA_NET_TIMER_EXPIRY, 523 + &priv->kevent_flags); 524 + break; 525 + case SIERRA_NET_HIP_HSYNC_ID: 526 + dev_dbg(&dev->udev->dev, "SYNC received"); 527 + err = sierra_net_send_sync(dev); 528 + if (err < 0) 529 + netdev_err(dev->net, 530 + "Send SYNC failed %d\n", err); 531 + break; 532 + case SIERRA_NET_HIP_EXTENDEDID: 533 + netdev_err(dev->net, "Unrecognized HIP msg, " 534 + "extmsgid 0x%04x\n", hh.extmsgid.word); 535 + break; 536 + case SIERRA_NET_HIP_RCGI: 537 + /* Ignored */ 538 + break; 539 + default: 540 + netdev_err(dev->net, "Unrecognized HIP msg, " 541 + "msgid 0x%02x\n", hh.msgid.byte); 542 + break; 543 + } 544 + } 545 + kfree(buf); 546 + } 547 + /* The sync timer bit might be set */ 548 + if (test_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags)) { 549 + clear_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags); 550 + dev_dbg(&dev->udev->dev, "Deferred sync timer expiry"); 551 + sierra_net_dosync(priv->usbnet); 552 + } 553 + 554 + if (priv->kevent_flags) 555 + dev_dbg(&dev->udev->dev, "sierra_net_kevent done, " 556 + "kevent_flags = 0x%lx", priv->kevent_flags); 557 + } 558 + 559 + static void sierra_net_defer_kevent(struct usbnet *dev, int work) 560 + { 561 + struct sierra_net_data *priv = sierra_net_get_private(dev); 562 + 563 + set_bit(work, &priv->kevent_flags); 564 + schedule_work(&priv->sierra_net_kevent); 565 + } 566 + 567 + /* 568 + * Sync Retransmit Timer Handler. On expiry, kick the work queue 569 + */ 570 + void sierra_sync_timer(unsigned long syncdata) 571 + { 572 + struct usbnet *dev = (struct usbnet *)syncdata; 573 + 574 + dev_dbg(&dev->udev->dev, "%s", __func__); 575 + /* Kick the tasklet */ 576 + sierra_net_defer_kevent(dev, SIERRA_NET_TIMER_EXPIRY); 577 + } 578 + 579 + static void sierra_net_status(struct usbnet *dev, struct urb *urb) 580 + { 581 + struct usb_cdc_notification *event; 582 + 583 + dev_dbg(&dev->udev->dev, "%s", __func__); 584 + 585 + if (urb->actual_length < sizeof *event) 586 + return; 587 + 588 + /* Add cases to handle other standard notifications. 
*/ 589 + event = urb->transfer_buffer; 590 + switch (event->bNotificationType) { 591 + case USB_CDC_NOTIFY_NETWORK_CONNECTION: 592 + case USB_CDC_NOTIFY_SPEED_CHANGE: 593 + /* USB 305 sends those */ 594 + break; 595 + case USB_CDC_NOTIFY_RESPONSE_AVAILABLE: 596 + sierra_net_defer_kevent(dev, SIERRA_NET_EVENT_RESP_AVAIL); 597 + break; 598 + default: 599 + netdev_err(dev->net, ": unexpected notification %02x!\n", 600 + event->bNotificationType); 601 + break; 602 + } 603 + } 604 + 605 + static void sierra_net_get_drvinfo(struct net_device *net, 606 + struct ethtool_drvinfo *info) 607 + { 608 + /* Inherit standard device info */ 609 + usbnet_get_drvinfo(net, info); 610 + strncpy(info->driver, driver_name, sizeof info->driver); 611 + strncpy(info->version, DRIVER_VERSION, sizeof info->version); 612 + } 613 + 614 + static u32 sierra_net_get_link(struct net_device *net) 615 + { 616 + struct usbnet *dev = netdev_priv(net); 617 + /* Report link is down whenever the interface is down */ 618 + return sierra_net_get_private(dev)->link_up && netif_running(net); 619 + } 620 + 621 + static struct ethtool_ops sierra_net_ethtool_ops = { 622 + .get_drvinfo = sierra_net_get_drvinfo, 623 + .get_link = sierra_net_get_link, 624 + .get_msglevel = usbnet_get_msglevel, 625 + .set_msglevel = usbnet_set_msglevel, 626 + .get_settings = usbnet_get_settings, 627 + .set_settings = usbnet_set_settings, 628 + .nway_reset = usbnet_nway_reset, 629 + }; 630 + 631 + /* MTU can not be more than 1500 bytes, enforce it. */ 632 + static int sierra_net_change_mtu(struct net_device *net, int new_mtu) 633 + { 634 + if (new_mtu > SIERRA_NET_MAX_SUPPORTED_MTU) 635 + return -EINVAL; 636 + 637 + return usbnet_change_mtu(net, new_mtu); 638 + } 639 + 640 + static int is_whitelisted(const u8 ifnum, 641 + const struct sierra_net_iface_info *whitelist) 642 + { 643 + if (whitelist) { 644 + const u8 *list = whitelist->ifaceinfo; 645 + int i; 646 + 647 + for (i = 0; i < whitelist->infolen; i++) { 648 + if (list[i] == ifnum) 649 + return 1; 650 + } 651 + } 652 + return 0; 653 + } 654 + 655 + static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap) 656 + { 657 + int result = 0; 658 + u16 *attrdata; 659 + 660 + attrdata = kmalloc(sizeof(*attrdata), GFP_KERNEL); 661 + if (!attrdata) 662 + return -ENOMEM; 663 + 664 + result = usb_control_msg( 665 + dev->udev, 666 + usb_rcvctrlpipe(dev->udev, 0), 667 + /* _u8 vendor specific request */ 668 + SWI_USB_REQUEST_GET_FW_ATTR, 669 + USB_DIR_IN | USB_TYPE_VENDOR, /* __u8 request type */ 670 + 0x0000, /* __u16 value not used */ 671 + 0x0000, /* __u16 index not used */ 672 + attrdata, /* char *data */ 673 + sizeof(*attrdata), /* __u16 size */ 674 + USB_CTRL_SET_TIMEOUT); /* int timeout */ 675 + 676 + if (result < 0) { 677 + kfree(attrdata); 678 + return -EIO; 679 + } 680 + 681 + *datap = *attrdata; 682 + 683 + kfree(attrdata); 684 + return result; 685 + } 686 + 687 + /* 688 + * collects the bulk endpoints, the status endpoint. 
689 + */ 690 + static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) 691 + { 692 + u8 ifacenum; 693 + u8 numendpoints; 694 + u16 fwattr = 0; 695 + int status; 696 + struct ethhdr *eth; 697 + struct sierra_net_data *priv; 698 + static const u8 sync_tmplate[sizeof(priv->sync_msg)] = { 699 + 0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00}; 700 + static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = { 701 + 0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00}; 702 + 703 + struct sierra_net_info_data *data = 704 + (struct sierra_net_info_data *)dev->driver_info->data; 705 + 706 + dev_dbg(&dev->udev->dev, "%s", __func__); 707 + 708 + ifacenum = intf->cur_altsetting->desc.bInterfaceNumber; 709 + /* We only accept certain interfaces */ 710 + if (!is_whitelisted(ifacenum, &data->whitelist)) { 711 + dev_dbg(&dev->udev->dev, "Ignoring interface: %d", ifacenum); 712 + return -ENODEV; 713 + } 714 + numendpoints = intf->cur_altsetting->desc.bNumEndpoints; 715 + /* We have three endpoints, bulk in and out, and a status */ 716 + if (numendpoints != 3) { 717 + dev_err(&dev->udev->dev, "Expected 3 endpoints, found: %d", 718 + numendpoints); 719 + return -ENODEV; 720 + } 721 + /* Status endpoint set in usbnet_get_endpoints() */ 722 + dev->status = NULL; 723 + status = usbnet_get_endpoints(dev, intf); 724 + if (status < 0) { 725 + dev_err(&dev->udev->dev, "Error in usbnet_get_endpoints (%d)", 726 + status); 727 + return -ENODEV; 728 + } 729 + /* Initialize sierra private data */ 730 + priv = kzalloc(sizeof *priv, GFP_KERNEL); 731 + if (!priv) { 732 + dev_err(&dev->udev->dev, "No memory"); 733 + return -ENOMEM; 734 + } 735 + 736 + priv->usbnet = dev; 737 + priv->ifnum = ifacenum; 738 + dev->net->netdev_ops = &sierra_net_device_ops; 739 + 740 + /* change MAC addr to include, ifacenum, and to be unique */ 741 + dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter); 742 + dev->net->dev_addr[ETH_ALEN-1] = ifacenum; 743 + 744 + /* we will have to manufacture ethernet headers, prepare template */ 745 + eth = (struct ethhdr *)priv->ethr_hdr_tmpl; 746 + memcpy(&eth->h_dest, dev->net->dev_addr, ETH_ALEN); 747 + eth->h_proto = cpu_to_be16(ETH_P_IP); 748 + 749 + /* prepare shutdown message template */ 750 + memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg)); 751 + /* set context index initially to 0 - prepares tx hdr template */ 752 + sierra_net_set_ctx_index(priv, 0); 753 + 754 + /* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */ 755 + dev->rx_urb_size = data->rx_urb_size; 756 + if (dev->udev->speed != USB_SPEED_HIGH) 757 + dev->rx_urb_size = min_t(size_t, 4096, data->rx_urb_size); 758 + 759 + dev->net->hard_header_len += SIERRA_NET_HIP_EXT_HDR_LEN; 760 + dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; 761 + 762 + /* Set up the netdev */ 763 + dev->net->flags |= IFF_NOARP; 764 + dev->net->ethtool_ops = &sierra_net_ethtool_ops; 765 + netif_carrier_off(dev->net); 766 + 767 + sierra_net_set_private(dev, priv); 768 + 769 + priv->kevent_flags = 0; 770 + 771 + /* Use the shared workqueue */ 772 + INIT_WORK(&priv->sierra_net_kevent, sierra_net_kevent); 773 + 774 + /* Only need to do this once */ 775 + init_timer(&priv->sync_timer); 776 + 777 + /* verify fw attributes */ 778 + status = sierra_net_get_fw_attr(dev, &fwattr); 779 + dev_dbg(&dev->udev->dev, "Fw attr: %x\n", fwattr); 780 + 781 + /* test whether firmware supports DHCP */ 782 + if (!(status == sizeof(fwattr) && (fwattr & SWI_GET_FW_ATTR_MASK))) { 783 + /* found incompatible firmware version */ 784 + 
dev_err(&dev->udev->dev, "Incompatible driver and firmware" 785 + " versions\n"); 786 + kfree(priv); 787 + return -ENODEV; 788 + } 789 + /* prepare sync message from template */ 790 + memcpy(priv->sync_msg, sync_tmplate, sizeof(priv->sync_msg)); 791 + 792 + return 0; 793 + } 794 + 795 + static void sierra_net_unbind(struct usbnet *dev, struct usb_interface *intf) 796 + { 797 + int status; 798 + struct sierra_net_data *priv = sierra_net_get_private(dev); 799 + 800 + dev_dbg(&dev->udev->dev, "%s", __func__); 801 + 802 + /* Kill the timer then flush the work queue */ 803 + del_timer_sync(&priv->sync_timer); 804 + 805 + flush_scheduled_work(); 806 + 807 + /* tell modem we are going away */ 808 + status = sierra_net_send_cmd(dev, priv->shdwn_msg, 809 + sizeof(priv->shdwn_msg), "Shutdown"); 810 + if (status < 0) 811 + netdev_err(dev->net, 812 + "usb_control_msg failed, status %d\n", status); 813 + 814 + sierra_net_set_private(dev, NULL); 815 + 816 + kfree(priv); 817 + } 818 + 819 + static struct sk_buff *sierra_net_skb_clone(struct usbnet *dev, 820 + struct sk_buff *skb, int len) 821 + { 822 + struct sk_buff *new_skb; 823 + 824 + /* clone skb */ 825 + new_skb = skb_clone(skb, GFP_ATOMIC); 826 + 827 + /* remove len bytes from original */ 828 + skb_pull(skb, len); 829 + 830 + /* trim next packet to it's length */ 831 + if (new_skb) { 832 + skb_trim(new_skb, len); 833 + } else { 834 + if (netif_msg_rx_err(dev)) 835 + netdev_err(dev->net, "failed to get skb\n"); 836 + dev->net->stats.rx_dropped++; 837 + } 838 + 839 + return new_skb; 840 + } 841 + 842 + /* ---------------------------- Receive data path ----------------------*/ 843 + static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb) 844 + { 845 + int err; 846 + struct hip_hdr hh; 847 + struct sk_buff *new_skb; 848 + 849 + dev_dbg(&dev->udev->dev, "%s", __func__); 850 + 851 + /* could contain multiple packets */ 852 + while (likely(skb->len)) { 853 + err = parse_hip(skb->data, skb->len, &hh); 854 + if (err) { 855 + if (netif_msg_rx_err(dev)) 856 + netdev_err(dev->net, "Invalid HIP header %d\n", 857 + err); 858 + /* dev->net->stats.rx_errors incremented by caller */ 859 + dev->net->stats.rx_length_errors++; 860 + return 0; 861 + } 862 + 863 + /* Validate Extended HIP header */ 864 + if (!hh.extmsgid.is_present 865 + || hh.extmsgid.word != SIERRA_NET_HIP_EXT_IP_IN_ID) { 866 + if (netif_msg_rx_err(dev)) 867 + netdev_err(dev->net, "HIP/ETH: Invalid pkt\n"); 868 + 869 + dev->net->stats.rx_frame_errors++; 870 + /* dev->net->stats.rx_errors incremented by caller */; 871 + return 0; 872 + } 873 + 874 + skb_pull(skb, hh.hdrlen); 875 + 876 + /* We are going to accept this packet, prepare it */ 877 + memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl, 878 + ETH_HLEN); 879 + 880 + /* Last packet in batch handled by usbnet */ 881 + if (hh.payload_len.word == skb->len) 882 + return 1; 883 + 884 + new_skb = sierra_net_skb_clone(dev, skb, hh.payload_len.word); 885 + if (new_skb) 886 + usbnet_skb_return(dev, new_skb); 887 + 888 + } /* while */ 889 + 890 + return 0; 891 + } 892 + 893 + /* ---------------------------- Transmit data path ----------------------*/ 894 + struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb, 895 + gfp_t flags) 896 + { 897 + struct sierra_net_data *priv = sierra_net_get_private(dev); 898 + u16 len; 899 + bool need_tail; 900 + 901 + dev_dbg(&dev->udev->dev, "%s", __func__); 902 + if (priv->link_up && check_ethip_packet(skb, dev) && is_ip(skb)) { 903 + /* enough head room as is? 
*/ 904 + if (SIERRA_NET_HIP_EXT_HDR_LEN <= skb_headroom(skb)) { 905 + /* Save the Eth/IP length and set up HIP hdr */ 906 + len = skb->len; 907 + skb_push(skb, SIERRA_NET_HIP_EXT_HDR_LEN); 908 + /* Handle ZLP issue */ 909 + need_tail = ((len + SIERRA_NET_HIP_EXT_HDR_LEN) 910 + % dev->maxpacket == 0); 911 + if (need_tail) { 912 + if (unlikely(skb_tailroom(skb) == 0)) { 913 + netdev_err(dev->net, "tx_fixup:" 914 + "no room for packet\n"); 915 + dev_kfree_skb_any(skb); 916 + return NULL; 917 + } else { 918 + skb->data[skb->len] = 0; 919 + __skb_put(skb, 1); 920 + len = len + 1; 921 + } 922 + } 923 + build_hip(skb->data, len, priv); 924 + return skb; 925 + } else { 926 + /* 927 + * compensate in the future if necessary 928 + */ 929 + netdev_err(dev->net, "tx_fixup: no room for HIP\n"); 930 + } /* headroom */ 931 + } 932 + 933 + if (!priv->link_up) 934 + dev->net->stats.tx_carrier_errors++; 935 + 936 + /* tx_dropped incremented by usbnet */ 937 + 938 + /* filter the packet out, release it */ 939 + dev_kfree_skb_any(skb); 940 + return NULL; 941 + } 942 + 943 + static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 }; 944 + static const struct sierra_net_info_data sierra_net_info_data_68A3 = { 945 + .rx_urb_size = 8 * 1024, 946 + .whitelist = { 947 + .infolen = ARRAY_SIZE(sierra_net_ifnum_list), 948 + .ifaceinfo = sierra_net_ifnum_list 949 + } 950 + }; 951 + 952 + static const struct driver_info sierra_net_info_68A3 = { 953 + .description = "Sierra Wireless USB-to-WWAN Modem", 954 + .flags = FLAG_WWAN | FLAG_SEND_ZLP, 955 + .bind = sierra_net_bind, 956 + .unbind = sierra_net_unbind, 957 + .status = sierra_net_status, 958 + .rx_fixup = sierra_net_rx_fixup, 959 + .tx_fixup = sierra_net_tx_fixup, 960 + .data = (unsigned long)&sierra_net_info_data_68A3, 961 + }; 962 + 963 + static const struct usb_device_id products[] = { 964 + {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ 965 + .driver_info = (unsigned long) &sierra_net_info_68A3}, 966 + 967 + {}, /* last item */ 968 + }; 969 + MODULE_DEVICE_TABLE(usb, products); 970 + 971 + /* We are based on usbnet, so let it handle the USB driver specifics */ 972 + static struct usb_driver sierra_net_driver = { 973 + .name = "sierra_net", 974 + .id_table = products, 975 + .probe = usbnet_probe, 976 + .disconnect = usbnet_disconnect, 977 + .suspend = usbnet_suspend, 978 + .resume = usbnet_resume, 979 + .no_dynamic_id = 1, 980 + }; 981 + 982 + static int __init sierra_net_init(void) 983 + { 984 + BUILD_BUG_ON(FIELD_SIZEOF(struct usbnet, data) 985 + < sizeof(struct cdc_state)); 986 + 987 + return usb_register(&sierra_net_driver); 988 + } 989 + 990 + static void __exit sierra_net_exit(void) 991 + { 992 + usb_deregister(&sierra_net_driver); 993 + } 994 + 995 + module_exit(sierra_net_exit); 996 + module_init(sierra_net_init); 997 + 998 + MODULE_AUTHOR(DRIVER_AUTHOR); 999 + MODULE_DESCRIPTION(DRIVER_DESC); 1000 + MODULE_VERSION(DRIVER_VERSION); 1001 + MODULE_LICENSE("GPL");
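A note on the ZLP handling in sierra_net_tx_fixup() above: when the HIP-framed frame length is an exact multiple of the bulk endpoint's max packet size, the driver appends a single zero pad byte so the device is not left waiting for a zero-length packet. A minimal stand-alone sketch of that length calculation (hypothetical helper name, not part of the driver):

/* Sketch only: compute the on-the-wire length the driver would queue,
 * given a payload of payload_len bytes, a HIP extended header of
 * hdr_len bytes, and a bulk endpoint with max_packet bytes per packet. */
static unsigned int sierra_example_tx_len(unsigned int payload_len,
					  unsigned int hdr_len,
					  unsigned int max_packet)
{
	unsigned int len = payload_len + hdr_len;

	/* An exact multiple of max_packet would normally need a
	 * zero-length packet to terminate the transfer; the driver
	 * appends one pad byte instead. */
	if (len % max_packet == 0)
		len += 1;

	return len;
}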
+1
include/net/sctp/command.h
··· 107 107 SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */ 108 108 SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */ 109 109 SCTP_CMD_SEND_MSG, /* Send the whole use message */ 110 + SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */ 110 111 SCTP_CMD_LAST 111 112 } sctp_verb_t; 112 113
+1
include/net/sctp/sctp.h
··· 128 128 int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb); 129 129 int sctp_inet_listen(struct socket *sock, int backlog); 130 130 void sctp_write_space(struct sock *sk); 131 + void sctp_data_ready(struct sock *sk, int len); 131 132 unsigned int sctp_poll(struct file *file, struct socket *sock, 132 133 poll_table *wait); 133 134 void sctp_sock_rfree(struct sk_buff *skb);
+4 -1
net/bluetooth/l2cap.c
··· 1626 1626 /* Connectionless channel */ 1627 1627 if (sk->sk_type == SOCK_DGRAM) { 1628 1628 skb = l2cap_create_connless_pdu(sk, msg, len); 1629 - err = l2cap_do_send(sk, skb); 1629 + if (IS_ERR(skb)) 1630 + err = PTR_ERR(skb); 1631 + else 1632 + err = l2cap_do_send(sk, skb); 1630 1633 goto done; 1631 1634 } 1632 1635
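The l2cap change above relies on the kernel's error-pointer convention from <linux/err.h>: a function returning a pointer can encode an errno in the pointer itself with ERR_PTR(), and callers test it with IS_ERR()/PTR_ERR() rather than a NULL check. A minimal sketch of the pattern (illustrative function names, not the actual L2CAP code):

#include <linux/err.h>
#include <linux/skbuff.h>

/* Illustrative producer: encodes failure in the returned pointer. */
static struct sk_buff *example_alloc_pdu(unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);
	return skb;
}

/* Illustrative consumer: check IS_ERR() before touching the skb. */
static int example_send_pdu(unsigned int len)
{
	struct sk_buff *skb = example_alloc_pdu(len);

	if (IS_ERR(skb))
		return PTR_ERR(skb);	/* e.g. -ENOMEM */

	/* ... hand the skb to the transmit path here ... */
	kfree_skb(skb);
	return 0;
}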
+3 -3
net/bridge/br_multicast.c
··· 957 957 unsigned offset; 958 958 int err; 959 959 960 - BR_INPUT_SKB_CB(skb)->igmp = 0; 961 - BR_INPUT_SKB_CB(skb)->mrouters_only = 0; 962 - 963 960 /* We treat OOM as packet loss for now. */ 964 961 if (!pskb_may_pull(skb, sizeof(*iph))) 965 962 return -EINVAL; ··· 1046 1049 int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, 1047 1050 struct sk_buff *skb) 1048 1051 { 1052 + BR_INPUT_SKB_CB(skb)->igmp = 0; 1053 + BR_INPUT_SKB_CB(skb)->mrouters_only = 0; 1054 + 1049 1055 if (br->multicast_disabled) 1050 1056 return 0; 1051 1057
+3
net/ieee802154/af_ieee802154.c
··· 151 151 dev_load(sock_net(sk), ifr.ifr_name); 152 152 dev = dev_get_by_name(sock_net(sk), ifr.ifr_name); 153 153 154 + if (!dev) 155 + return -ENODEV; 156 + 154 157 if (dev->type == ARPHRD_IEEE802154 && dev->netdev_ops->ndo_do_ioctl) 155 158 ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd); 156 159
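The af_ieee802154 fix adds the NULL check that dev_get_by_name() requires: it returns NULL when no interface matches and otherwise returns the device with a reference held, so the caller must both test the result and eventually drop the reference with dev_put(). A minimal sketch of the pattern (illustrative helper, not the actual ioctl handler):

#include <linux/errno.h>
#include <linux/netdevice.h>

/* Sketch only: look up a device by name, use it, and release it. */
static int example_use_device(struct net *net, const char *name)
{
	struct net_device *dev = dev_get_by_name(net, name);

	if (!dev)
		return -ENODEV;		/* the check the patch adds */

	/* ... e.g. call dev->netdev_ops->ndo_do_ioctl() here ... */

	dev_put(dev);			/* drop the reference taken above */
	return 0;
}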
+5 -11
net/ipv4/inet_connection_sock.c
··· 70 70 (!sk->sk_bound_dev_if || 71 71 !sk2->sk_bound_dev_if || 72 72 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { 73 - const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2); 74 - 75 73 if (!reuse || !sk2->sk_reuse || 76 74 sk2->sk_state == TCP_LISTEN) { 75 + const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2); 77 76 if (!sk2_rcv_saddr || !sk_rcv_saddr || 78 77 sk2_rcv_saddr == sk_rcv_saddr) 79 78 break; 80 - } else if (reuse && sk2->sk_reuse && 81 - sk2_rcv_saddr && 82 - sk2_rcv_saddr == sk_rcv_saddr) 83 - break; 79 + } 84 80 } 85 81 } 86 82 return node != NULL; ··· 120 124 smallest_size = tb->num_owners; 121 125 smallest_rover = rover; 122 126 if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) { 123 - if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) { 124 - spin_unlock(&head->lock); 125 - snum = smallest_rover; 126 - goto have_snum; 127 - } 127 + spin_unlock(&head->lock); 128 + snum = smallest_rover; 129 + goto have_snum; 128 130 } 129 131 } 130 132 goto next;
+5 -10
net/ipv6/inet6_connection_sock.c
··· 42 42 if (sk != sk2 && 43 43 (!sk->sk_bound_dev_if || 44 44 !sk2->sk_bound_dev_if || 45 - sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { 46 - if ((!sk->sk_reuse || !sk2->sk_reuse || 47 - sk2->sk_state == TCP_LISTEN) && 48 - ipv6_rcv_saddr_equal(sk, sk2)) 49 - break; 50 - else if (sk->sk_reuse && sk2->sk_reuse && 51 - !ipv6_addr_any(inet6_rcv_saddr(sk)) && 52 - ipv6_rcv_saddr_equal(sk, sk2)) 53 - break; 54 - } 45 + sk->sk_bound_dev_if == sk2->sk_bound_dev_if) && 46 + (!sk->sk_reuse || !sk2->sk_reuse || 47 + sk2->sk_state == TCP_LISTEN) && 48 + ipv6_rcv_saddr_equal(sk, sk2)) 49 + break; 55 50 } 56 51 57 52 return node != NULL;
+4 -2
net/sctp/associola.c
··· 1194 1194 /* Remove any peer addresses not present in the new association. */ 1195 1195 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { 1196 1196 trans = list_entry(pos, struct sctp_transport, transports); 1197 - if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) 1198 - sctp_assoc_del_peer(asoc, &trans->ipaddr); 1197 + if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) { 1198 + sctp_assoc_rm_peer(asoc, trans); 1199 + continue; 1200 + } 1199 1201 1200 1202 if (asoc->state >= SCTP_STATE_ESTABLISHED) 1201 1203 sctp_transport_reset(trans);
+1
net/sctp/endpointola.c
··· 144 144 /* Use SCTP specific send buffer space queues. */ 145 145 ep->sndbuf_policy = sctp_sndbuf_policy; 146 146 147 + sk->sk_data_ready = sctp_data_ready; 147 148 sk->sk_write_space = sctp_write_space; 148 149 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); 149 150
+10 -22
net/sctp/sm_make_chunk.c
··· 208 208 sp = sctp_sk(asoc->base.sk); 209 209 num_types = sp->pf->supported_addrs(sp, types); 210 210 211 - chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN(num_types); 211 + chunksize = sizeof(init) + addrs_len; 212 + chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types)); 212 213 chunksize += sizeof(ecap_param); 213 214 214 215 if (sctp_prsctp_enable) ··· 239 238 /* Add HMACS parameter length if any were defined */ 240 239 auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; 241 240 if (auth_hmacs->length) 242 - chunksize += ntohs(auth_hmacs->length); 241 + chunksize += WORD_ROUND(ntohs(auth_hmacs->length)); 243 242 else 244 243 auth_hmacs = NULL; 245 244 246 245 /* Add CHUNKS parameter length */ 247 246 auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; 248 247 if (auth_chunks->length) 249 - chunksize += ntohs(auth_chunks->length); 248 + chunksize += WORD_ROUND(ntohs(auth_chunks->length)); 250 249 else 251 250 auth_chunks = NULL; 252 251 ··· 256 255 257 256 /* If we have any extensions to report, account for that */ 258 257 if (num_ext) 259 - chunksize += sizeof(sctp_supported_ext_param_t) + num_ext; 258 + chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) + 259 + num_ext); 260 260 261 261 /* RFC 2960 3.3.2 Initiation (INIT) (1) 262 262 * ··· 399 397 400 398 auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; 401 399 if (auth_hmacs->length) 402 - chunksize += ntohs(auth_hmacs->length); 400 + chunksize += WORD_ROUND(ntohs(auth_hmacs->length)); 403 401 else 404 402 auth_hmacs = NULL; 405 403 406 404 auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; 407 405 if (auth_chunks->length) 408 - chunksize += ntohs(auth_chunks->length); 406 + chunksize += WORD_ROUND(ntohs(auth_chunks->length)); 409 407 else 410 408 auth_chunks = NULL; 411 409 ··· 414 412 } 415 413 416 414 if (num_ext) 417 - chunksize += sizeof(sctp_supported_ext_param_t) + num_ext; 415 + chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) + 416 + num_ext); 418 417 419 418 /* Now allocate and fill out the chunk. */ 420 419 retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize); ··· 3317 3314 list_del_init(&asconf->transmitted_list); 3318 3315 sctp_chunk_free(asconf); 3319 3316 asoc->addip_last_asconf = NULL; 3320 - 3321 - /* Send the next asconf chunk from the addip chunk queue. */ 3322 - if (!list_empty(&asoc->addip_chunk_list)) { 3323 - struct list_head *entry = asoc->addip_chunk_list.next; 3324 - asconf = list_entry(entry, struct sctp_chunk, list); 3325 - 3326 - list_del_init(entry); 3327 - 3328 - /* Hold the chunk until an ASCONF_ACK is received. */ 3329 - sctp_chunk_hold(asconf); 3330 - if (sctp_primitive_ASCONF(asoc, asconf)) 3331 - sctp_chunk_free(asconf); 3332 - else 3333 - asoc->addip_last_asconf = asconf; 3334 - } 3335 3317 3336 3318 return retval; 3337 3319 }
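The sm_make_chunk changes fix the INIT/INIT-ACK length calculation by rounding every variable-length parameter up to a 32-bit boundary before adding it to chunksize, which is how the parameters are actually padded on the wire. A small worked example of that rounding (stand-alone sketch with made-up parameter sizes; the macro mirrors SCTP's WORD_ROUND()):

#include <stdio.h>

/* Round a length up to the next multiple of 4 bytes. */
#define WORD_ROUND(s) (((s) + 3) & ~3)

int main(void)
{
	/* A 6-byte parameter occupies 8 padded bytes on the wire, so the
	 * chunk length must account for 8, not 6. */
	unsigned int lens[] = { 6, 8, 10, 23 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("raw %2u -> padded %2u\n", lens[i], WORD_ROUND(lens[i]));

	return 0;
}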
+26
net/sctp/sm_sideeffect.c
··· 962 962 }
963 963
964 964
965 + /* Send the next ASCONF packet currently stored in the association.
966 + * This happens after the ASCONF_ACK was successfully processed.
967 + */
968 + static void sctp_cmd_send_asconf(struct sctp_association *asoc)
969 + {
970 + /* Send the next asconf chunk from the addip chunk
971 + * queue.
972 + */
973 + if (!list_empty(&asoc->addip_chunk_list)) {
974 + struct list_head *entry = asoc->addip_chunk_list.next;
975 + struct sctp_chunk *asconf = list_entry(entry,
976 + struct sctp_chunk, list);
977 + list_del_init(entry);
978 +
979 + /* Hold the chunk until an ASCONF_ACK is received. */
980 + sctp_chunk_hold(asconf);
981 + if (sctp_primitive_ASCONF(asoc, asconf))
982 + sctp_chunk_free(asconf);
983 + else
984 + asoc->addip_last_asconf = asconf;
985 + }
986 + }
987 +
965 988
966 989 /* These three macros allow us to pull the debugging code out of the
967 990 * main flow of sctp_do_sm() to keep attention focused on the real
··· 1639 1616 local_cork = 1;
1640 1617 }
1641 1618 error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
1619 + break;
1620 + case SCTP_CMD_SEND_NEXT_ASCONF:
1621 + sctp_cmd_send_asconf(asoc);
1642 1622 break;
1643 1623 default:
1644 1624 printk(KERN_WARNING "Impossible command: %u, %p\n",
+7 -1
net/sctp/sm_statefuns.c
··· 3676 3676 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); 3677 3677 3678 3678 if (!sctp_process_asconf_ack((struct sctp_association *)asoc, 3679 - asconf_ack)) 3679 + asconf_ack)) { 3680 + /* Successfully processed ASCONF_ACK. We can 3681 + * release the next asconf if we have one. 3682 + */ 3683 + sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF, 3684 + SCTP_NULL()); 3680 3685 return SCTP_DISPOSITION_CONSUME; 3686 + } 3681 3687 3682 3688 abort = sctp_make_abort(asoc, asconf_ack, 3683 3689 sizeof(sctp_errhdr_t));
+12 -2
net/sctp/socket.c
··· 3719 3719 sp->hmac = NULL; 3720 3720 3721 3721 SCTP_DBG_OBJCNT_INC(sock); 3722 - percpu_counter_inc(&sctp_sockets_allocated); 3723 3722 3724 3723 /* Set socket backlog limit. */ 3725 3724 sk->sk_backlog.limit = sysctl_sctp_rmem[1]; 3726 3725 3727 3726 local_bh_disable(); 3727 + percpu_counter_inc(&sctp_sockets_allocated); 3728 3728 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 3729 3729 local_bh_enable(); 3730 3730 ··· 3741 3741 /* Release our hold on the endpoint. */ 3742 3742 ep = sctp_sk(sk)->ep; 3743 3743 sctp_endpoint_free(ep); 3744 - percpu_counter_dec(&sctp_sockets_allocated); 3745 3744 local_bh_disable(); 3745 + percpu_counter_dec(&sctp_sockets_allocated); 3746 3746 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 3747 3747 local_bh_enable(); 3748 3748 } ··· 6187 6187 do_nonblock: 6188 6188 err = -EAGAIN; 6189 6189 goto out; 6190 + } 6191 + 6192 + void sctp_data_ready(struct sock *sk, int len) 6193 + { 6194 + read_lock_bh(&sk->sk_callback_lock); 6195 + if (sk_has_sleeper(sk)) 6196 + wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN | 6197 + POLLRDNORM | POLLRDBAND); 6198 + sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 6199 + read_unlock_bh(&sk->sk_callback_lock); 6190 6200 } 6191 6201 6192 6202 /* If socket sndbuf has changed, wake up all per association waiters. */