Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
[NET]: rt_check_expire() can take a long time, add a cond_resched()
[ISDN] sc: Really, really fix warning
[ISDN] sc: Fix sndpkt to have the correct number of arguments
[TCP] FRTO: Clear frto_highmark only after process_frto that uses it
[NET]: Remove notifier block from chain when register_netdevice_notifier fails
[FS_ENET]: Fix module build.
[TCP]: Make sure write_queue_from does not begin with NULL ptr
[TCP]: Fix size calculation in sk_stream_alloc_pskb
[S2IO]: Fixed memory leak when MSI-X vector allocation fails
[BONDING]: Fix resource use after free
[SYSCTL]: Fix warning for token-ring from sysctl checker
[NET] random : secure_tcp_sequence_number should not assume CONFIG_KTIME_SCALAR
[IWLWIFI]: Not correctly dealing with hotunplug.
[TCP] FRTO: Plug potential LOST-bit leak
[TCP] FRTO: Limit snd_cwnd if TCP was application limited
[E1000]: Fix schedule while atomic when called from mii-tool.
[NETX]: Fix build failure added by 2.6.24 statistics cleanup.
[EP93xx_ETH]: Build fix after 2.6.24 NAPI changes.
[PKT_SCHED]: Check subqueue status before calling hard_start_xmit

+118 -96
+3 -3
drivers/char/random.c
··· 1494 seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK; 1495 seq += keyptr->count; 1496 1497 - seq += ktime_get_real().tv64; 1498 1499 return seq; 1500 } ··· 1556 * overlaps less than one time per MSL (2 minutes). 1557 * Choosing a clock of 64 ns period is OK. (period of 274 s) 1558 */ 1559 - seq += ktime_get_real().tv64 >> 6; 1560 #if 0 1561 printk("init_seq(%lx, %lx, %d, %d) = %d\n", 1562 saddr, daddr, sport, dport, seq); ··· 1616 seq = half_md4_transform(hash, keyptr->secret); 1617 seq |= ((u64)keyptr->count) << (32 - HASH_BITS); 1618 1619 - seq += ktime_get_real().tv64; 1620 seq &= (1ull << 48) - 1; 1621 #if 0 1622 printk("dccp init_seq(%lx, %lx, %d, %d) = %d\n",
··· 1494 seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK; 1495 seq += keyptr->count; 1496 1497 + seq += ktime_to_ns(ktime_get_real()); 1498 1499 return seq; 1500 } ··· 1556 * overlaps less than one time per MSL (2 minutes). 1557 * Choosing a clock of 64 ns period is OK. (period of 274 s) 1558 */ 1559 + seq += ktime_to_ns(ktime_get_real()) >> 6; 1560 #if 0 1561 printk("init_seq(%lx, %lx, %d, %d) = %d\n", 1562 saddr, daddr, sport, dport, seq); ··· 1616 seq = half_md4_transform(hash, keyptr->secret); 1617 seq |= ((u64)keyptr->count) << (32 - HASH_BITS); 1618 1619 + seq += ktime_to_ns(ktime_get_real()); 1620 seq &= (1ull << 48) - 1; 1621 #if 0 1622 printk("dccp init_seq(%lx, %lx, %d, %d) = %d\n",
+1 -1
drivers/isdn/sc/card.h
··· 109 int get_card_from_id(int driver); 110 int indicate_status(int card, int event, ulong Channel, char *Data); 111 irqreturn_t interrupt_handler(int interrupt, void *cardptr); 112 - int sndpkt(int devId, int channel, struct sk_buff *data); 113 void rcvpkt(int card, RspMessage *rcvmsg); 114 int command(isdn_ctrl *cmd); 115 int reset(int card);
··· 109 int get_card_from_id(int driver); 110 int indicate_status(int card, int event, ulong Channel, char *Data); 111 irqreturn_t interrupt_handler(int interrupt, void *cardptr); 112 + int sndpkt(int devId, int channel, int ack, struct sk_buff *data); 113 void rcvpkt(int card, RspMessage *rcvmsg); 114 int command(isdn_ctrl *cmd); 115 int reset(int card);
+1 -1
drivers/isdn/sc/packet.c
··· 20 #include "message.h" 21 #include "card.h" 22 23 - int sndpkt(int devId, int channel, struct sk_buff *data) 24 { 25 LLData ReqLnkWrite; 26 int status;
··· 20 #include "message.h" 21 #include "card.h" 22 23 + int sndpkt(int devId, int channel, int ack, struct sk_buff *data) 24 { 25 LLData ReqLnkWrite; 26 int status;
+1 -1
drivers/isdn/sc/shmem.c
··· 50 51 outb(((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE) >> 14) | 0x80, 52 sc_adapter[card]->ioport[sc_adapter[card]->shmem_pgport]); 53 - memcpy_toio(sc_adapter[card]->rambase + dest_rem, src, n); 54 spin_unlock_irqrestore(&sc_adapter[card]->lock, flags); 55 pr_debug("%s: set page to %#x\n",sc_adapter[card]->devicename, 56 ((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE)>>14)|0x80);
··· 50 51 outb(((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE) >> 14) | 0x80, 52 sc_adapter[card]->ioport[sc_adapter[card]->shmem_pgport]); 53 + memcpy_toio((void __iomem *)(sc_adapter[card]->rambase + dest_rem), src, n); 54 spin_unlock_irqrestore(&sc_adapter[card]->lock, flags); 55 pr_debug("%s: set page to %#x\n",sc_adapter[card]->devicename, 56 ((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE)>>14)|0x80);
+1 -1
drivers/net/arm/ep93xx_eth.c
··· 417 418 if (status & REG_INTSTS_RX) { 419 spin_lock(&ep->rx_lock); 420 - if (likely(__netif_rx_schedule_prep(dev, &ep->napi))) { 421 wrl(ep, REG_INTEN, REG_INTEN_TX); 422 __netif_rx_schedule(dev, &ep->napi); 423 }
··· 417 418 if (status & REG_INTSTS_RX) { 419 spin_lock(&ep->rx_lock); 420 + if (likely(netif_rx_schedule_prep(dev, &ep->napi))) { 421 wrl(ep, REG_INTEN, REG_INTEN_TX); 422 __netif_rx_schedule(dev, &ep->napi); 423 }
+2 -2
drivers/net/bonding/bond_main.c
··· 1847 */ 1848 void bond_destroy(struct bonding *bond) 1849 { 1850 - unregister_netdevice(bond->dev); 1851 bond_deinit(bond->dev); 1852 bond_destroy_sysfs_entry(bond); 1853 } 1854 1855 /* ··· 4475 bond_mc_list_destroy(bond); 4476 /* Release the bonded slaves */ 4477 bond_release_all(bond_dev); 4478 - unregister_netdevice(bond_dev); 4479 bond_deinit(bond_dev); 4480 } 4481 4482 #ifdef CONFIG_PROC_FS
··· 1847 */ 1848 void bond_destroy(struct bonding *bond) 1849 { 1850 bond_deinit(bond->dev); 1851 bond_destroy_sysfs_entry(bond); 1852 + unregister_netdevice(bond->dev); 1853 } 1854 1855 /* ··· 4475 bond_mc_list_destroy(bond); 4476 /* Release the bonded slaves */ 4477 bond_release_all(bond_dev); 4478 bond_deinit(bond_dev); 4479 + unregister_netdevice(bond_dev); 4480 } 4481 4482 #ifdef CONFIG_PROC_FS
+3 -10
drivers/net/e1000/e1000_main.c
··· 4804 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4805 return -EIO; 4806 } 4807 if (adapter->hw.media_type == e1000_media_type_copper) { 4808 switch (data->reg_num) { 4809 case PHY_CTRL: ··· 4825 DUPLEX_HALF; 4826 retval = e1000_set_spd_dplx(adapter, 4827 spddplx); 4828 - if (retval) { 4829 - spin_unlock_irqrestore( 4830 - &adapter->stats_lock, 4831 - flags); 4832 return retval; 4833 - } 4834 } 4835 if (netif_running(adapter->netdev)) 4836 e1000_reinit_locked(adapter); ··· 4835 break; 4836 case M88E1000_PHY_SPEC_CTRL: 4837 case M88E1000_EXT_PHY_SPEC_CTRL: 4838 - if (e1000_phy_reset(&adapter->hw)) { 4839 - spin_unlock_irqrestore( 4840 - &adapter->stats_lock, flags); 4841 return -EIO; 4842 - } 4843 break; 4844 } 4845 } else { ··· 4851 break; 4852 } 4853 } 4854 - spin_unlock_irqrestore(&adapter->stats_lock, flags); 4855 break; 4856 default: 4857 return -EOPNOTSUPP;
··· 4804 spin_unlock_irqrestore(&adapter->stats_lock, flags); 4805 return -EIO; 4806 } 4807 + spin_unlock_irqrestore(&adapter->stats_lock, flags); 4808 if (adapter->hw.media_type == e1000_media_type_copper) { 4809 switch (data->reg_num) { 4810 case PHY_CTRL: ··· 4824 DUPLEX_HALF; 4825 retval = e1000_set_spd_dplx(adapter, 4826 spddplx); 4827 + if (retval) 4828 return retval; 4829 } 4830 if (netif_running(adapter->netdev)) 4831 e1000_reinit_locked(adapter); ··· 4838 break; 4839 case M88E1000_PHY_SPEC_CTRL: 4840 case M88E1000_EXT_PHY_SPEC_CTRL: 4841 + if (e1000_phy_reset(&adapter->hw)) 4842 return -EIO; 4843 break; 4844 } 4845 } else { ··· 4857 break; 4858 } 4859 } 4860 break; 4861 default: 4862 return -EOPNOTSUPP;
+10 -1
drivers/net/fs_enet/Kconfig
··· 2 tristate "Freescale Ethernet Driver" 3 depends on CPM1 || CPM2 4 select MII 5 6 config FS_ENET_HAS_SCC 7 bool "Chip has an SCC usable for ethernet" ··· 12 config FS_ENET_HAS_FCC 13 bool "Chip has an FCC usable for ethernet" 14 depends on FS_ENET && CPM2 15 - select MDIO_BITBANG 16 default y 17 18 config FS_ENET_HAS_FEC 19 bool "Chip has an FEC usable for ethernet" 20 depends on FS_ENET && CPM1 21 default y 22
··· 2 tristate "Freescale Ethernet Driver" 3 depends on CPM1 || CPM2 4 select MII 5 + select PHYLIB 6 7 config FS_ENET_HAS_SCC 8 bool "Chip has an SCC usable for ethernet" ··· 11 config FS_ENET_HAS_FCC 12 bool "Chip has an FCC usable for ethernet" 13 depends on FS_ENET && CPM2 14 default y 15 16 config FS_ENET_HAS_FEC 17 bool "Chip has an FEC usable for ethernet" 18 depends on FS_ENET && CPM1 19 + select FS_ENET_MDIO_FEC 20 default y 21 22 + config FS_ENET_MDIO_FEC 23 + tristate "MDIO driver for FEC" 24 + depends on FS_ENET && CPM1 25 + 26 + config FS_ENET_MDIO_FCC 27 + tristate "MDIO driver for FCC" 28 + depends on FS_ENET && CPM2 29 + select MDIO_BITBANG
+12 -3
drivers/net/fs_enet/Makefile
··· 4 5 obj-$(CONFIG_FS_ENET) += fs_enet.o 6 7 - obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o mii-fec.o 8 - obj-$(CONFIG_CPM2) += mac-fcc.o mii-bitbang.o 9 10 - fs_enet-objs := fs_enet-main.o
··· 4 5 obj-$(CONFIG_FS_ENET) += fs_enet.o 6 7 + fs_enet-$(CONFIG_FS_ENET_HAS_SCC) += mac-scc.o 8 + fs_enet-$(CONFIG_FS_ENET_HAS_FEC) += mac-fec.o 9 + fs_enet-$(CONFIG_FS_ENET_HAS_FCC) += mac-fcc.o 10 11 + ifeq ($(CONFIG_PPC_CPM_NEW_BINDING),y) 12 + obj-$(CONFIG_FS_ENET_MDIO_FEC) += mii-fec.o 13 + obj-$(CONFIG_FS_ENET_MDIO_FCC) += mii-bitbang.o 14 + else 15 + fs_enet-$(CONFIG_FS_ENET_MDIO_FEC) += mii-fec.o 16 + fs_enet-$(CONFIG_FS_ENET_MDIO_FCC) += mii-bitbang.o 17 + endif 18 + 19 + fs_enet-objs := fs_enet-main.o $(fs_enet-m)
+3 -3
drivers/net/netx-eth.c
··· 128 FIFO_PTR_FRAMELEN(len)); 129 130 ndev->trans_start = jiffies; 131 - dev->stats.tx_packets++; 132 - dev->stats.tx_bytes += skb->len; 133 134 netif_stop_queue(ndev); 135 spin_unlock_irq(&priv->lock); ··· 155 if (unlikely(skb == NULL)) { 156 printk(KERN_NOTICE "%s: Low memory, packet dropped.\n", 157 ndev->name); 158 - dev->stats.rx_dropped++; 159 return; 160 } 161
··· 128 FIFO_PTR_FRAMELEN(len)); 129 130 ndev->trans_start = jiffies; 131 + ndev->stats.tx_packets++; 132 + ndev->stats.tx_bytes += skb->len; 133 134 netif_stop_queue(ndev); 135 spin_unlock_irq(&priv->lock); ··· 155 if (unlikely(skb == NULL)) { 156 printk(KERN_NOTICE "%s: Low memory, packet dropped.\n", 157 ndev->name); 158 + ndev->stats.rx_dropped++; 159 return; 160 } 161
+51 -59
drivers/net/s2io.c
··· 84 #include "s2io.h" 85 #include "s2io-regs.h" 86 87 - #define DRV_VERSION "2.0.26.5" 88 89 /* S2io Driver name & version. */ 90 static char s2io_driver_name[] = "Neterion"; ··· 3775 3776 return err; 3777 } 3778 /* ********************************************************* * 3779 * Functions defined below concern the OS part of the driver * 3780 * ********************************************************* */ ··· 3843 int ret = s2io_enable_msi_x(sp); 3844 3845 if (!ret) { 3846 - u16 msi_control; 3847 - 3848 ret = s2io_test_msi(sp); 3849 - 3850 /* rollback MSI-X, will re-enable during add_isr() */ 3851 - kfree(sp->entries); 3852 - sp->mac_control.stats_info->sw_stat.mem_freed += 3853 - (MAX_REQUESTED_MSI_X * 3854 - sizeof(struct msix_entry)); 3855 - kfree(sp->s2io_entries); 3856 - sp->mac_control.stats_info->sw_stat.mem_freed += 3857 - (MAX_REQUESTED_MSI_X * 3858 - sizeof(struct s2io_msix_entry)); 3859 - sp->entries = NULL; 3860 - sp->s2io_entries = NULL; 3861 - 3862 - pci_read_config_word(sp->pdev, 0x42, &msi_control); 3863 - msi_control &= 0xFFFE; /* Disable MSI */ 3864 - pci_write_config_word(sp->pdev, 0x42, msi_control); 3865 - 3866 - pci_disable_msix(sp->pdev); 3867 - 3868 } 3869 if (ret) { 3870 ··· 6734 } 6735 } 6736 if (err) { 6737 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration " 6738 "failed\n", dev->name, i); 6739 - DBG_PRINT(ERR_DBG, "Returned: %d\n", err); 6740 - return -1; 6741 } 6742 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS; 6743 } 6744 - printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt); 6745 - printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt); 6746 } 6747 if (sp->config.intr_type == INTA) { 6748 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED, ··· 6764 } 6765 static void s2io_rem_isr(struct s2io_nic * sp) 6766 { 6767 - struct net_device *dev = sp->dev; 6768 - struct swStat *stats = &sp->mac_control.stats_info->sw_stat; 6769 - 6770 - if (sp->config.intr_type == MSI_X) { 6771 - int i; 6772 - u16 msi_control; 6773 - 6774 - for (i=1; (sp->s2io_entries[i].in_use == 6775 - MSIX_REGISTERED_SUCCESS); i++) { 6776 - int vector = sp->entries[i].vector; 6777 - void *arg = sp->s2io_entries[i].arg; 6778 - 6779 - synchronize_irq(vector); 6780 - free_irq(vector, arg); 6781 - } 6782 - 6783 - kfree(sp->entries); 6784 - stats->mem_freed += 6785 - (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); 6786 - kfree(sp->s2io_entries); 6787 - stats->mem_freed += 6788 - (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); 6789 - sp->entries = NULL; 6790 - sp->s2io_entries = NULL; 6791 - 6792 - pci_read_config_word(sp->pdev, 0x42, &msi_control); 6793 - msi_control &= 0xFFFE; /* Disable MSI */ 6794 - pci_write_config_word(sp->pdev, 0x42, msi_control); 6795 - 6796 - pci_disable_msix(sp->pdev); 6797 - } else { 6798 - synchronize_irq(sp->pdev->irq); 6799 - free_irq(sp->pdev->irq, dev); 6800 - } 6801 } 6802 6803 static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
··· 84 #include "s2io.h" 85 #include "s2io-regs.h" 86 87 + #define DRV_VERSION "2.0.26.6" 88 89 /* S2io Driver name & version. */ 90 static char s2io_driver_name[] = "Neterion"; ··· 3775 3776 return err; 3777 } 3778 + 3779 + static void remove_msix_isr(struct s2io_nic *sp) 3780 + { 3781 + int i; 3782 + u16 msi_control; 3783 + 3784 + for (i = 0; i < MAX_REQUESTED_MSI_X; i++) { 3785 + if (sp->s2io_entries[i].in_use == 3786 + MSIX_REGISTERED_SUCCESS) { 3787 + int vector = sp->entries[i].vector; 3788 + void *arg = sp->s2io_entries[i].arg; 3789 + free_irq(vector, arg); 3790 + } 3791 + } 3792 + 3793 + kfree(sp->entries); 3794 + kfree(sp->s2io_entries); 3795 + sp->entries = NULL; 3796 + sp->s2io_entries = NULL; 3797 + 3798 + pci_read_config_word(sp->pdev, 0x42, &msi_control); 3799 + msi_control &= 0xFFFE; /* Disable MSI */ 3800 + pci_write_config_word(sp->pdev, 0x42, msi_control); 3801 + 3802 + pci_disable_msix(sp->pdev); 3803 + } 3804 + 3805 + static void remove_inta_isr(struct s2io_nic *sp) 3806 + { 3807 + struct net_device *dev = sp->dev; 3808 + 3809 + free_irq(sp->pdev->irq, dev); 3810 + } 3811 + 3812 /* ********************************************************* * 3813 * Functions defined below concern the OS part of the driver * 3814 * ********************************************************* */ ··· 3809 int ret = s2io_enable_msi_x(sp); 3810 3811 if (!ret) { 3812 ret = s2io_test_msi(sp); 3813 /* rollback MSI-X, will re-enable during add_isr() */ 3814 + remove_msix_isr(sp); 3815 } 3816 if (ret) { 3817 ··· 6719 } 6720 } 6721 if (err) { 6722 + remove_msix_isr(sp); 6723 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration " 6724 "failed\n", dev->name, i); 6725 + DBG_PRINT(ERR_DBG, "%s: defaulting to INTA\n", 6726 + dev->name); 6727 + sp->config.intr_type = INTA; 6728 + break; 6729 } 6730 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS; 6731 } 6732 + if (!err) { 6733 + printk(KERN_INFO "MSI-X-TX %d entries enabled\n", 6734 + msix_tx_cnt); 6735 + printk(KERN_INFO "MSI-X-RX %d entries enabled\n", 6736 + msix_rx_cnt); 6737 + } 6738 } 6739 if (sp->config.intr_type == INTA) { 6740 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED, ··· 6742 } 6743 static void s2io_rem_isr(struct s2io_nic * sp) 6744 { 6745 + if (sp->config.intr_type == MSI_X) 6746 + remove_msix_isr(sp); 6747 + else 6748 + remove_inta_isr(sp); 6749 } 6750 6751 static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
+2 -1
drivers/net/wireless/iwlwifi/iwl3945-base.c
··· 4850 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { 4851 /* Hardware disappeared */ 4852 IWL_WARNING("HARDWARE GONE?? INTA == 0x%080x\n", inta); 4853 - goto none; 4854 } 4855 4856 IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", ··· 4858 4859 /* iwl_irq_tasklet() will service interrupts and re-enable them */ 4860 tasklet_schedule(&priv->irq_tasklet); 4861 spin_unlock(&priv->lock); 4862 4863 return IRQ_HANDLED;
··· 4850 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { 4851 /* Hardware disappeared */ 4852 IWL_WARNING("HARDWARE GONE?? INTA == 0x%080x\n", inta); 4853 + goto unplugged; 4854 } 4855 4856 IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", ··· 4858 4859 /* iwl_irq_tasklet() will service interrupts and re-enable them */ 4860 tasklet_schedule(&priv->irq_tasklet); 4861 + unplugged: 4862 spin_unlock(&priv->lock); 4863 4864 return IRQ_HANDLED;
+6 -4
include/net/sock.h
··· 1235 gfp_t gfp) 1236 { 1237 struct sk_buff *skb; 1238 - int hdr_len; 1239 1240 - hdr_len = SKB_DATA_ALIGN(sk->sk_prot->max_header); 1241 - skb = alloc_skb_fclone(size + hdr_len, gfp); 1242 if (skb) { 1243 skb->truesize += mem; 1244 if (sk_stream_wmem_schedule(sk, skb->truesize)) { 1245 - skb_reserve(skb, hdr_len); 1246 return skb; 1247 } 1248 __kfree_skb(skb);
··· 1235 gfp_t gfp) 1236 { 1237 struct sk_buff *skb; 1238 1239 + skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp); 1240 if (skb) { 1241 skb->truesize += mem; 1242 if (sk_stream_wmem_schedule(sk, skb->truesize)) { 1243 + /* 1244 + * Make sure that we have exactly size bytes 1245 + * available to the caller, no more, no less. 1246 + */ 1247 + skb_reserve(skb, skb_tailroom(skb) - size); 1248 return skb; 1249 } 1250 __kfree_skb(skb);
+1 -1
kernel/sysctl_check.c
··· 738 { NET_ROSE, "rose", trans_net_rose_table }, 739 { NET_IPV6, "ipv6", trans_net_ipv6_table }, 740 { NET_X25, "x25", trans_net_x25_table }, 741 - { NET_TR, "tr", trans_net_tr_table }, 742 { NET_DECNET, "decnet", trans_net_decnet_table }, 743 /* NET_ECONET not used */ 744 { NET_SCTP, "sctp", trans_net_sctp_table },
··· 738 { NET_ROSE, "rose", trans_net_rose_table }, 739 { NET_IPV6, "ipv6", trans_net_ipv6_table }, 740 { NET_X25, "x25", trans_net_x25_table }, 741 + { NET_TR, "token-ring", trans_net_tr_table }, 742 { NET_DECNET, "decnet", trans_net_decnet_table }, 743 /* NET_ECONET not used */ 744 { NET_SCTP, "sctp", trans_net_sctp_table },
+2
net/core/dev.c
··· 1171 nb->notifier_call(nb, NETDEV_UNREGISTER, dev); 1172 } 1173 } 1174 goto unlock; 1175 } 1176
··· 1171 nb->notifier_call(nb, NETDEV_UNREGISTER, dev); 1172 } 1173 } 1174 + 1175 + raw_notifier_chain_unregister(&netdev_chain, nb); 1176 goto unlock; 1177 } 1178
+3
net/ipv4/route.c
··· 578 i = (i + 1) & rt_hash_mask; 579 rthp = &rt_hash_table[i].chain; 580 581 if (*rthp == NULL) 582 continue; 583 spin_lock_bh(rt_hash_lock_addr(i));
··· 578 i = (i + 1) & rt_hash_mask; 579 rthp = &rt_hash_table[i].chain; 580 581 + if (need_resched()) 582 + cond_resched(); 583 + 584 if (*rthp == NULL) 585 continue; 586 spin_lock_bh(rt_hash_lock_addr(i));
+13 -3
net/ipv4/tcp_input.c
··· 1269 if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window)) 1270 return 0; 1271 1272 /* SACK fastpath: 1273 * if the only SACK change is the increase of the end_seq of 1274 * the first block then only apply that SACK block ··· 1518 (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark))) 1519 tcp_update_reordering(sk, tp->fackets_out - reord, 0); 1520 1521 #if FASTRETRANS_DEBUG > 0 1522 BUG_TRAP((int)tp->sacked_out >= 0); 1523 BUG_TRAP((int)tp->lost_out >= 0); ··· 1674 } 1675 tcp_verify_left_out(tp); 1676 1677 /* Earlier loss recovery underway (see RFC4138; Appendix B). 1678 * The last condition is necessary at least in tp->frto_counter case. 1679 */ ··· 1709 tcp_for_write_queue(skb, sk) { 1710 if (skb == tcp_send_head(sk)) 1711 break; 1712 /* 1713 * Count the retransmission made on RTO correctly (only when 1714 * waiting for the first ACK and did not get it)... ··· 1724 } else { 1725 if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) 1726 tp->undo_marker = 0; 1727 - TCP_SKB_CB(skb)->sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS); 1728 } 1729 1730 /* Don't lost mark skbs that were fwd transmitted after RTO */ ··· 3113 /* See if we can take anything off of the retransmit queue. */ 3114 flag |= tcp_clean_rtx_queue(sk, &seq_rtt, prior_fackets); 3115 3116 /* Guarantee sacktag reordering detection against wrap-arounds */ 3117 if (before(tp->frto_highmark, tp->snd_una)) 3118 tp->frto_highmark = 0; 3119 - if (tp->frto_counter) 3120 - frto_cwnd = tcp_process_frto(sk, flag); 3121 3122 if (tcp_ack_is_dubious(sk, flag)) { 3123 /* Advance CWND, if state allows this. */
··· 1269 if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window)) 1270 return 0; 1271 1272 + if (!tp->packets_out) 1273 + goto out; 1274 + 1275 /* SACK fastpath: 1276 * if the only SACK change is the increase of the end_seq of 1277 * the first block then only apply that SACK block ··· 1515 (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark))) 1516 tcp_update_reordering(sk, tp->fackets_out - reord, 0); 1517 1518 + out: 1519 + 1520 #if FASTRETRANS_DEBUG > 0 1521 BUG_TRAP((int)tp->sacked_out >= 0); 1522 BUG_TRAP((int)tp->lost_out >= 0); ··· 1669 } 1670 tcp_verify_left_out(tp); 1671 1672 + /* Too bad if TCP was application limited */ 1673 + tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1); 1674 + 1675 /* Earlier loss recovery underway (see RFC4138; Appendix B). 1676 * The last condition is necessary at least in tp->frto_counter case. 1677 */ ··· 1701 tcp_for_write_queue(skb, sk) { 1702 if (skb == tcp_send_head(sk)) 1703 break; 1704 + 1705 + TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; 1706 /* 1707 * Count the retransmission made on RTO correctly (only when 1708 * waiting for the first ACK and did not get it)... ··· 1714 } else { 1715 if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) 1716 tp->undo_marker = 0; 1717 + TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 1718 } 1719 1720 /* Don't lost mark skbs that were fwd transmitted after RTO */ ··· 3103 /* See if we can take anything off of the retransmit queue. */ 3104 flag |= tcp_clean_rtx_queue(sk, &seq_rtt, prior_fackets); 3105 3106 + if (tp->frto_counter) 3107 + frto_cwnd = tcp_process_frto(sk, flag); 3108 /* Guarantee sacktag reordering detection against wrap-arounds */ 3109 if (before(tp->frto_highmark, tp->snd_una)) 3110 tp->frto_highmark = 0; 3111 3112 if (tcp_ack_is_dubious(sk, flag)) { 3113 /* Advance CWND, if state allows this. */
+3 -2
net/sched/sch_generic.c
··· 134 { 135 struct Qdisc *q = dev->qdisc; 136 struct sk_buff *skb; 137 - int ret; 138 139 /* Dequeue packet */ 140 if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL)) ··· 145 spin_unlock(&dev->queue_lock); 146 147 HARD_TX_LOCK(dev, smp_processor_id()); 148 - ret = dev_hard_start_xmit(skb, dev); 149 HARD_TX_UNLOCK(dev); 150 151 spin_lock(&dev->queue_lock);
··· 134 { 135 struct Qdisc *q = dev->qdisc; 136 struct sk_buff *skb; 137 + int ret = NETDEV_TX_BUSY; 138 139 /* Dequeue packet */ 140 if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL)) ··· 145 spin_unlock(&dev->queue_lock); 146 147 HARD_TX_LOCK(dev, smp_processor_id()); 148 + if (!netif_subqueue_stopped(dev, skb)) 149 + ret = dev_hard_start_xmit(skb, dev); 150 HARD_TX_UNLOCK(dev); 151 152 spin_lock(&dev->queue_lock);