Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
drivers/net/forcedeth.c

+219 -130
+7
MAINTAINERS
··· 1545 1545 S: Maintained 1546 1546 F: drivers/net/wan/cosa* 1547 1547 1548 + CPMAC ETHERNET DRIVER 1549 + P: Florian Fainelli 1550 + M: florian@openwrt.org 1551 + L: netdev@vger.kernel.org 1552 + S: Maintained 1553 + F: drivers/net/cpmac.c 1554 + 1548 1555 CPU FREQUENCY DRIVERS 1549 1556 P: Dave Jones 1550 1557 M: davej@redhat.com
+4
drivers/net/3c509.c
··· 480 480 481 481 #ifdef CONFIG_EISA 482 482 static struct eisa_device_id el3_eisa_ids[] = { 483 + { "TCM5090" }, 484 + { "TCM5091" }, 483 485 { "TCM5092" }, 484 486 { "TCM5093" }, 487 + { "TCM5094" }, 485 488 { "TCM5095" }, 489 + { "TCM5098" }, 486 490 { "" } 487 491 }; 488 492 MODULE_DEVICE_TABLE(eisa, el3_eisa_ids);
+1 -1
drivers/net/Makefile
··· 105 105 obj-$(CONFIG_NET) += Space.o loopback.o 106 106 obj-$(CONFIG_SEEQ8005) += seeq8005.o 107 107 obj-$(CONFIG_NET_SB1000) += sb1000.o 108 - obj-$(CONFIG_MAC8390) += mac8390.o 8390.o 108 + obj-$(CONFIG_MAC8390) += mac8390.o 109 109 obj-$(CONFIG_APNE) += apne.o 8390.o 110 110 obj-$(CONFIG_PCMCIA_PCNET) += 8390.o 111 111 obj-$(CONFIG_HP100) += hp100.o
+1
drivers/net/atl1e/atl1e_main.c
··· 37 37 */ 38 38 static struct pci_device_id atl1e_pci_tbl[] = { 39 39 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)}, 40 + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)}, 40 41 /* required last entry */ 41 42 { 0 } 42 43 };
+6
drivers/net/atlx/atl1.c
··· 82 82 83 83 #include "atl1.h" 84 84 85 + #define ATLX_DRIVER_VERSION "2.1.3" 86 + MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \ 87 + Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>"); 88 + MODULE_LICENSE("GPL"); 89 + MODULE_VERSION(ATLX_DRIVER_VERSION); 90 + 85 91 /* Temporary hack for merging atl1 and atl2 */ 86 92 #include "atlx.c" 87 93
-6
drivers/net/atlx/atlx.h
··· 29 29 #include <linux/module.h> 30 30 #include <linux/types.h> 31 31 32 - #define ATLX_DRIVER_VERSION "2.1.3" 33 - MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \ 34 - Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>"); 35 - MODULE_LICENSE("GPL"); 36 - MODULE_VERSION(ATLX_DRIVER_VERSION); 37 - 38 32 #define ATLX_ERR_PHY 2 39 33 #define ATLX_ERR_PHY_SPEED 7 40 34 #define ATLX_ERR_PHY_RES 8
+14 -15
drivers/net/bfin_mac.c
··· 927 927 return 0; 928 928 } 929 929 930 - static const struct net_device_ops bfin_mac_netdev_ops = { 931 - .ndo_open = bfin_mac_open, 932 - .ndo_stop = bfin_mac_close, 933 - .ndo_start_xmit = bfin_mac_hard_start_xmit, 934 - .ndo_set_mac_address = bfin_mac_set_mac_address, 935 - .ndo_tx_timeout = bfin_mac_timeout, 936 - .ndo_set_multicast_list = bfin_mac_set_multicast_list, 937 - .ndo_validate_addr = eth_validate_addr, 938 - .ndo_change_mtu = eth_change_mtu, 939 - #ifdef CONFIG_NET_POLL_CONTROLLER 940 - .ndo_poll_controller = bfin_mac_poll, 941 - #endif 942 - }; 943 - 944 930 /* 945 - * 946 931 * this makes the board clean up everything that it can 947 932 * and not talk to the outside world. Caused by 948 933 * an 'ifconfig ethX down' ··· 951 966 952 967 return 0; 953 968 } 969 + 970 + static const struct net_device_ops bfin_mac_netdev_ops = { 971 + .ndo_open = bfin_mac_open, 972 + .ndo_stop = bfin_mac_close, 973 + .ndo_start_xmit = bfin_mac_hard_start_xmit, 974 + .ndo_set_mac_address = bfin_mac_set_mac_address, 975 + .ndo_tx_timeout = bfin_mac_timeout, 976 + .ndo_set_multicast_list = bfin_mac_set_multicast_list, 977 + .ndo_validate_addr = eth_validate_addr, 978 + .ndo_change_mtu = eth_change_mtu, 979 + #ifdef CONFIG_NET_POLL_CONTROLLER 980 + .ndo_poll_controller = bfin_mac_poll, 981 + #endif 982 + }; 954 983 955 984 static int __devinit bfin_mac_probe(struct platform_device *pdev) 956 985 {
+2 -2
drivers/net/cxgb3/adapter.h
··· 85 85 struct page *page; 86 86 void *va; 87 87 unsigned int offset; 88 - u64 *p_cnt; 89 - DECLARE_PCI_UNMAP_ADDR(mapping); 88 + unsigned long *p_cnt; 89 + dma_addr_t mapping; 90 90 }; 91 91 92 92 struct rx_desc;
+5 -3
drivers/net/cxgb3/cxgb3_main.c
··· 2453 2453 for_each_port(adapter, i) { 2454 2454 struct net_device *dev = adapter->port[i]; 2455 2455 struct port_info *p = netdev_priv(dev); 2456 + int link_fault; 2456 2457 2457 2458 spin_lock_irq(&adapter->work_lock); 2458 - if (p->link_fault) { 2459 + link_fault = p->link_fault; 2460 + spin_unlock_irq(&adapter->work_lock); 2461 + 2462 + if (link_fault) { 2459 2463 t3_link_fault(adapter, i); 2460 - spin_unlock_irq(&adapter->work_lock); 2461 2464 continue; 2462 2465 } 2463 - spin_unlock_irq(&adapter->work_lock); 2464 2466 2465 2467 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) { 2466 2468 t3_xgm_intr_disable(adapter, i);
+5 -6
drivers/net/cxgb3/sge.c
··· 355 355 (*d->pg_chunk.p_cnt)--; 356 356 if (!*d->pg_chunk.p_cnt) 357 357 pci_unmap_page(pdev, 358 - pci_unmap_addr(&d->pg_chunk, mapping), 358 + d->pg_chunk.mapping, 359 359 q->alloc_size, PCI_DMA_FROMDEVICE); 360 360 361 361 put_page(d->pg_chunk.page); ··· 454 454 q->pg_chunk.offset = 0; 455 455 mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, 456 456 0, q->alloc_size, PCI_DMA_FROMDEVICE); 457 - pci_unmap_addr_set(&q->pg_chunk, mapping, mapping); 457 + q->pg_chunk.mapping = mapping; 458 458 } 459 459 sd->pg_chunk = q->pg_chunk; 460 460 ··· 511 511 nomem: q->alloc_failed++; 512 512 break; 513 513 } 514 - mapping = pci_unmap_addr(&sd->pg_chunk, mapping) + 515 - sd->pg_chunk.offset; 514 + mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset; 516 515 pci_unmap_addr_set(sd, dma_addr, mapping); 517 516 518 517 add_one_rx_chunk(mapping, d, q->gen); ··· 881 882 (*sd->pg_chunk.p_cnt)--; 882 883 if (!*sd->pg_chunk.p_cnt) 883 884 pci_unmap_page(adap->pdev, 884 - pci_unmap_addr(&sd->pg_chunk, mapping), 885 + sd->pg_chunk.mapping, 885 886 fl->alloc_size, 886 887 PCI_DMA_FROMDEVICE); 887 888 if (!skb) { ··· 2090 2091 (*sd->pg_chunk.p_cnt)--; 2091 2092 if (!*sd->pg_chunk.p_cnt) 2092 2093 pci_unmap_page(adap->pdev, 2093 - pci_unmap_addr(&sd->pg_chunk, mapping), 2094 + sd->pg_chunk.mapping, 2094 2095 fl->alloc_size, 2095 2096 PCI_DMA_FROMDEVICE); 2096 2097
+5
drivers/net/cxgb3/t3_hw.c
··· 1288 1288 A_XGM_INT_STATUS + mac->offset); 1289 1289 link_fault &= F_LINKFAULTCHANGE; 1290 1290 1291 + link_ok = lc->link_ok; 1292 + speed = lc->speed; 1293 + duplex = lc->duplex; 1294 + fc = lc->fc; 1295 + 1291 1296 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc); 1292 1297 1293 1298 if (link_fault) {
+3 -2
drivers/net/e1000/e1000_main.c
··· 4035 4035 PCI_DMA_FROMDEVICE); 4036 4036 4037 4037 length = le16_to_cpu(rx_desc->length); 4038 - 4039 - if (unlikely(!(status & E1000_RXD_STAT_EOP))) { 4038 + /* !EOP means multiple descriptors were used to store a single 4039 + * packet, also make sure the frame isn't just CRC only */ 4040 + if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) { 4040 4041 /* All receives must fit into a single buffer */ 4041 4042 E1000_DBG("%s: Receive packet consumed multiple" 4042 4043 " buffers\n", netdev->name);
+13 -2
drivers/net/forcedeth.c
··· 898 898 }; 899 899 static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED; 900 900 901 + /* 902 + * Power down phy when interface is down (persists through reboot; 903 + * older Linux and other OSes may not power it up again) 904 + */ 905 + static int phy_power_down = 0; 906 + 901 907 static inline struct fe_priv *get_nvpriv(struct net_device *dev) 902 908 { 903 909 return netdev_priv(dev); ··· 1509 1503 1510 1504 /* restart auto negotiation, power down phy */ 1511 1505 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1512 - mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE | BMCR_PDOWN); 1506 + mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); 1507 + if (phy_power_down) { 1508 + mii_control |= BMCR_PDOWN; 1509 + } 1513 1510 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { 1514 1511 return PHY_ERROR; 1515 1512 } ··· 5543 5534 5544 5535 nv_drain_rxtx(dev); 5545 5536 5546 - if (np->wolenabled) { 5537 + if (np->wolenabled || !phy_power_down) { 5547 5538 nv_txrx_gate(dev, false); 5548 5539 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); 5549 5540 nv_start_rx(dev); ··· 6399 6390 MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0."); 6400 6391 module_param(phy_cross, int, 0); 6401 6392 MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0."); 6393 + module_param(phy_power_down, int, 0); 6394 + MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0)."); 6402 6395 6403 6396 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>"); 6404 6397 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
+1 -1
drivers/net/gianfar.h
··· 259 259 (IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \ 260 260 IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \ 261 261 | IEVENT_CRL | IEVENT_XFUN | IEVENT_DPE | IEVENT_PERR \ 262 - | IEVENT_MAG) 262 + | IEVENT_MAG | IEVENT_BABR) 263 263 264 264 #define IMASK_INIT_CLEAR 0x00000000 265 265 #define IMASK_BABR 0x80000000
+6 -6
drivers/net/mac8390.c
··· 304 304 if (!MACH_IS_MAC) 305 305 return ERR_PTR(-ENODEV); 306 306 307 - dev = alloc_ei_netdev(); 307 + dev = ____alloc_ei_netdev(0); 308 308 if (!dev) 309 309 return ERR_PTR(-ENOMEM); 310 310 ··· 481 481 static const struct net_device_ops mac8390_netdev_ops = { 482 482 .ndo_open = mac8390_open, 483 483 .ndo_stop = mac8390_close, 484 - .ndo_start_xmit = ei_start_xmit, 485 - .ndo_tx_timeout = ei_tx_timeout, 486 - .ndo_get_stats = ei_get_stats, 487 - .ndo_set_multicast_list = ei_set_multicast_list, 484 + .ndo_start_xmit = __ei_start_xmit, 485 + .ndo_tx_timeout = __ei_tx_timeout, 486 + .ndo_get_stats = __ei_get_stats, 487 + .ndo_set_multicast_list = __ei_set_multicast_list, 488 488 .ndo_validate_addr = eth_validate_addr, 489 489 .ndo_set_mac_address = eth_mac_addr, 490 490 .ndo_change_mtu = eth_change_mtu, 491 491 #ifdef CONFIG_NET_POLL_CONTROLLER 492 - .ndo_poll_controller = ei_poll, 492 + .ndo_poll_controller = __ei_poll, 493 493 #endif 494 494 }; 495 495
+4 -4
drivers/net/mlx4/en_tx.c
··· 388 388 389 389 INC_PERF_COUNTER(priv->pstats.tx_poll); 390 390 391 - if (!spin_trylock(&ring->comp_lock)) { 391 + if (!spin_trylock_irq(&ring->comp_lock)) { 392 392 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); 393 393 return; 394 394 } ··· 401 401 if (inflight && priv->port_up) 402 402 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); 403 403 404 - spin_unlock(&ring->comp_lock); 404 + spin_unlock_irq(&ring->comp_lock); 405 405 } 406 406 407 407 static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, ··· 444 444 445 445 /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */ 446 446 if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0) 447 - if (spin_trylock(&ring->comp_lock)) { 447 + if (spin_trylock_irq(&ring->comp_lock)) { 448 448 mlx4_en_process_tx_cq(priv->dev, cq); 449 - spin_unlock(&ring->comp_lock); 449 + spin_unlock_irq(&ring->comp_lock); 450 450 } 451 451 } 452 452
+62 -50
drivers/net/r8169.c
··· 3561 3561 int handled = 0; 3562 3562 int status; 3563 3563 3564 + /* loop handling interrupts until we have no new ones or 3565 + * we hit an invalid/hotplug case. 3566 + */ 3564 3567 status = RTL_R16(IntrStatus); 3568 + while (status && status != 0xffff) { 3569 + handled = 1; 3565 3570 3566 - /* hotplug/major error/no more work/shared irq */ 3567 - if ((status == 0xffff) || !status) 3568 - goto out; 3569 - 3570 - handled = 1; 3571 - 3572 - if (unlikely(!netif_running(dev))) { 3573 - rtl8169_asic_down(ioaddr); 3574 - goto out; 3575 - } 3576 - 3577 - status &= tp->intr_mask; 3578 - RTL_W16(IntrStatus, 3579 - (status & RxFIFOOver) ? (status | RxOverflow) : status); 3580 - 3581 - if (!(status & tp->intr_event)) 3582 - goto out; 3583 - 3584 - /* Work around for rx fifo overflow */ 3585 - if (unlikely(status & RxFIFOOver) && 3586 - (tp->mac_version == RTL_GIGA_MAC_VER_11)) { 3587 - netif_stop_queue(dev); 3588 - rtl8169_tx_timeout(dev); 3589 - goto out; 3590 - } 3591 - 3592 - if (unlikely(status & SYSErr)) { 3593 - rtl8169_pcierr_interrupt(dev); 3594 - goto out; 3595 - } 3596 - 3597 - if (status & LinkChg) 3598 - rtl8169_check_link_status(dev, tp, ioaddr); 3599 - 3600 - if (status & tp->napi_event) { 3601 - RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event); 3602 - tp->intr_mask = ~tp->napi_event; 3603 - 3604 - if (likely(napi_schedule_prep(&tp->napi))) 3605 - __napi_schedule(&tp->napi); 3606 - else if (netif_msg_intr(tp)) { 3607 - printk(KERN_INFO "%s: interrupt %04x in poll\n", 3608 - dev->name, status); 3571 + /* Handle all of the error cases first. These will reset 3572 + * the chip, so just exit the loop.
3573 + */ 3574 + if (unlikely(!netif_running(dev))) { 3575 + rtl8169_asic_down(ioaddr); 3576 + break; 3609 3577 } 3578 + 3579 + /* Work around for rx fifo overflow */ 3580 + if (unlikely(status & RxFIFOOver) && 3581 + (tp->mac_version == RTL_GIGA_MAC_VER_11)) { 3582 + netif_stop_queue(dev); 3583 + rtl8169_tx_timeout(dev); 3584 + break; 3585 + } 3586 + 3587 + if (unlikely(status & SYSErr)) { 3588 + rtl8169_pcierr_interrupt(dev); 3589 + break; 3590 + } 3591 + 3592 + if (status & LinkChg) 3593 + rtl8169_check_link_status(dev, tp, ioaddr); 3594 + 3595 + /* We need to see the latest version of tp->intr_mask to 3596 + * avoid ignoring an MSI interrupt and having to wait for 3597 + * another event which may never come. 3598 + */ 3599 + smp_rmb(); 3600 + if (status & tp->intr_mask & tp->napi_event) { 3601 + RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event); 3602 + tp->intr_mask = ~tp->napi_event; 3603 + 3604 + if (likely(napi_schedule_prep(&tp->napi))) 3605 + __napi_schedule(&tp->napi); 3606 + else if (netif_msg_intr(tp)) { 3607 + printk(KERN_INFO "%s: interrupt %04x in poll\n", 3608 + dev->name, status); 3609 + } 3610 + } 3611 + 3612 + /* We only get a new MSI interrupt when all active irq 3613 + * sources on the chip have been acknowledged. So, ack 3614 + * everything we've seen and check if new sources have become 3615 + * active to avoid blocking all interrupts from the chip. 3616 + */ 3617 + RTL_W16(IntrStatus, 3618 + (status & RxFIFOOver) ? (status | RxOverflow) : status); 3619 + status = RTL_R16(IntrStatus); 3610 3620 } 3611 - out: 3621 + 3612 3622 return IRQ_RETVAL(handled); 3613 3623 } 3614 3624 ··· 3634 3624 3635 3625 if (work_done < budget) { 3636 3626 napi_complete(napi); 3637 - tp->intr_mask = 0xffff; 3638 - /* 3639 - * 20040426: the barrier is not strictly required but the 3640 - * behavior of the irq handler could be less predictable 3641 - * without it.
Btw, the lack of flush for the posted pci 3642 - * write is safe - FR 3627 + 3628 + /* We need to force the visibility of tp->intr_mask 3629 + * for other CPUs, as we can lose an MSI interrupt 3630 + * and potentially wait for a retransmit timeout if we don't. 3631 + * The posted write to IntrMask is safe, as it will 3632 + * eventually make it to the chip and we won't lose anything 3633 + * until it does. 3643 3634 */ 3635 + tp->intr_mask = 0xffff; 3644 3636 smp_wmb(); 3645 3637 RTL_W16(IntrMask, tp->intr_event); 3646 3638 }
+1
drivers/net/wireless/Kconfig
··· 431 431 ASUS P5B Deluxe 432 432 Toshiba Satellite Pro series of laptops 433 433 Asus Wireless Link 434 + Linksys WUSB54GC-EU 434 435 435 436 Thanks to Realtek for their support! 436 437
+6 -6
drivers/net/wireless/at76c50x-usb.c
··· 1873 1873 if (ret != CMD_STATUS_COMPLETE) { 1874 1874 queue_delayed_work(priv->hw->workqueue, &priv->dwork_hw_scan, 1875 1875 SCAN_POLL_INTERVAL); 1876 - goto exit; 1876 + mutex_unlock(&priv->mtx); 1877 + return; 1877 1878 } 1878 - 1879 - ieee80211_scan_completed(priv->hw, false); 1880 1879 1881 1880 if (is_valid_ether_addr(priv->bssid)) 1882 1881 at76_join(priv); 1883 1882 1884 - ieee80211_wake_queues(priv->hw); 1885 - 1886 - exit: 1887 1883 mutex_unlock(&priv->mtx); 1884 + 1885 + ieee80211_scan_completed(priv->hw, false); 1886 + 1887 + ieee80211_wake_queues(priv->hw); 1888 1888 } 1889 1889 1890 1890 static int at76_hw_scan(struct ieee80211_hw *hw,
+2
drivers/net/wireless/rtl818x/rtl8187_dev.c
··· 74 74 {USB_DEVICE(0x18E8, 0x6232), .driver_info = DEVICE_RTL8187}, 75 75 /* AirLive */ 76 76 {USB_DEVICE(0x1b75, 0x8187), .driver_info = DEVICE_RTL8187}, 77 + /* Linksys */ 78 + {USB_DEVICE(0x1737, 0x0073), .driver_info = DEVICE_RTL8187B}, 77 79 {} 78 80 }; 79 81
+1
firmware/cis/.gitignore
··· 1 + *.cis
+4
include/linux/netfilter/nf_conntrack_tcp.h
··· 35 35 /* Has unacknowledged data */ 36 36 #define IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED 0x10 37 37 38 + /* The field td_maxack has been set */ 39 + #define IP_CT_TCP_FLAG_MAXACK_SET 0x20 40 + 38 41 struct nf_ct_tcp_flags { 39 42 __u8 flags; 40 43 __u8 mask; ··· 49 46 u_int32_t td_end; /* max of seq + len */ 50 47 u_int32_t td_maxend; /* max of ack + max(win, 1) */ 51 48 u_int32_t td_maxwin; /* max(win) */ 49 + u_int32_t td_maxack; /* max of ack */ 52 50 u_int8_t td_scale; /* window scale factor */ 53 51 u_int8_t flags; /* per direction options */ 54 52 };
-6
net/bluetooth/hci_sysfs.c
··· 90 90 struct hci_conn *conn = container_of(work, struct hci_conn, work_add); 91 91 struct hci_dev *hdev = conn->hdev; 92 92 93 - /* ensure previous del is complete */ 94 - flush_work(&conn->work_del); 95 - 96 93 dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); 97 94 98 95 if (device_add(&conn->dev) < 0) { ··· 114 117 { 115 118 struct hci_conn *conn = container_of(work, struct hci_conn, work_del); 116 119 struct hci_dev *hdev = conn->hdev; 117 - 118 - /* ensure previous add is complete */ 119 - flush_work(&conn->work_add); 120 120 121 121 if (!device_is_registered(&conn->dev)) 122 122 return;
+9 -2
net/ipv4/tcp_vegas.c
··· 158 158 } 159 159 EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event); 160 160 161 + static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp) 162 + { 163 + return min(tp->snd_ssthresh, tp->snd_cwnd-1); 164 + } 165 + 161 166 static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) 162 167 { 163 168 struct tcp_sock *tp = tcp_sk(sk); ··· 226 221 */ 227 222 diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT; 228 223 229 - if (diff > gamma && tp->snd_ssthresh > 2 ) { 224 + if (diff > gamma && tp->snd_cwnd <= tp->snd_ssthresh) { 230 225 /* Going too fast. Time to slow down 231 226 * and switch to congestion avoidance. 232 227 */ 233 - tp->snd_ssthresh = 2; 234 228 235 229 /* Set cwnd to match the actual rate 236 230 * exactly: ··· 239 235 * utilization. 240 236 */ 241 237 tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1); 238 + tp->snd_ssthresh = tcp_vegas_ssthresh(tp); 242 239 243 240 } else if (tp->snd_cwnd <= tp->snd_ssthresh) { 244 241 /* Slow start. */ ··· 255 250 * we slow down. 256 251 */ 257 252 tp->snd_cwnd--; 253 + tp->snd_ssthresh 254 + = tcp_vegas_ssthresh(tp); 258 255 } else if (diff < alpha) { 259 256 /* We don't have enough extra packets 260 257 * in the network, so speed up.
+4
net/netfilter/nf_conntrack_proto_dccp.c
··· 22 22 #include <linux/netfilter/nfnetlink_conntrack.h> 23 23 #include <net/netfilter/nf_conntrack.h> 24 24 #include <net/netfilter/nf_conntrack_l4proto.h> 25 + #include <net/netfilter/nf_conntrack_ecache.h> 25 26 #include <net/netfilter/nf_log.h> 26 27 27 28 static DEFINE_RWLOCK(dccp_lock); ··· 553 552 ct->proto.dccp.last_pkt = type; 554 553 ct->proto.dccp.state = new_state; 555 554 write_unlock_bh(&dccp_lock); 555 + 556 + if (new_state != old_state) 557 + nf_conntrack_event_cache(IPCT_PROTOINFO, ct); 556 558 557 559 dn = dccp_pernet(net); 558 560 nf_ct_refresh_acct(ct, ctinfo, skb, dn->dccp_timeout[new_state]);
+18
net/netfilter/nf_conntrack_proto_tcp.c
··· 634 634 sender->td_end = end; 635 635 sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED; 636 636 } 637 + if (tcph->ack) { 638 + if (!(sender->flags & IP_CT_TCP_FLAG_MAXACK_SET)) { 639 + sender->td_maxack = ack; 640 + sender->flags |= IP_CT_TCP_FLAG_MAXACK_SET; 641 + } else if (after(ack, sender->td_maxack)) 642 + sender->td_maxack = ack; 643 + } 644 + 637 645 /* 638 646 * Update receiver data. 639 647 */ ··· 926 918 "nf_ct_tcp: invalid state "); 927 919 return -NF_ACCEPT; 928 920 case TCP_CONNTRACK_CLOSE: 921 + if (index == TCP_RST_SET 922 + && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) 923 + && before(ntohl(th->seq), ct->proto.tcp.seen[!dir].td_maxack)) { 924 + /* Invalid RST */ 925 + write_unlock_bh(&tcp_lock); 926 + if (LOG_INVALID(net, IPPROTO_TCP)) 927 + nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 928 + "nf_ct_tcp: invalid RST "); 929 + return -NF_ACCEPT; 930 + } 929 931 if (index == TCP_RST_SET 930 932 && ((test_bit(IPS_SEEN_REPLY_BIT, &ct->status) 931 933 && ct->proto.tcp.last_index == TCP_SYN_SET)
+1 -1
net/netfilter/xt_hashlimit.c
··· 926 926 if (!hlist_empty(&htable->hash[*bucket])) { 927 927 hlist_for_each_entry(ent, pos, &htable->hash[*bucket], node) 928 928 if (dl_seq_real_show(ent, htable->family, s)) 929 - return 1; 929 + return -1; 930 930 } 931 931 return 0; 932 932 }
+17 -6
net/sched/cls_api.c
··· 135 135 unsigned long cl; 136 136 unsigned long fh; 137 137 int err; 138 + int tp_created = 0; 138 139 139 140 if (net != &init_net) 140 141 return -EINVAL; ··· 267 266 goto errout; 268 267 } 269 268 270 - spin_lock_bh(root_lock); 271 - tp->next = *back; 272 - *back = tp; 273 - spin_unlock_bh(root_lock); 269 + tp_created = 1; 274 270 275 271 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) 276 272 goto errout; ··· 294 296 switch (n->nlmsg_type) { 295 297 case RTM_NEWTFILTER: 296 298 err = -EEXIST; 297 - if (n->nlmsg_flags & NLM_F_EXCL) 299 + if (n->nlmsg_flags & NLM_F_EXCL) { 300 + if (tp_created) 301 + tcf_destroy(tp); 298 302 goto errout; 303 + } 299 304 break; 300 305 case RTM_DELTFILTER: 301 306 err = tp->ops->delete(tp, fh); ··· 315 314 } 316 315 317 316 err = tp->ops->change(tp, cl, t->tcm_handle, tca, &fh); 318 - if (err == 0) 317 + if (err == 0) { 318 + if (tp_created) { 319 + spin_lock_bh(root_lock); 320 + tp->next = *back; 321 + *back = tp; 322 + spin_unlock_bh(root_lock); 323 + } 319 324 tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER); 325 + } else { 326 + if (tp_created) 327 + tcf_destroy(tp); 328 + } 320 329 321 330 errout: 322 331 if (cl)
+11 -11
net/sched/cls_cgroup.c
··· 98 98 struct tcf_result *res) 99 99 { 100 100 struct cls_cgroup_head *head = tp->root; 101 - struct cgroup_cls_state *cs; 102 - int ret = 0; 101 + u32 classid; 103 102 104 103 /* 105 104 * Due to the nature of the classifier it is required to ignore all ··· 114 115 return -1; 115 116 116 117 rcu_read_lock(); 117 - cs = task_cls_state(current); 118 - if (cs->classid && tcf_em_tree_match(skb, &head->ematches, NULL)) { 119 - res->classid = cs->classid; 120 - res->class = 0; 121 - ret = tcf_exts_exec(skb, &head->exts, res); 122 - } else 123 - ret = -1; 124 - 118 + classid = task_cls_state(current)->classid; 125 119 rcu_read_unlock(); 126 120 127 - return ret; 121 + if (!classid) 122 + return -1; 123 + 124 + if (!tcf_em_tree_match(skb, &head->ematches, NULL)) 125 + return -1; 126 + 127 + res->classid = classid; 128 + res->class = 0; 129 + return tcf_exts_exec(skb, &head->exts, res); 128 130 } 129 131 130 132 static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)