Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (21 commits)
dca: disable dca on IOAT ver.3.0 multiple-IOH platforms
netpoll: Disable IRQ around RCU dereference in netpoll_rx
sctp: Do not reset the packet during sctp_packet_config().
net/llc: storing negative error codes in unsigned short
MAINTAINERS: move atlx discussions to netdev
drivers/net/cxgb3/cxgb3_main.c: prevent reading uninitialized stack memory
drivers/net/eql.c: prevent reading uninitialized stack memory
drivers/net/usb/hso.c: prevent reading uninitialized memory
xfrm: don't assume rcu_read_lock in xfrm_output_one()
r8169: Handle rxfifo errors on 8168 chips
3c59x: Remove atomic context inside vortex_{set|get}_wol
tcp: Prevent overzealous packetization by SWS logic.
net: RPS needs to depend upon USE_GENERIC_SMP_HELPERS
phylib: fix PHY state machine restart on resume
net: use rcu_barrier() in rollback_registered_many
bonding: correctly process non-linear skbs
ipv4: enable getsockopt() for IP_NODEFRAG
ipv4: force_igmp_version ignored when an IGMPv3 query is received
ppp: potential NULL dereference in ppp_mp_explode()
net/llc: make opt unsigned in llc_ui_setsockopt()
...

+136 -35
+1 -1
MAINTAINERS
··· 1135 1135 M: Jay Cliburn <jcliburn@gmail.com> 1136 1136 M: Chris Snook <chris.snook@gmail.com> 1137 1137 M: Jie Yang <jie.yang@atheros.com> 1138 - L: atl1-devel@lists.sourceforge.net 1138 + L: netdev@vger.kernel.org 1139 1139 W: http://sourceforge.net/projects/atl1 1140 1140 W: http://atl1.sourceforge.net 1141 1141 S: Maintained
+79 -6
drivers/dca/dca-core.c
··· 39 39 40 40 static LIST_HEAD(dca_domains); 41 41 42 + static BLOCKING_NOTIFIER_HEAD(dca_provider_chain); 43 + 44 + static int dca_providers_blocked; 45 + 42 46 static struct pci_bus *dca_pci_rc_from_dev(struct device *dev) 43 47 { 44 48 struct pci_dev *pdev = to_pci_dev(dev); ··· 74 70 kfree(domain); 75 71 } 76 72 73 + static int dca_provider_ioat_ver_3_0(struct device *dev) 74 + { 75 + struct pci_dev *pdev = to_pci_dev(dev); 76 + 77 + return ((pdev->vendor == PCI_VENDOR_ID_INTEL) && 78 + ((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) || 79 + (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) || 80 + (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) || 81 + (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) || 82 + (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) || 83 + (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) || 84 + (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) || 85 + (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7))); 86 + } 87 + 88 + static void unregister_dca_providers(void) 89 + { 90 + struct dca_provider *dca, *_dca; 91 + struct list_head unregistered_providers; 92 + struct dca_domain *domain; 93 + unsigned long flags; 94 + 95 + blocking_notifier_call_chain(&dca_provider_chain, 96 + DCA_PROVIDER_REMOVE, NULL); 97 + 98 + INIT_LIST_HEAD(&unregistered_providers); 99 + 100 + spin_lock_irqsave(&dca_lock, flags); 101 + 102 + if (list_empty(&dca_domains)) { 103 + spin_unlock_irqrestore(&dca_lock, flags); 104 + return; 105 + } 106 + 107 + /* at this point only one domain in the list is expected */ 108 + domain = list_first_entry(&dca_domains, struct dca_domain, node); 109 + if (!domain) 110 + return; 111 + 112 + list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node) { 113 + list_del(&dca->node); 114 + list_add(&dca->node, &unregistered_providers); 115 + } 116 + 117 + dca_free_domain(domain); 118 + 119 + spin_unlock_irqrestore(&dca_lock, flags); 120 + 121 + list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) { 122 + 
dca_sysfs_remove_provider(dca); 123 + list_del(&dca->node); 124 + } 125 + } 126 + 77 127 static struct dca_domain *dca_find_domain(struct pci_bus *rc) 78 128 { 79 129 struct dca_domain *domain; ··· 148 90 domain = dca_find_domain(rc); 149 91 150 92 if (!domain) { 151 - domain = dca_allocate_domain(rc); 152 - if (domain) 153 - list_add(&domain->node, &dca_domains); 93 + if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) { 94 + dca_providers_blocked = 1; 95 + } else { 96 + domain = dca_allocate_domain(rc); 97 + if (domain) 98 + list_add(&domain->node, &dca_domains); 99 + } 154 100 } 155 101 156 102 return domain; ··· 355 293 } 356 294 EXPORT_SYMBOL_GPL(free_dca_provider); 357 295 358 - static BLOCKING_NOTIFIER_HEAD(dca_provider_chain); 359 - 360 296 /** 361 297 * register_dca_provider - register a dca provider 362 298 * @dca - struct created by alloc_dca_provider() ··· 366 306 unsigned long flags; 367 307 struct dca_domain *domain; 368 308 309 + spin_lock_irqsave(&dca_lock, flags); 310 + if (dca_providers_blocked) { 311 + spin_unlock_irqrestore(&dca_lock, flags); 312 + return -ENODEV; 313 + } 314 + spin_unlock_irqrestore(&dca_lock, flags); 315 + 369 316 err = dca_sysfs_add_provider(dca, dev); 370 317 if (err) 371 318 return err; ··· 380 313 spin_lock_irqsave(&dca_lock, flags); 381 314 domain = dca_get_domain(dev); 382 315 if (!domain) { 383 - spin_unlock_irqrestore(&dca_lock, flags); 316 + if (dca_providers_blocked) { 317 + spin_unlock_irqrestore(&dca_lock, flags); 318 + dca_sysfs_remove_provider(dca); 319 + unregister_dca_providers(); 320 + } else { 321 + spin_unlock_irqrestore(&dca_lock, flags); 322 + } 384 323 return -ENODEV; 385 324 } 386 325 list_add(&dca->node, &domain->dca_providers);
+3 -4
drivers/net/3c59x.c
··· 635 635 must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */ 636 636 large_frames:1, /* accept large frames */ 637 637 handling_irq:1; /* private in_irq indicator */ 638 + /* {get|set}_wol operations are already serialized by rtnl. 639 + * no additional locking is required for the enable_wol and acpi_set_WOL() 640 + */ 638 641 int drv_flags; 639 642 u16 status_enable; 640 643 u16 intr_enable; ··· 2942 2939 { 2943 2940 struct vortex_private *vp = netdev_priv(dev); 2944 2941 2945 - spin_lock_irq(&vp->lock); 2946 2942 wol->supported = WAKE_MAGIC; 2947 2943 2948 2944 wol->wolopts = 0; 2949 2945 if (vp->enable_wol) 2950 2946 wol->wolopts |= WAKE_MAGIC; 2951 - spin_unlock_irq(&vp->lock); 2952 2947 } 2953 2948 2954 2949 static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) ··· 2955 2954 if (wol->wolopts & ~WAKE_MAGIC) 2956 2955 return -EINVAL; 2957 2956 2958 - spin_lock_irq(&vp->lock); 2959 2957 if (wol->wolopts & WAKE_MAGIC) 2960 2958 vp->enable_wol = 1; 2961 2959 else 2962 2960 vp->enable_wol = 0; 2963 2961 acpi_set_WOL(dev); 2964 - spin_unlock_irq(&vp->lock); 2965 2962 2966 2963 return 0; 2967 2964 }
+3
drivers/net/bonding/bond_3ad.c
··· 2466 2466 if (!(dev->flags & IFF_MASTER)) 2467 2467 goto out; 2468 2468 2469 + if (!pskb_may_pull(skb, sizeof(struct lacpdu))) 2470 + goto out; 2471 + 2469 2472 read_lock(&bond->lock); 2470 2473 slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev), 2471 2474 orig_dev);
+3
drivers/net/bonding/bond_alb.c
··· 362 362 goto out; 363 363 } 364 364 365 + if (!pskb_may_pull(skb, arp_hdr_len(bond_dev))) 366 + goto out; 367 + 365 368 if (skb->len < sizeof(struct arp_pkt)) { 366 369 pr_debug("Packet is too small to be an ARP\n"); 367 370 goto out;
+2
drivers/net/cxgb3/cxgb3_main.c
··· 2296 2296 case CHELSIO_GET_QSET_NUM:{ 2297 2297 struct ch_reg edata; 2298 2298 2299 + memset(&edata, 0, sizeof(struct ch_reg)); 2300 + 2299 2301 edata.cmd = CHELSIO_GET_QSET_NUM; 2300 2302 edata.val = pi->nqsets; 2301 2303 if (copy_to_user(useraddr, &edata, sizeof(edata)))
+2
drivers/net/eql.c
··· 555 555 equalizer_t *eql; 556 556 master_config_t mc; 557 557 558 + memset(&mc, 0, sizeof(master_config_t)); 559 + 558 560 if (eql_is_master(dev)) { 559 561 eql = netdev_priv(dev); 560 562 mc.max_slaves = eql->max_slaves;
+2 -2
drivers/net/phy/mdio_bus.c
··· 308 308 * may call phy routines that try to grab the same lock, and that may 309 309 * lead to a deadlock. 310 310 */ 311 - if (phydev->attached_dev) 311 + if (phydev->attached_dev && phydev->adjust_link) 312 312 phy_stop_machine(phydev); 313 313 314 314 if (!mdio_bus_phy_may_suspend(phydev)) ··· 331 331 return ret; 332 332 333 333 no_resume: 334 - if (phydev->attached_dev) 334 + if (phydev->attached_dev && phydev->adjust_link) 335 335 phy_start_machine(phydev, NULL); 336 336 337 337 return 0;
+7 -2
drivers/net/ppp_generic.c
··· 1314 1314 hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN; 1315 1315 i = 0; 1316 1316 list_for_each_entry(pch, &ppp->channels, clist) { 1317 - navail += pch->avail = (pch->chan != NULL); 1318 - pch->speed = pch->chan->speed; 1317 + if (pch->chan) { 1318 + pch->avail = 1; 1319 + navail++; 1320 + pch->speed = pch->chan->speed; 1321 + } else { 1322 + pch->avail = 0; 1323 + } 1319 1324 if (pch->avail) { 1320 1325 if (skb_queue_empty(&pch->file.xq) || 1321 1326 !pch->had_frag) {
+2 -3
drivers/net/r8169.c
··· 2934 2934 .hw_start = rtl_hw_start_8168, 2935 2935 .region = 2, 2936 2936 .align = 8, 2937 - .intr_event = SYSErr | LinkChg | RxOverflow | 2937 + .intr_event = SYSErr | RxFIFOOver | LinkChg | RxOverflow | 2938 2938 TxErr | TxOK | RxOK | RxErr, 2939 2939 .napi_event = TxErr | TxOK | RxOK | RxOverflow, 2940 2940 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI, ··· 4625 4625 } 4626 4626 4627 4627 /* Work around for rx fifo overflow */ 4628 - if (unlikely(status & RxFIFOOver) && 4629 - (tp->mac_version == RTL_GIGA_MAC_VER_11)) { 4628 + if (unlikely(status & RxFIFOOver)) { 4630 4629 netif_stop_queue(dev); 4631 4630 rtl8169_tx_timeout(dev); 4632 4631 break;
+2
drivers/net/usb/hso.c
··· 1652 1652 struct uart_icount cnow; 1653 1653 struct hso_tiocmget *tiocmget = serial->tiocmget; 1654 1654 1655 + memset(&icount, 0, sizeof(struct serial_icounter_struct)); 1656 + 1655 1657 if (!tiocmget) 1656 1658 return -ENOENT; 1657 1659 spin_lock_irq(&serial->serial_lock);
+4 -4
include/linux/netpoll.h
··· 63 63 unsigned long flags; 64 64 bool ret = false; 65 65 66 - rcu_read_lock_bh(); 66 + local_irq_save(flags); 67 67 npinfo = rcu_dereference_bh(skb->dev->npinfo); 68 68 69 69 if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags)) 70 70 goto out; 71 71 72 - spin_lock_irqsave(&npinfo->rx_lock, flags); 72 + spin_lock(&npinfo->rx_lock); 73 73 /* check rx_flags again with the lock held */ 74 74 if (npinfo->rx_flags && __netpoll_rx(skb)) 75 75 ret = true; 76 - spin_unlock_irqrestore(&npinfo->rx_lock, flags); 76 + spin_unlock(&npinfo->rx_lock); 77 77 78 78 out: 79 - rcu_read_unlock_bh(); 79 + local_irq_restore(flags); 80 80 return ret; 81 81 } 82 82
+16 -2
include/net/tcp.h
··· 475 475 /* Bound MSS / TSO packet size with the half of the window */ 476 476 static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize) 477 477 { 478 - if (tp->max_window && pktsize > (tp->max_window >> 1)) 479 - return max(tp->max_window >> 1, 68U - tp->tcp_header_len); 478 + int cutoff; 479 + 480 + /* When peer uses tiny windows, there is no use in packetizing 481 + * to sub-MSS pieces for the sake of SWS or making sure there 482 + * are enough packets in the pipe for fast recovery. 483 + * 484 + * On the other hand, for extremely large MSS devices, handling 485 + * smaller than MSS windows in this way does make sense. 486 + */ 487 + if (tp->max_window >= 512) 488 + cutoff = (tp->max_window >> 1); 489 + else 490 + cutoff = tp->max_window; 491 + 492 + if (cutoff && pktsize > cutoff) 493 + return max_t(int, cutoff, 68U - tp->tcp_header_len); 480 494 else 481 495 return pktsize; 482 496 }
+1 -1
net/Kconfig
··· 217 217 218 218 config RPS 219 219 boolean 220 - depends on SMP && SYSFS 220 + depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS 221 221 default y 222 222 223 223 menu "Network testing"
+1 -1
net/core/dev.c
··· 4845 4845 dev = list_first_entry(head, struct net_device, unreg_list); 4846 4846 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); 4847 4847 4848 - synchronize_net(); 4848 + rcu_barrier(); 4849 4849 4850 4850 list_for_each_entry(dev, head, unreg_list) 4851 4851 dev_put(dev);
+1 -1
net/ipv4/igmp.c
··· 834 834 int mark = 0; 835 835 836 836 837 - if (len == 8) { 837 + if (len == 8 || IGMP_V2_SEEN(in_dev)) { 838 838 if (ih->code == 0) { 839 839 /* Alas, old v1 router presents here. */ 840 840
+3
net/ipv4/ip_sockglue.c
··· 1129 1129 case IP_HDRINCL: 1130 1130 val = inet->hdrincl; 1131 1131 break; 1132 + case IP_NODEFRAG: 1133 + val = inet->nodefrag; 1134 + break; 1132 1135 case IP_MTU_DISCOVER: 1133 1136 val = inet->pmtudisc; 1134 1137 break;
+2 -1
net/llc/af_llc.c
··· 1024 1024 { 1025 1025 struct sock *sk = sock->sk; 1026 1026 struct llc_sock *llc = llc_sk(sk); 1027 - int rc = -EINVAL, opt; 1027 + unsigned int opt; 1028 + int rc = -EINVAL; 1028 1029 1029 1030 lock_sock(sk); 1030 1031 if (unlikely(level != SOL_LLC || optlen != sizeof(int)))
+1 -1
net/llc/llc_station.c
··· 689 689 690 690 int __init llc_station_init(void) 691 691 { 692 - u16 rc = -ENOBUFS; 692 + int rc = -ENOBUFS; 693 693 struct sk_buff *skb; 694 694 struct llc_station_state_ev *ev; 695 695
-4
net/sched/sch_atm.c
··· 255 255 error = -EINVAL; 256 256 goto err_out; 257 257 } 258 - if (!list_empty(&flow->list)) { 259 - error = -EEXIST; 260 - goto err_out; 261 - } 262 258 } else { 263 259 int i; 264 260 unsigned long cl;
-1
net/sctp/output.c
··· 92 92 SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__, 93 93 packet, vtag); 94 94 95 - sctp_packet_reset(packet); 96 95 packet->vtag = vtag; 97 96 98 97 if (ecn_capable && sctp_packet_empty(packet)) {
+1 -1
net/xfrm/xfrm_output.c
··· 101 101 err = -EHOSTUNREACH; 102 102 goto error_nolock; 103 103 } 104 - skb_dst_set_noref(skb, dst); 104 + skb_dst_set(skb, dst_clone(dst)); 105 105 x = dst->xfrm; 106 106 } while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL)); 107 107