Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
[IPV4/IPV6]: Setting 0 for unused port field in RAW IP recvmsg().
[IPV4] ipmr: ip multicast route bug fix.
[TG3]: Update version and reldate
[TG3]: Handle tg3_init_rings() failures
[TG3]: Add tg3_restart_hw()
[IPV4]: Clear the whole IPCB, this clears also IPCB(skb)->flags.
[IPV6]: Clean skb cb on IPv6 input.
[NETFILTER]: Demote xt_sctp to EXPERIMENTAL
[NETFILTER]: bridge netfilter: add deferred output hooks to feature-removal-schedule
[NETFILTER]: xt_pkttype: fix mismatches on locally generated packets
[NETFILTER]: SNMP NAT: fix byteorder confusion
[NETFILTER]: conntrack: fix SYSCTL=n compile
[NETFILTER]: nf_queue: handle NF_STOP and unknown verdicts in nf_reinject
[NETFILTER]: H.323 helper: fix possible NULL-ptr dereference

+167 -51
+16
Documentation/feature-removal-schedule.txt
··· 258 258 Who: Jean Delvare <khali@linux-fr.org> 259 259 260 260 --------------------------- 261 + 262 + What: Bridge netfilter deferred IPv4/IPv6 output hook calling 263 + When: January 2007 264 + Why: The deferred output hooks are a layering violation causing unusual 265 + and broken behaviour on bridge devices. Examples of things they 266 + break include QoS classification using the MARK or CLASSIFY targets, 267 + the IPsec policy match and connection tracking with VLANs on a 268 + bridge. Their only use is to enable bridge output port filtering 269 + within iptables with the physdev match, which can also be done by 270 + combining iptables and ebtables using netfilter marks. Until it 271 + is removed the hook deferral is disabled by default and is 272 + only enabled when needed. 273 + 274 + Who: Patrick McHardy <kaber@trash.net> 275 + 276 + ---------------------------
+87 -29
drivers/net/tg3.c
··· 68 68 69 69 #define DRV_MODULE_NAME "tg3" 70 70 #define PFX DRV_MODULE_NAME ": " 71 - #define DRV_MODULE_VERSION "3.62" 72 - #define DRV_MODULE_RELDATE "June 30, 2006" 71 + #define DRV_MODULE_VERSION "3.63" 72 + #define DRV_MODULE_RELDATE "July 25, 2006" 73 73 74 74 #define TG3_DEF_MAC_MODE 0 75 75 #define TG3_DEF_RX_MODE 0 ··· 3590 3590 static int tg3_init_hw(struct tg3 *, int); 3591 3591 static int tg3_halt(struct tg3 *, int, int); 3592 3592 3593 + /* Restart hardware after configuration changes, self-test, etc. 3594 + * Invoked with tp->lock held. 3595 + */ 3596 + static int tg3_restart_hw(struct tg3 *tp, int reset_phy) 3597 + { 3598 + int err; 3599 + 3600 + err = tg3_init_hw(tp, reset_phy); 3601 + if (err) { 3602 + printk(KERN_ERR PFX "%s: Failed to re-initialize device, " 3603 + "aborting.\n", tp->dev->name); 3604 + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 3605 + tg3_full_unlock(tp); 3606 + del_timer_sync(&tp->timer); 3607 + tp->irq_sync = 0; 3608 + netif_poll_enable(tp->dev); 3609 + dev_close(tp->dev); 3610 + tg3_full_lock(tp, 0); 3611 + } 3612 + return err; 3613 + } 3614 + 3593 3615 #ifdef CONFIG_NET_POLL_CONTROLLER 3594 3616 static void tg3_poll_controller(struct net_device *dev) 3595 3617 { ··· 3652 3630 } 3653 3631 3654 3632 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); 3655 - tg3_init_hw(tp, 1); 3633 + if (tg3_init_hw(tp, 1)) 3634 + goto out; 3656 3635 3657 3636 tg3_netif_start(tp); 3658 3637 3659 3638 if (restart_timer) 3660 3639 mod_timer(&tp->timer, jiffies + 1); 3661 3640 3641 + out: 3662 3642 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK; 3663 3643 3664 3644 tg3_full_unlock(tp); ··· 4148 4124 static int tg3_change_mtu(struct net_device *dev, int new_mtu) 4149 4125 { 4150 4126 struct tg3 *tp = netdev_priv(dev); 4127 + int err; 4151 4128 4152 4129 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) 4153 4130 return -EINVAL; ··· 4169 4144 4170 4145 tg3_set_mtu(dev, tp, new_mtu); 4171 4146 4172 - tg3_init_hw(tp, 0); 4147 + err = tg3_restart_hw(tp, 0); 4173 4148 
4174 - tg3_netif_start(tp); 4149 + if (!err) 4150 + tg3_netif_start(tp); 4175 4151 4176 4152 tg3_full_unlock(tp); 4177 4153 4178 - return 0; 4154 + return err; 4179 4155 } 4180 4156 4181 4157 /* Free up pending packets in all rx/tx rings. ··· 4258 4232 * end up in the driver. tp->{tx,}lock are held and thus 4259 4233 * we may not sleep. 4260 4234 */ 4261 - static void tg3_init_rings(struct tg3 *tp) 4235 + static int tg3_init_rings(struct tg3 *tp) 4262 4236 { 4263 4237 u32 i; 4264 4238 ··· 4307 4281 4308 4282 /* Now allocate fresh SKBs for each rx ring. */ 4309 4283 for (i = 0; i < tp->rx_pending; i++) { 4310 - if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, 4311 - -1, i) < 0) 4284 + if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) { 4285 + printk(KERN_WARNING PFX 4286 + "%s: Using a smaller RX standard ring, " 4287 + "only %d out of %d buffers were allocated " 4288 + "successfully.\n", 4289 + tp->dev->name, i, tp->rx_pending); 4290 + if (i == 0) 4291 + return -ENOMEM; 4292 + tp->rx_pending = i; 4312 4293 break; 4294 + } 4313 4295 } 4314 4296 4315 4297 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) { 4316 4298 for (i = 0; i < tp->rx_jumbo_pending; i++) { 4317 4299 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO, 4318 - -1, i) < 0) 4300 + -1, i) < 0) { 4301 + printk(KERN_WARNING PFX 4302 + "%s: Using a smaller RX jumbo ring, " 4303 + "only %d out of %d buffers were " 4304 + "allocated successfully.\n", 4305 + tp->dev->name, i, tp->rx_jumbo_pending); 4306 + if (i == 0) { 4307 + tg3_free_rings(tp); 4308 + return -ENOMEM; 4309 + } 4310 + tp->rx_jumbo_pending = i; 4319 4311 break; 4312 + } 4320 4313 } 4321 4314 } 4315 + return 0; 4322 4316 } 4323 4317 4324 4318 /* ··· 5861 5815 { 5862 5816 struct tg3 *tp = netdev_priv(dev); 5863 5817 struct sockaddr *addr = p; 5818 + int err = 0; 5864 5819 5865 5820 if (!is_valid_ether_addr(addr->sa_data)) 5866 5821 return -EINVAL; ··· 5879 5832 tg3_full_lock(tp, 1); 5880 5833 5881 5834 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 5882 - 
tg3_init_hw(tp, 0); 5883 - 5884 - tg3_netif_start(tp); 5835 + err = tg3_restart_hw(tp, 0); 5836 + if (!err) 5837 + tg3_netif_start(tp); 5885 5838 tg3_full_unlock(tp); 5886 5839 } else { 5887 5840 spin_lock_bh(&tp->lock); ··· 5889 5842 spin_unlock_bh(&tp->lock); 5890 5843 } 5891 5844 5892 - return 0; 5845 + return err; 5893 5846 } 5894 5847 5895 5848 /* tp->lock is held. */ ··· 5989 5942 * can only do this after the hardware has been 5990 5943 * successfully reset. 5991 5944 */ 5992 - tg3_init_rings(tp); 5945 + err = tg3_init_rings(tp); 5946 + if (err) 5947 + return err; 5993 5948 5994 5949 /* This value is determined during the probe time DMA 5995 5950 * engine test, tg3_test_dma. ··· 8005 7956 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) 8006 7957 { 8007 7958 struct tg3 *tp = netdev_priv(dev); 8008 - int irq_sync = 0; 7959 + int irq_sync = 0, err = 0; 8009 7960 8010 7961 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) || 8011 7962 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) || ··· 8029 7980 8030 7981 if (netif_running(dev)) { 8031 7982 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 8032 - tg3_init_hw(tp, 1); 8033 - tg3_netif_start(tp); 7983 + err = tg3_restart_hw(tp, 1); 7984 + if (!err) 7985 + tg3_netif_start(tp); 8034 7986 } 8035 7987 8036 7988 tg3_full_unlock(tp); 8037 7989 8038 - return 0; 7990 + return err; 8039 7991 } 8040 7992 8041 7993 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) ··· 8051 8001 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) 8052 8002 { 8053 8003 struct tg3 *tp = netdev_priv(dev); 8054 - int irq_sync = 0; 8004 + int irq_sync = 0, err = 0; 8055 8005 8056 8006 if (netif_running(dev)) { 8057 8007 tg3_netif_stop(tp); ··· 8075 8025 8076 8026 if (netif_running(dev)) { 8077 8027 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 8078 - tg3_init_hw(tp, 1); 8079 - tg3_netif_start(tp); 8028 + err = tg3_restart_hw(tp, 1); 8029 + if (!err) 
8030 + tg3_netif_start(tp); 8080 8031 } 8081 8032 8082 8033 tg3_full_unlock(tp); 8083 8034 8084 - return 0; 8035 + return err; 8085 8036 } 8086 8037 8087 8038 static u32 tg3_get_rx_csum(struct net_device *dev) ··· 8717 8666 if (!netif_running(tp->dev)) 8718 8667 return TG3_LOOPBACK_FAILED; 8719 8668 8720 - tg3_reset_hw(tp, 1); 8669 + err = tg3_reset_hw(tp, 1); 8670 + if (err) 8671 + return TG3_LOOPBACK_FAILED; 8721 8672 8722 8673 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) 8723 8674 err |= TG3_MAC_LOOPBACK_FAILED; ··· 8793 8740 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 8794 8741 if (netif_running(dev)) { 8795 8742 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 8796 - tg3_init_hw(tp, 1); 8797 - tg3_netif_start(tp); 8743 + if (!tg3_restart_hw(tp, 1)) 8744 + tg3_netif_start(tp); 8798 8745 } 8799 8746 8800 8747 tg3_full_unlock(tp); ··· 11752 11699 tg3_full_lock(tp, 0); 11753 11700 11754 11701 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 11755 - tg3_init_hw(tp, 1); 11702 + if (tg3_restart_hw(tp, 1)) 11703 + goto out; 11756 11704 11757 11705 tp->timer.expires = jiffies + tp->timer_offset; 11758 11706 add_timer(&tp->timer); ··· 11761 11707 netif_device_attach(dev); 11762 11708 tg3_netif_start(tp); 11763 11709 11710 + out: 11764 11711 tg3_full_unlock(tp); 11765 11712 } 11766 11713 ··· 11788 11733 tg3_full_lock(tp, 0); 11789 11734 11790 11735 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; 11791 - tg3_init_hw(tp, 1); 11736 + err = tg3_restart_hw(tp, 1); 11737 + if (err) 11738 + goto out; 11792 11739 11793 11740 tp->timer.expires = jiffies + tp->timer_offset; 11794 11741 add_timer(&tp->timer); 11795 11742 11796 11743 tg3_netif_start(tp); 11797 11744 11745 + out: 11798 11746 tg3_full_unlock(tp); 11799 11747 11800 - return 0; 11748 + return err; 11801 11749 } 11802 11750 11803 11751 static struct pci_driver tg3_driver = {
+2
include/linux/netfilter_bridge.h
··· 79 79 __u32 ipv4; 80 80 } daddr; 81 81 }; 82 + 83 + extern int brnf_deferred_hooks; 82 84 #endif /* CONFIG_BRIDGE_NETFILTER */ 83 85 84 86 #endif /* __KERNEL__ */
+5
net/bridge/br_netfilter.c
··· 61 61 #define brnf_filter_vlan_tagged 1 62 62 #endif 63 63 64 + int brnf_deferred_hooks; 65 + EXPORT_SYMBOL_GPL(brnf_deferred_hooks); 66 + 64 67 static __be16 inline vlan_proto(const struct sk_buff *skb) 65 68 { 66 69 return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; ··· 892 889 if (ip->version == 4 && !brnf_call_iptables) 893 890 return NF_ACCEPT; 894 891 else if (ip->version == 6 && !brnf_call_ip6tables) 892 + return NF_ACCEPT; 893 + else if (!brnf_deferred_hooks) 895 894 return NF_ACCEPT; 896 895 #endif 897 896 if (hook == NF_IP_POST_ROUTING)
+1 -1
net/ipv4/ip_input.c
··· 429 429 } 430 430 431 431 /* Remove any debris in the socket control block */ 432 - memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); 432 + memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); 433 433 434 434 return NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, dev, NULL, 435 435 ip_rcv_finish);
+13 -6
net/ipv4/ipmr.c
··· 1578 1578 cache = ipmr_cache_find(rt->rt_src, rt->rt_dst); 1579 1579 1580 1580 if (cache==NULL) { 1581 + struct sk_buff *skb2; 1581 1582 struct net_device *dev; 1582 1583 int vif; 1583 1584 ··· 1592 1591 read_unlock(&mrt_lock); 1593 1592 return -ENODEV; 1594 1593 } 1595 - skb->nh.raw = skb_push(skb, sizeof(struct iphdr)); 1596 - skb->nh.iph->ihl = sizeof(struct iphdr)>>2; 1597 - skb->nh.iph->saddr = rt->rt_src; 1598 - skb->nh.iph->daddr = rt->rt_dst; 1599 - skb->nh.iph->version = 0; 1600 - err = ipmr_cache_unresolved(vif, skb); 1594 + skb2 = skb_clone(skb, GFP_ATOMIC); 1595 + if (!skb2) { 1596 + read_unlock(&mrt_lock); 1597 + return -ENOMEM; 1598 + } 1599 + 1600 + skb2->nh.raw = skb_push(skb2, sizeof(struct iphdr)); 1601 + skb2->nh.iph->ihl = sizeof(struct iphdr)>>2; 1602 + skb2->nh.iph->saddr = rt->rt_src; 1603 + skb2->nh.iph->daddr = rt->rt_dst; 1604 + skb2->nh.iph->version = 0; 1605 + err = ipmr_cache_unresolved(vif, skb2); 1601 1606 read_unlock(&mrt_lock); 1602 1607 return err; 1603 1608 }
+1 -1
net/ipv4/netfilter/ip_conntrack_helper_h323.c
··· 1200 1200 tuple.dst.protonum = IPPROTO_TCP; 1201 1201 1202 1202 exp = __ip_conntrack_expect_find(&tuple); 1203 - if (exp->master == ct) 1203 + if (exp && exp->master == ct) 1204 1204 return exp; 1205 1205 return NULL; 1206 1206 }
+2 -2
net/ipv4/netfilter/ip_conntrack_standalone.c
··· 534 534 535 535 /* Sysctl support */ 536 536 537 + int ip_conntrack_checksum = 1; 538 + 537 539 #ifdef CONFIG_SYSCTL 538 540 539 541 /* From ip_conntrack_core.c */ ··· 569 567 /* Log invalid packets of a given protocol */ 570 568 static int log_invalid_proto_min = 0; 571 569 static int log_invalid_proto_max = 255; 572 - 573 - int ip_conntrack_checksum = 1; 574 570 575 571 static struct ctl_table_header *ip_ct_sysctl_header; 576 572
+2 -2
net/ipv4/netfilter/ip_nat_snmp_basic.c
··· 1255 1255 struct udphdr *udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl); 1256 1256 1257 1257 /* SNMP replies and originating SNMP traps get mangled */ 1258 - if (udph->source == ntohs(SNMP_PORT) && dir != IP_CT_DIR_REPLY) 1258 + if (udph->source == htons(SNMP_PORT) && dir != IP_CT_DIR_REPLY) 1259 1259 return NF_ACCEPT; 1260 - if (udph->dest == ntohs(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL) 1260 + if (udph->dest == htons(SNMP_TRAP_PORT) && dir != IP_CT_DIR_ORIGINAL) 1261 1261 return NF_ACCEPT; 1262 1262 1263 1263 /* No NAT? */
+1
net/ipv4/raw.c
··· 609 609 if (sin) { 610 610 sin->sin_family = AF_INET; 611 611 sin->sin_addr.s_addr = skb->nh.iph->saddr; 612 + sin->sin_port = 0; 612 613 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); 613 614 } 614 615 if (inet->cmsg_flags)
+2
net/ipv6/ip6_input.c
··· 71 71 goto out; 72 72 } 73 73 74 + memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); 75 + 74 76 /* 75 77 * Store incoming device index. When the packet will 76 78 * be queued, we cannot refer to skb->dev anymore.
+1
net/ipv6/raw.c
··· 411 411 /* Copy the address. */ 412 412 if (sin6) { 413 413 sin6->sin6_family = AF_INET6; 414 + sin6->sin6_port = 0; 414 415 ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr); 415 416 sin6->sin6_flowinfo = 0; 416 417 sin6->sin6_scope_id = 0;
+2 -2
net/netfilter/Kconfig
··· 386 386 <file:Documentation/modules.txt>. If unsure, say `N'. 387 387 388 388 config NETFILTER_XT_MATCH_SCTP 389 - tristate '"sctp" protocol match support' 390 - depends on NETFILTER_XTABLES 389 + tristate '"sctp" protocol match support (EXPERIMENTAL)' 390 + depends on NETFILTER_XTABLES && EXPERIMENTAL 391 391 help 392 392 With this option enabled, you will be able to use the 393 393 `sctp' match in order to match on SCTP source/destination ports
+2 -2
net/netfilter/nf_conntrack_standalone.c
··· 428 428 429 429 /* Sysctl support */ 430 430 431 + int nf_conntrack_checksum = 1; 432 + 431 433 #ifdef CONFIG_SYSCTL 432 434 433 435 /* From nf_conntrack_core.c */ ··· 460 458 /* Log invalid packets of a given protocol */ 461 459 static int log_invalid_proto_min = 0; 462 460 static int log_invalid_proto_max = 255; 463 - 464 - int nf_conntrack_checksum = 1; 465 461 466 462 static struct ctl_table_header *nf_ct_sysctl_header; 467 463
+4 -5
net/netfilter/nf_queue.c
··· 219 219 220 220 switch (verdict & NF_VERDICT_MASK) { 221 221 case NF_ACCEPT: 222 + case NF_STOP: 222 223 info->okfn(skb); 224 + case NF_STOLEN: 223 225 break; 224 - 225 226 case NF_QUEUE: 226 227 if (!nf_queue(&skb, elem, info->pf, info->hook, 227 228 info->indev, info->outdev, info->okfn, 228 229 verdict >> NF_VERDICT_BITS)) 229 230 goto next_hook; 230 231 break; 232 + default: 233 + kfree_skb(skb); 231 234 } 232 235 rcu_read_unlock(); 233 - 234 - if (verdict == NF_DROP) 235 - kfree_skb(skb); 236 - 237 236 kfree(info); 238 237 return; 239 238 }
+15
net/netfilter/xt_physdev.c
··· 113 113 if (!(info->bitmask & XT_PHYSDEV_OP_MASK) || 114 114 info->bitmask & ~XT_PHYSDEV_OP_MASK) 115 115 return 0; 116 + if (brnf_deferred_hooks == 0 && 117 + info->bitmask & XT_PHYSDEV_OP_OUT && 118 + (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) || 119 + info->invert & XT_PHYSDEV_OP_BRIDGED) && 120 + hook_mask & ((1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_FORWARD) | 121 + (1 << NF_IP_POST_ROUTING))) { 122 + printk(KERN_WARNING "physdev match: using --physdev-out in the " 123 + "OUTPUT, FORWARD and POSTROUTING chains for non-bridged " 124 + "traffic is deprecated and breaks other things, it will " 125 + "be removed in January 2007. See Documentation/" 126 + "feature-removal-schedule.txt for details. This doesn't " 127 + "affect you in case you're using it for purely bridged " 128 + "traffic.\n"); 129 + brnf_deferred_hooks = 1; 130 + } 116 131 return 1; 117 132 } 118 133
+11 -1
net/netfilter/xt_pkttype.c
··· 9 9 #include <linux/skbuff.h> 10 10 #include <linux/if_ether.h> 11 11 #include <linux/if_packet.h> 12 + #include <linux/in.h> 13 + #include <linux/ip.h> 12 14 13 15 #include <linux/netfilter/xt_pkttype.h> 14 16 #include <linux/netfilter/x_tables.h> ··· 30 28 unsigned int protoff, 31 29 int *hotdrop) 32 30 { 31 + u_int8_t type; 33 32 const struct xt_pkttype_info *info = matchinfo; 34 33 35 - return (skb->pkt_type == info->pkttype) ^ info->invert; 34 + if (skb->pkt_type == PACKET_LOOPBACK) 35 + type = (MULTICAST(skb->nh.iph->daddr) 36 + ? PACKET_MULTICAST 37 + : PACKET_BROADCAST); 38 + else 39 + type = skb->pkt_type; 40 + 41 + return (type == info->pkttype) ^ info->invert; 36 42 } 37 43 38 44 static struct xt_match pkttype_match = {