Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
drivers/net/gianfar.c

+76 -12
+1
drivers/atm/solos-pci.c
@@ -685,6 +685,7 @@
 out_release_regions:
 	pci_release_regions(dev);
 out:
+	kfree(card);
 	return err;
 }
 
+2
drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -4599,6 +4599,7 @@
 			printk(KERN_ERR "%s: no memory for coeffs\n",
 			    __func__);
 			ret = -ENOMEM;
+			kfree(bch);
 			goto free_chan;
 		}
 		bch->nr = ch;
@@ -4768,6 +4767,7 @@
 			printk(KERN_ERR "%s: no memory for coeffs\n",
 			    __func__);
 			ret = -ENOMEM;
+			kfree(bch);
 			goto free_chan;
 		}
 		bch->nr = ch + 1;
+6
drivers/net/gianfar.c
@@ -1668,6 +1668,12 @@
 	if (napi_schedule_prep(&priv->napi)) {
 		gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
 		__napi_schedule(&priv->napi);
+	} else {
+		/*
+		 * Clear IEVENT, so interrupts aren't called again
+		 * because of the packets that have already arrived.
+		 */
+		gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
 	}
 
 	spin_unlock(&priv->rxlock);
+1 -1
drivers/net/netxen/netxen_nic.h
@@ -1203,7 +1203,7 @@
 #define NETXEN_IS_MSI_FAMILY(adapter) \
 	((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED))
 
-#define MSIX_ENTRIES_PER_ADAPTER	8
+#define MSIX_ENTRIES_PER_ADAPTER	1
 #define NETXEN_MSIX_TBL_SPACE		8192
 #define NETXEN_PCI_REG_MSIX_TBL		0x44
 
+12 -1
drivers/net/netxen/netxen_nic_main.c
@@ -76,6 +76,7 @@
 #endif
 static irqreturn_t netxen_intr(int irq, void *data);
 static irqreturn_t netxen_msi_intr(int irq, void *data);
+static irqreturn_t netxen_msix_intr(int irq, void *data);
 
 /* PCI Device ID Table */
 #define ENTRY(device) \
@@ -1085,7 +1084,9 @@
 		for (ring = 0; ring < adapter->max_rds_rings; ring++)
 			netxen_post_rx_buffers(adapter, ctx, ring);
 	}
-	if (NETXEN_IS_MSI_FAMILY(adapter))
+	if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
+		handler = netxen_msix_intr;
+	else if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
 		handler = netxen_msi_intr;
 	else {
 		flags |= IRQF_SHARED;
@@ -1610,6 +1607,14 @@
 	/* clear interrupt */
 	adapter->pci_write_immediate(adapter,
 			msi_tgt_status[adapter->ahw.pci_func], 0xffffffff);
+
+	napi_schedule(&adapter->napi);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t netxen_msix_intr(int irq, void *data)
+{
+	struct netxen_adapter *adapter = data;
 
 	napi_schedule(&adapter->napi);
 	return IRQ_HANDLED;
+2 -1
drivers/net/tulip/de2104x.c
@@ -464,13 +464,14 @@
 			drop = 1;
 
 rx_next:
-		de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
 		if (rx_tail == (DE_RX_RING_SIZE - 1))
 			de->rx_ring[rx_tail].opts2 =
 				cpu_to_le32(RingEnd | de->rx_buf_sz);
 		else
 			de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
 		de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
+		wmb();
+		de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
 		rx_tail = NEXT_RX(rx_tail);
 	}
 
+8 -2
drivers/net/tun.c
@@ -260,10 +260,16 @@
 
 	nexact = n;
 
-	/* The rest is hashed */
+	/* Remaining multicast addresses are hashed,
+	 * unicast will leave the filter disabled. */
 	memset(filter->mask, 0, sizeof(filter->mask));
-	for (; n < uf.count; n++)
+	for (; n < uf.count; n++) {
+		if (!is_multicast_ether_addr(addr[n].u)) {
+			err = 0; /* no filter */
+			goto done;
+		}
 		addr_hash_set(filter->mask, addr[n].u);
+	}
 
 	/* For ALLMULTI just set the mask to all ones.
 	 * This overrides the mask populated above. */
+6 -1
net/bridge/br_forward.c
@@ -67,6 +67,11 @@
 {
 	struct net_device *indev;
 
+	if (skb_warn_if_lro(skb)) {
+		kfree_skb(skb);
+		return;
+	}
+
 	indev = skb->dev;
 	skb->dev = to->dev;
 	skb_forward_csum(skb);
@@ -94,7 +89,7 @@
 /* called with rcu_read_lock */
 void br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
 {
-	if (!skb_warn_if_lro(skb) && should_deliver(to, skb)) {
+	if (should_deliver(to, skb)) {
 		__br_forward(to, skb);
 		return;
 	}
+1 -1
net/ipv6/ip6_tunnel.c
@@ -249,8 +249,8 @@
 	}
 
 	t = netdev_priv(dev);
-	ip6_tnl_dev_init(dev);
 	t->parms = *p;
+	ip6_tnl_dev_init(dev);
 
 	if ((err = register_netdevice(dev)) < 0)
 		goto failed_free;
+23 -2
net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -49,8 +49,19 @@
 static const u_int8_t invmap[] = {
 	[ICMPV6_ECHO_REQUEST - 128]	= ICMPV6_ECHO_REPLY + 1,
 	[ICMPV6_ECHO_REPLY - 128]	= ICMPV6_ECHO_REQUEST + 1,
-	[ICMPV6_NI_QUERY - 128]		= ICMPV6_NI_QUERY + 1,
-	[ICMPV6_NI_REPLY - 128]		= ICMPV6_NI_REPLY +1
+	[ICMPV6_NI_QUERY - 128]		= ICMPV6_NI_REPLY + 1,
+	[ICMPV6_NI_REPLY - 128]		= ICMPV6_NI_QUERY +1
+};
+
+static const u_int8_t noct_valid_new[] = {
+	[ICMPV6_MGM_QUERY - 130] = 1,
+	[ICMPV6_MGM_REPORT -130] = 1,
+	[ICMPV6_MGM_REDUCTION - 130] = 1,
+	[NDISC_ROUTER_SOLICITATION - 130] = 1,
+	[NDISC_ROUTER_ADVERTISEMENT - 130] = 1,
+	[NDISC_NEIGHBOUR_SOLICITATION - 130] = 1,
+	[NDISC_NEIGHBOUR_ADVERTISEMENT - 130] = 1,
+	[ICMPV6_MLD2_REPORT - 130] = 1
 };
 
 static bool icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple,
@@ -189,6 +178,7 @@
 {
 	const struct icmp6hdr *icmp6h;
 	struct icmp6hdr _ih;
+	int type;
 
 	icmp6h = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih);
 	if (icmp6h == NULL) {
@@ -204,6 +192,15 @@
 		nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL,
 			      "nf_ct_icmpv6: ICMPv6 checksum failed\n");
 		return -NF_ACCEPT;
+	}
+
+	type = icmp6h->icmp6_type - 130;
+	if (type >= 0 && type < sizeof(noct_valid_new) &&
+	    noct_valid_new[type]) {
+		skb->nfct = &nf_conntrack_untracked.ct_general;
+		skb->nfctinfo = IP_CT_NEW;
+		nf_conntrack_get(skb->nfct);
+		return NF_ACCEPT;
 	}
 
 	/* is not error message ? */
+13 -2
net/netfilter/nf_conntrack_netlink.c
@@ -434,7 +434,7 @@
 	} else
 		return NOTIFY_DONE;
 
-	if (!nfnetlink_has_listeners(group))
+	if (!item->report && !nfnetlink_has_listeners(group))
 		return NOTIFY_DONE;
 
 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
@@ -1215,6 +1215,16 @@
 		}
 	}
 
+#ifdef CONFIG_NF_NAT_NEEDED
+	if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
+		err = ctnetlink_change_nat_seq_adj(ct, cda);
+		if (err < 0) {
+			rcu_read_unlock();
+			goto err;
+		}
+	}
+#endif
+
 	if (cda[CTA_PROTOINFO]) {
 		err = ctnetlink_change_protoinfo(ct, cda);
 		if (err < 0) {
@@ -1502,7 +1492,8 @@
 	} else
 		return NOTIFY_DONE;
 
-	if (!nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW))
+	if (!item->report &&
+	    !nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW))
 		return NOTIFY_DONE;
 
 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
+1 -1
net/netfilter/xt_sctp.c
@@ -105,7 +105,7 @@
 
 	switch (chunk_match_type) {
 	case SCTP_CHUNK_MATCH_ALL:
-		return SCTP_CHUNKMAP_IS_CLEAR(info->chunkmap);
+		return SCTP_CHUNKMAP_IS_CLEAR(chunkmapcopy);
 	case SCTP_CHUNK_MATCH_ANY:
 		return false;
 	case SCTP_CHUNK_MATCH_ONLY: