Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
[BNX2]: Add PHY workaround for 5709 A1.
[PPP] L2TP: Fix skb handling in pppol2tp_xmit
[PPP] L2TP: Fix skb handling in pppol2tp_recv_core
[PPP] L2TP: Disallow non-UDP datagram sockets
[PPP] pppoe: Fix double-free on skb after transmit failure
[PKT_SCHED]: Fix 'SFQ qdisc crashes with limit of 2 packets'
[NETFILTER]: MAINTAINERS update
[NETFILTER]: nfnetlink_log: fix sending of multipart messages

+73 -84
+3 -3
MAINTAINERS
···
 P:	Jozsef Kadlecsik
 P:	Patrick McHardy
 M:	kaber@trash.net
-L:	netfilter-devel@lists.netfilter.org
-L:	netfilter@lists.netfilter.org (subscribers-only)
+L:	netfilter-devel@vger.kernel.org
+L:	netfilter@vger.kernel.org
 L:	coreteam@netfilter.org
 W:	http://www.netfilter.org/
 W:	http://www.iptables.org/
···
 P:	Hideaki YOSHIFUJI
 M:	yoshfuji@linux-ipv6.org
 P:	Patrick McHardy
-M:	kaber@coreworks.de
+M:	kaber@trash.net
 L:	netdev@vger.kernel.org
 T:	git kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6.git
 S:	Maintained
+4 -3
drivers/net/bnx2.c
···
 
 #define DRV_MODULE_NAME		"bnx2"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"1.6.4"
-#define DRV_MODULE_RELDATE	"August 3, 2007"
+#define DRV_MODULE_VERSION	"1.6.5"
+#define DRV_MODULE_RELDATE	"September 20, 2007"
 
 #define RUN_AT(x) (jiffies + (x))
 
···
 	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
 		   CHIP_NUM(bp) == CHIP_NUM_5708)
 		bp->phy_flags |= PHY_CRC_FIX_FLAG;
-	else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
+	else if (CHIP_ID(bp) == CHIP_ID_5709_A0 ||
+		 CHIP_ID(bp) == CHIP_ID_5709_A1)
 		bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
 
 	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
+1 -2
drivers/net/pppoe.c
···
 	dev->hard_header(skb, dev, ETH_P_PPP_SES,
 			 po->pppoe_pa.remote, NULL, data_len);
 
-	if (dev_queue_xmit(skb) < 0)
-		goto abort;
+	dev_queue_xmit(skb);
 
 	return 1;
 
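For context on the pppoe fix: dev_queue_xmit() consumes the skb whether or not it succeeds, so checking its return value and then jumping to an error path that frees the skb again is a double free. A minimal sketch of the ownership rule, using a hypothetical helper name (this is not code from pppoe.c):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helper, for illustration only: once dev_queue_xmit() is
 * called, the stack owns the skb on both the success and the error path,
 * so the caller must not free or reuse it afterwards.
 */
static int xmit_and_forget(struct sk_buff *skb)
{
	/* Broken pattern removed by the pppoe change:
	 *
	 *	if (dev_queue_xmit(skb) < 0)
	 *		goto abort;	// abort path freed the skb a second time
	 */
	dev_queue_xmit(skb);
	return 1;	/* skb handled; nothing left for the caller to free */
}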
+53 -65
drivers/net/pppol2tp.c
···
 	u16 hdrflags;
 	u16 tunnel_id, session_id;
 	int length;
-	struct udphdr *uh;
+	int offset;
 
 	tunnel = pppol2tp_sock_to_tunnel(sock);
 	if (tunnel == NULL)
 		goto error;
 
+	/* UDP always verifies the packet length. */
+	__skb_pull(skb, sizeof(struct udphdr));
+
 	/* Short packet? */
-	if (skb->len < sizeof(struct udphdr)) {
+	if (!pskb_may_pull(skb, 12)) {
 		PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
 		       "%s: recv short packet (len=%d)\n", tunnel->name, skb->len);
 		goto error;
 	}
 
 	/* Point to L2TP header */
-	ptr = skb->data + sizeof(struct udphdr);
+	ptr = skb->data;
 
 	/* Get L2TP header flags */
 	hdrflags = ntohs(*(__be16*)ptr);
 
 	/* Trace packet contents, if enabled */
 	if (tunnel->debug & PPPOL2TP_MSG_DATA) {
+		length = min(16u, skb->len);
+		if (!pskb_may_pull(skb, length))
+			goto error;
+
 		printk(KERN_DEBUG "%s: recv: ", tunnel->name);
 
-		for (length = 0; length < 16; length++)
-			printk(" %02X", ptr[length]);
+		offset = 0;
+		do {
+			printk(" %02X", ptr[offset]);
+		} while (++offset < length);
+
 		printk("\n");
 	}
 
 	/* Get length of L2TP packet */
-	uh = (struct udphdr *) skb_transport_header(skb);
-	length = ntohs(uh->len) - sizeof(struct udphdr);
-
-	/* Too short? */
-	if (length < 12) {
-		PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
-		       "%s: recv short L2TP packet (len=%d)\n", tunnel->name, length);
-		goto error;
-	}
+	length = skb->len;
 
 	/* If type is control packet, it is handled by userspace. */
 	if (hdrflags & L2TP_HDRFLAG_T) {
···
 		       "%s: recv data has no seq numbers when required. "
 		       "Discarding\n", session->name);
 		session->stats.rx_seq_discards++;
-		session->stats.rx_errors++;
 		goto discard;
 	}
 
···
 		       "%s: recv data has no seq numbers when required. "
 		       "Discarding\n", session->name);
 		session->stats.rx_seq_discards++;
-		session->stats.rx_errors++;
 		goto discard;
 	}
 
···
 	}
 
 	/* If offset bit set, skip it. */
-	if (hdrflags & L2TP_HDRFLAG_O)
-		ptr += 2 + ntohs(*(__be16 *) ptr);
+	if (hdrflags & L2TP_HDRFLAG_O) {
+		offset = ntohs(*(__be16 *)ptr);
+		skb->transport_header += 2 + offset;
+		if (!pskb_may_pull(skb, skb_transport_offset(skb) + 2))
+			goto discard;
+	}
 
-	skb_pull(skb, ptr - skb->data);
+	__skb_pull(skb, skb_transport_offset(skb));
 
 	/* Skip PPP header, if present.  In testing, Microsoft L2TP clients
 	 * don't send the PPP header (PPP header compression enabled), but
···
 	 */
 	if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) {
 		session->stats.rx_seq_discards++;
-		session->stats.rx_errors++;
 		PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
 		       "%s: oos pkt %hu len %d discarded, "
 		       "waiting for %hu, reorder_q_len=%d\n",
···
 	return 0;
 
 discard:
+	session->stats.rx_errors++;
 	kfree_skb(skb);
 	sock_put(session->sock);
 
···
 	int data_len = skb->len;
 	struct inet_sock *inet;
 	__wsum csum = 0;
-	struct sk_buff *skb2 = NULL;
 	struct udphdr *uh;
 	unsigned int len;
 
···
 	 */
 	headroom = NET_SKB_PAD + sizeof(struct iphdr) +
 		sizeof(struct udphdr) + hdr_len + sizeof(ppph);
-	if (skb_headroom(skb) < headroom) {
-		skb2 = skb_realloc_headroom(skb, headroom);
-		if (skb2 == NULL)
-			goto abort;
-	} else
-		skb2 = skb;
-
-	/* Check that the socket has room */
-	if (atomic_read(&sk_tun->sk_wmem_alloc) < sk_tun->sk_sndbuf)
-		skb_set_owner_w(skb2, sk_tun);
-	else
-		goto discard;
+	if (skb_cow_head(skb, headroom))
+		goto abort;
 
 	/* Setup PPP header */
-	skb_push(skb2, sizeof(ppph));
-	skb2->data[0] = ppph[0];
-	skb2->data[1] = ppph[1];
+	__skb_push(skb, sizeof(ppph));
+	skb->data[0] = ppph[0];
+	skb->data[1] = ppph[1];
 
 	/* Setup L2TP header */
-	skb_push(skb2, hdr_len);
-	pppol2tp_build_l2tp_header(session, skb2->data);
+	pppol2tp_build_l2tp_header(session, __skb_push(skb, hdr_len));
 
 	/* Setup UDP header */
 	inet = inet_sk(sk_tun);
-	skb_push(skb2, sizeof(struct udphdr));
-	skb_reset_transport_header(skb2);
-	uh = (struct udphdr *) skb2->data;
+	__skb_push(skb, sizeof(*uh));
+	skb_reset_transport_header(skb);
+	uh = udp_hdr(skb);
 	uh->source = inet->sport;
 	uh->dest = inet->dport;
 	uh->len = htons(sizeof(struct udphdr) + hdr_len + sizeof(ppph) + data_len);
 	uh->check = 0;
 
-	/* Calculate UDP checksum if configured to do so */
+	/* *BROKEN* Calculate UDP checksum if configured to do so */
 	if (sk_tun->sk_no_check != UDP_CSUM_NOXMIT)
-		csum = udp_csum_outgoing(sk_tun, skb2);
+		csum = udp_csum_outgoing(sk_tun, skb);
 
 	/* Debug */
 	if (session->send_seq)
···
 
 	if (session->debug & PPPOL2TP_MSG_DATA) {
 		int i;
-		unsigned char *datap = skb2->data;
+		unsigned char *datap = skb->data;
 
 		printk(KERN_DEBUG "%s: xmit:", session->name);
 		for (i = 0; i < data_len; i++) {
···
 		printk("\n");
 	}
 
-	memset(&(IPCB(skb2)->opt), 0, sizeof(IPCB(skb2)->opt));
-	IPCB(skb2)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
-			       IPSKB_REROUTED);
-	nf_reset(skb2);
+	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
+			      IPSKB_REROUTED);
+	nf_reset(skb);
 
 	/* Get routing info from the tunnel socket */
-	dst_release(skb2->dst);
-	skb2->dst = sk_dst_get(sk_tun);
+	dst_release(skb->dst);
+	skb->dst = sk_dst_get(sk_tun);
 
 	/* Queue the packet to IP for output */
-	len = skb2->len;
-	rc = ip_queue_xmit(skb2, 1);
+	len = skb->len;
+	rc = ip_queue_xmit(skb, 1);
 
 	/* Update stats */
 	if (rc >= 0) {
···
 		session->stats.tx_errors++;
 	}
 
-	/* Free the original skb */
-	kfree_skb(skb);
-
 	return 1;
 
-discard:
-	/* Free the new skb. Caller will free original skb. */
-	if (skb2 != skb)
-		kfree_skb(skb2);
 abort:
-	return 0;
+	/* Free the original skb */
+	kfree_skb(skb);
+	return 1;
 }
 
 /*****************************************************************************
···
 		goto err;
 	}
 
+	sk = sock->sk;
+
 	/* Quick sanity checks */
-	err = -ESOCKTNOSUPPORT;
-	if (sock->type != SOCK_DGRAM) {
+	err = -EPROTONOSUPPORT;
+	if (sk->sk_protocol != IPPROTO_UDP) {
 		PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
-		       "tunl %hu: fd %d wrong type, got %d, expected %d\n",
-		       tunnel_id, fd, sock->type, SOCK_DGRAM);
+		       "tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
+		       tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
 		goto err;
 	}
 	err = -EAFNOSUPPORT;
···
 	}
 
 	err = -ENOTCONN;
-	sk = sock->sk;
 
 	/* Check if this socket has already been prepped */
 	tunnel = (struct pppol2tp_tunnel *)sk->sk_user_data;
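The pppol2tp rework rests on two skb idioms that are easy to miss in the diff: validate header bytes with pskb_may_pull() before dereferencing skb->data, and grow headroom in place with skb_cow_head() instead of juggling a second skb from skb_realloc_headroom(). A minimal sketch with hypothetical helper names (rx_header_ok and tx_prepend_header are not from the driver):

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>

/* Receive side: make sure 'len' header bytes are present and linear before
 * reading them; pskb_may_pull() returns 0 for a packet that is too short.
 */
static int rx_header_ok(struct sk_buff *skb, unsigned int len)
{
	if (!pskb_may_pull(skb, len))
		return -EINVAL;		/* runt packet, caller drops it */
	return 0;			/* skb->data[0..len-1] is safe to read */
}

/* Transmit side: make room for a header on the same skb. skb_cow_head()
 * reallocates the head if the headroom is short, so there is only ever one
 * skb to own and free, which removes the skb/skb2 double-free hazard.
 */
static int tx_prepend_header(struct sk_buff *skb, const u8 *hdr,
			     unsigned int hdr_len)
{
	int err = skb_cow_head(skb, hdr_len);
	if (err)
		return err;		/* -ENOMEM: caller frees the one skb */
	memcpy(__skb_push(skb, hdr_len), hdr, hdr_len);
	return 0;
}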
+5 -5
net/sched/sch_sfq.c
···
 			q->tail = x;
 		}
 	}
-	if (++sch->q.qlen < q->limit-1) {
+	if (++sch->q.qlen <= q->limit) {
 		sch->bstats.bytes += skb->len;
 		sch->bstats.packets++;
 		return 0;
···
 			q->tail = x;
 		}
 	}
-	if (++sch->q.qlen < q->limit - 1) {
+	if (++sch->q.qlen <= q->limit) {
 		sch->qstats.requeues++;
 		return 0;
 	}
···
 	q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
 	q->perturb_period = ctl->perturb_period*HZ;
 	if (ctl->limit)
-		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH);
+		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 2);
 
 	qlen = sch->q.qlen;
-	while (sch->q.qlen >= q->limit-1)
+	while (sch->q.qlen > q->limit)
 		sfq_drop(sch);
 	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
 
···
 		q->dep[i+SFQ_DEPTH].next = i+SFQ_DEPTH;
 		q->dep[i+SFQ_DEPTH].prev = i+SFQ_DEPTH;
 	}
-	q->limit = SFQ_DEPTH;
+	q->limit = SFQ_DEPTH - 2;
 	q->max_depth = 0;
 	q->tail = SFQ_DEPTH;
 	if (opt == NULL) {
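The SFQ change is an off-by-one adjustment: the enqueue test moves from `++qlen < limit - 1` to `++qlen <= limit`, and the configurable limit is clamped to SFQ_DEPTH - 2 instead of SFQ_DEPTH. A small user-space arithmetic sketch (not qdisc code) of how the two predicates behave around the reported "limit of 2" case:

#include <stdbool.h>
#include <stdio.h>

/* Old acceptance test in sfq_enqueue(): after incrementing qlen, keep the
 * packet only if qlen < limit - 1.  With limit = 2 this is qlen < 1, which
 * is never true once a packet has been counted.
 */
static bool old_accept(unsigned int qlen_after_inc, unsigned int limit)
{
	return qlen_after_inc < limit - 1;
}

/* New test: keep the packet while qlen <= limit, so a limit of 2 really
 * holds two packets before the drop path runs.
 */
static bool new_accept(unsigned int qlen_after_inc, unsigned int limit)
{
	return qlen_after_inc <= limit;
}

int main(void)
{
	for (unsigned int qlen = 1; qlen <= 3; qlen++)
		printf("limit=2 qlen=%u  old=%d new=%d\n",
		       qlen, old_accept(qlen, 2), new_accept(qlen, 2));
	return 0;
}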