Automatic merge of rsync://rsync.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

+28 -12
+1 -1
net/ipv4/esp4.c
···
 478  478    {
 479  479        struct xfrm_decap_state decap;
 480  480
 481       -    if (sizeof(struct esp_decap_data) <
      481  +    if (sizeof(struct esp_decap_data) >
 482  482            sizeof(decap.decap_data)) {
 483  483            extern void decap_data_too_small(void);
 484  484
+10
net/ipv4/netfilter/ip_queue.c
···
   3    3    * communicating with userspace via netlink.
   4    4    *
   5    5    * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
        6  + * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
   6    7    *
   7    8    * This program is free software; you can redistribute it and/or modify
   8    9    * it under the terms of the GNU General Public License version 2 as
···
  18   17    * 2005-01-10: Added /proc counter for dropped packets; fixed so
  19   18    *             packets aren't delivered to user space if they're going
  20   19    *             to be dropped.
       20  + * 2005-05-26: local_bh_{disable,enable} around nf_reinject (Harald Welte)
  21   21    *
  22   22    */
  23   23   #include <linux/module.h>
···
  73   71   static void
  74   72   ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
  75   73   {
       74  +    /* TCP input path (and probably other bits) assume to be called
       75  +     * from softirq context, not from syscall, like ipq_issue_verdict is
       76  +     * called.  TCP input path deadlocks with locks taken from timer
       77  +     * softirq, e.g.  We therefore emulate this by local_bh_disable() */
       78  +
       79  +    local_bh_disable();
  76   80       nf_reinject(entry->skb, entry->info, verdict);
       81  +    local_bh_enable();
       82  +
  77   83       kfree(entry);
  78   84   }
  79   85
+6 -6
net/ipv4/udp.c
···
  738  738      unsigned long amount;
  739  739
  740  740      amount = 0;
  741       -   spin_lock_irq(&sk->sk_receive_queue.lock);
       741  +   spin_lock_bh(&sk->sk_receive_queue.lock);
  742  742      skb = skb_peek(&sk->sk_receive_queue);
  743  743      if (skb != NULL) {
  744  744          /*
···
  748  748           */
  749  749          amount = skb->len - sizeof(struct udphdr);
  750  750      }
  751       -   spin_unlock_irq(&sk->sk_receive_queue.lock);
       751  +   spin_unlock_bh(&sk->sk_receive_queue.lock);
  752  752      return put_user(amount, (int __user *)arg);
  753  753   }
  754  754
···
  848  848      /* Clear queue. */
  849  849      if (flags&MSG_PEEK) {
  850  850          int clear = 0;
  851       -       spin_lock_irq(&sk->sk_receive_queue.lock);
       851  +       spin_lock_bh(&sk->sk_receive_queue.lock);
  852  852          if (skb == skb_peek(&sk->sk_receive_queue)) {
  853  853              __skb_unlink(skb, &sk->sk_receive_queue);
  854  854              clear = 1;
  855  855          }
  856       -       spin_unlock_irq(&sk->sk_receive_queue.lock);
       856  +       spin_unlock_bh(&sk->sk_receive_queue.lock);
  857  857          if (clear)
  858  858              kfree_skb(skb);
  859  859      }
···
 1334 1334      struct sk_buff_head *rcvq = &sk->sk_receive_queue;
 1335 1335      struct sk_buff *skb;
 1336 1336
 1337       -   spin_lock_irq(&rcvq->lock);
      1337  +   spin_lock_bh(&rcvq->lock);
 1338 1338      while ((skb = skb_peek(rcvq)) != NULL) {
 1339 1339          if (udp_checksum_complete(skb)) {
 1340 1340              UDP_INC_STATS_BH(UDP_MIB_INERRORS);
···
 1345 1345              break;
 1346 1346          }
 1347 1347      }
 1348       -   spin_unlock_irq(&rcvq->lock);
      1348  +   spin_unlock_bh(&rcvq->lock);
 1349 1349
 1350 1350      /* nothing to see, move along */
 1351 1351      if (skb == NULL)
+11 -5
net/sched/sch_dsmark.c
···
  18   18   #include <asm/byteorder.h>
  19   19
  20   20
  21       -  #if 1 /* control */
       21  +  #if 0 /* control */
  22   22   #define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
  23   23   #else
  24   24   #define DPRINTK(format,args...)
···
  73   73
  74   74      DPRINTK("dsmark_graft(sch %p,[qdisc %p],new %p,old %p)\n",sch,p,new,
  75   75          old);
  76       -     if (!new)
  77       -         new = &noop_qdisc;
       76  +
       77  +     if (new == NULL) {
       78  +         new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
       79  +         if (new == NULL)
       80  +             new = &noop_qdisc;
       81  +     }
       82  +
  78   83      sch_tree_lock(sch);
  79   84      *old = xchg(&p->q,new);
  80   85      if (*old)
···
 168  163          return;
 169  164      for (i = 0; i < p->indices; i++) {
 170  165          if (p->mask[i] == 0xff && !p->value[i])
 171       -         continue;
      166  +         goto ignore;
 172  167          if (walker->count >= walker->skip) {
 173  168              if (walker->fn(sch, i+1, walker) < 0) {
 174  169                  walker->stop = 1;
 175  170                  break;
 176  171              }
 177  172          }
 178       -         walker->count++;
      173  + ignore:
      174  +         walker->count++;
 179  175      }
 180  176   }