Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [NET]: Allow skb headroom to be overridden
  [TCP]: Kill unused extern decl for tcp_v4_hash_connecting()
  [NET]: add SO_RCVBUF comment
  [NET]: Deinline some larger functions from netdevice.h
  [DCCP]: Use NULL for pointers, comfort sparse.
  [DECNET]: Fix refcount

+117 -63
include/asm-powerpc/system.h | +4 -1

@@ -365,8 +365,11 @@
  * powers of 2 writes until it reaches sufficient alignment).
  *
  * Based on this we disable the IP header alignment in network drivers.
+ * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
+ * cacheline alignment of buffers.
  */
-#define NET_IP_ALIGN 0
+#define NET_IP_ALIGN	0
+#define NET_SKB_PAD	L1_CACHE_BYTES
 #endif
 
 #define arch_align_stack(x) (x)
include/linux/netdevice.h | +5 -50

@@ -598,20 +598,7 @@
 
 #define HAVE_NETIF_QUEUE
 
-static inline void __netif_schedule(struct net_device *dev)
-{
-        if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
-                unsigned long flags;
-                struct softnet_data *sd;
-
-                local_irq_save(flags);
-                sd = &__get_cpu_var(softnet_data);
-                dev->next_sched = sd->output_queue;
-                sd->output_queue = dev;
-                raise_softirq_irqoff(NET_TX_SOFTIRQ);
-                local_irq_restore(flags);
-        }
-}
+extern void __netif_schedule(struct net_device *dev);
 
 static inline void netif_schedule(struct net_device *dev)
 {
@@ -662,13 +675,7 @@
 /* Use this variant in places where it could be invoked
  * either from interrupt or non-interrupt context.
  */
-static inline void dev_kfree_skb_any(struct sk_buff *skb)
-{
-        if (in_irq() || irqs_disabled())
-                dev_kfree_skb_irq(skb);
-        else
-                dev_kfree_skb(skb);
-}
+extern void dev_kfree_skb_any(struct sk_buff *skb);
 
 #define HAVE_NETIF_RX 1
 extern int netif_rx(struct sk_buff *skb);
@@ -749,22 +768,9 @@
         return test_bit(__LINK_STATE_PRESENT, &dev->state);
 }
 
-static inline void netif_device_detach(struct net_device *dev)
-{
-        if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
-            netif_running(dev)) {
-                netif_stop_queue(dev);
-        }
-}
+extern void netif_device_detach(struct net_device *dev);
 
-static inline void netif_device_attach(struct net_device *dev)
-{
-        if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
-            netif_running(dev)) {
-                netif_wake_queue(dev);
-                __netdev_watchdog_up(dev);
-        }
-}
+extern void netif_device_attach(struct net_device *dev);
 
 /*
  * Network interface message level settings
@@ -819,20 +851,7 @@
  * already been called and returned 1.
  */
 
-static inline void __netif_rx_schedule(struct net_device *dev)
-{
-        unsigned long flags;
-
-        local_irq_save(flags);
-        dev_hold(dev);
-        list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
-        if (dev->quota < 0)
-                dev->quota += dev->weight;
-        else
-                dev->quota = dev->weight;
-        __raise_softirq_irqoff(NET_RX_SOFTIRQ);
-        local_irq_restore(flags);
-}
+extern void __netif_rx_schedule(struct net_device *dev);
 
 /* Try to reschedule poll. Called by irq handler. */
 
include/linux/skbuff.h | +25 -4

@@ -941,6 +941,25 @@
 #define NET_IP_ALIGN	2
 #endif
 
+/*
+ * The networking layer reserves some headroom in skb data (via
+ * dev_alloc_skb). This is used to avoid having to reallocate skb data when
+ * the header has to grow. In the default case, if the header has to grow
+ * 16 bytes or less we avoid the reallocation.
+ *
+ * Unfortunately this headroom changes the DMA alignment of the resulting
+ * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
+ * on some architectures. An architecture can override this value,
+ * perhaps setting it to a cacheline in size (since that will maintain
+ * cacheline alignment of the DMA). It must be a power of 2.
+ *
+ * Various parts of the networking layer expect at least 16 bytes of
+ * headroom, you should not reduce this.
+ */
+#ifndef NET_SKB_PAD
+#define NET_SKB_PAD	16
+#endif
+
 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
 
 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
@@ -1030,9 +1049,9 @@
 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
                                               gfp_t gfp_mask)
 {
-        struct sk_buff *skb = alloc_skb(length + 16, gfp_mask);
+        struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
         if (likely(skb))
-                skb_reserve(skb, 16);
+                skb_reserve(skb, NET_SKB_PAD);
         return skb;
 }
 #else
@@ -1070,13 +1089,15 @@
  */
 static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
 {
-        int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb);
+        int delta = (headroom > NET_SKB_PAD ? headroom : NET_SKB_PAD) -
+                        skb_headroom(skb);
 
         if (delta < 0)
                 delta = 0;
 
         if (delta || skb_cloned(skb))
-                return pskb_expand_head(skb, (delta + 15) & ~15, 0, GFP_ATOMIC);
+                return pskb_expand_head(skb, (delta + (NET_SKB_PAD-1)) &
+                                ~(NET_SKB_PAD-1), 0, GFP_ATOMIC);
         return 0;
 }
 
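Note (illustration only, not part of the patch): a minimal userspace sketch of the round-up arithmetic skb_cow() now performs once NET_SKB_PAD is in play, assuming a hypothetical cacheline size of 32 bytes. cow_expand() is a made-up helper that only computes how many bytes skb_cow() would ask pskb_expand_head() to add; it is not kernel code.

/* Userspace illustration of the NET_SKB_PAD round-up in skb_cow(). */
#include <stdio.h>

#define NET_SKB_PAD 32  /* assumed cacheline size for this example; must be a power of 2 */

static unsigned int cow_expand(unsigned int headroom, unsigned int cur_headroom)
{
        int delta = (headroom > NET_SKB_PAD ? headroom : NET_SKB_PAD) -
                        cur_headroom;

        if (delta < 0)
                delta = 0;
        /* round the shortfall up to a multiple of NET_SKB_PAD */
        return (delta + (NET_SKB_PAD - 1)) & ~(NET_SKB_PAD - 1);
}

int main(void)
{
        /* e.g. an encapsulation needing 40 bytes of headroom on an skb that has 16 */
        printf("expand by %u bytes\n", cow_expand(40, 16));     /* prints 32 */
        printf("expand by %u bytes\n", cow_expand(80, 16));     /* prints 64 */
        return 0;
}

With a cacheline-sized NET_SKB_PAD the expansion is always a whole number of cachelines, which is what keeps the DMA alignment of the reallocated buffer intact.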
include/net/tcp.h | -3

@@ -405,9 +405,6 @@
 
 extern void tcp_unhash(struct sock *sk);
 
-extern int tcp_v4_hash_connecting(struct sock *sk);
-
-
 /* From syncookies.c */
 extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
                                     struct ip_options *opt);
net/core/dev.c | +64

@@ -1080,6 +1080,70 @@
         rcu_read_unlock();
 }
 
+
+void __netif_schedule(struct net_device *dev)
+{
+        if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
+                unsigned long flags;
+                struct softnet_data *sd;
+
+                local_irq_save(flags);
+                sd = &__get_cpu_var(softnet_data);
+                dev->next_sched = sd->output_queue;
+                sd->output_queue = dev;
+                raise_softirq_irqoff(NET_TX_SOFTIRQ);
+                local_irq_restore(flags);
+        }
+}
+EXPORT_SYMBOL(__netif_schedule);
+
+void __netif_rx_schedule(struct net_device *dev)
+{
+        unsigned long flags;
+
+        local_irq_save(flags);
+        dev_hold(dev);
+        list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
+        if (dev->quota < 0)
+                dev->quota += dev->weight;
+        else
+                dev->quota = dev->weight;
+        __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+        local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__netif_rx_schedule);
+
+void dev_kfree_skb_any(struct sk_buff *skb)
+{
+        if (in_irq() || irqs_disabled())
+                dev_kfree_skb_irq(skb);
+        else
+                dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL(dev_kfree_skb_any);
+
+
+/* Hot-plugging. */
+void netif_device_detach(struct net_device *dev)
+{
+        if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
+            netif_running(dev)) {
+                netif_stop_queue(dev);
+        }
+}
+EXPORT_SYMBOL(netif_device_detach);
+
+void netif_device_attach(struct net_device *dev)
+{
+        if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
+            netif_running(dev)) {
+                netif_wake_queue(dev);
+                __netdev_watchdog_up(dev);
+        }
+}
+EXPORT_SYMBOL(netif_device_attach);
+
+
 /*
  * Invalidate hardware checksum when packet is to be mangled, and
  * complete checksum manually on outgoing path.
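Note (illustration only): the helpers moved out of line above are exported symbols, so modular drivers keep working. A typical caller of netif_device_detach()/netif_device_attach() is a driver's power-management path; mydrv_suspend/mydrv_resume below are hypothetical and merely sketch the usual pattern with the 2.6-era PCI hooks.

#include <linux/pci.h>
#include <linux/netdevice.h>

static int mydrv_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        netif_device_detach(dev);       /* stops the queue if the device was running */
        /* ... save chip state, power the hardware down ... */
        return 0;
}

static int mydrv_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        /* ... power the hardware up, restore chip state ... */
        netif_device_attach(dev);       /* restarts the queue and watchdog if it was running */
        return 0;
}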
net/core/sock.c | +15 -1

@@ -385,7 +385,21 @@
                         val = sysctl_rmem_max;
 set_rcvbuf:
                 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
-                /* FIXME: is this lower bound the right one? */
+                /*
+                 * We double it on the way in to account for
+                 * "struct sk_buff" etc. overhead.  Applications
+                 * assume that the SO_RCVBUF setting they make will
+                 * allow that much actual data to be received on that
+                 * socket.
+                 *
+                 * Applications are unaware that "struct sk_buff" and
+                 * other overheads allocate from the receive buffer
+                 * during socket buffer allocation.
+                 *
+                 * And after considering the possible alternatives,
+                 * returning the value we actually used in getsockopt
+                 * is the most desirable behavior.
+                 */
                 if ((val * 2) < SOCK_MIN_RCVBUF)
                         sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
                 else
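Note (illustration only): a small userspace program showing the behaviour the new comment documents. The kernel doubles the SO_RCVBUF value the application supplies, and getsockopt() reports the doubled value actually in use (subject to the net.core.rmem_max cap).

#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int req = 65536, got = 0;
        socklen_t len = sizeof(got);

        if (fd < 0)
                return 1;
        setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &req, sizeof(req));
        getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &got, &len);
        printf("requested %d, kernel uses %d\n", req, got);     /* roughly 2 * req */
        return 0;
}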
net/dccp/feat.c | +3 -3

@@ -204,7 +204,7 @@
         if (rc) {
                 kfree(opt->dccpop_sc->dccpoc_val);
                 kfree(opt->dccpop_sc);
-                opt->dccpop_sc = 0;
+                opt->dccpop_sc = NULL;
                 return rc;
         }
 
@@ -322,7 +322,7 @@
         opt->dccpop_type = type == DCCPO_CHANGE_L ? DCCPO_CONFIRM_R :
                                                     DCCPO_CONFIRM_L;
         opt->dccpop_feat = feature;
-        opt->dccpop_val  = 0;
+        opt->dccpop_val  = NULL;
         opt->dccpop_len  = 0;
 
         /* change feature */
@@ -523,7 +523,7 @@
          * once...
          */
         /* the master socket no longer needs to worry about confirms */
-        opt->dccpop_sc = 0; /* it's not a memleak---new socket has it */
+        opt->dccpop_sc = NULL; /* it's not a memleak---new socket has it */
 
         /* reset state for a new socket */
         opt->dccpop_conf = 0;
net/decnet/dn_dev.c | +1 -1

@@ -620,7 +620,7 @@
         }
         write_unlock(&dndev_lock);
         if (old)
-                dev_put(dev);
+                dev_put(old);
         return rv;
 }
 