Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (32 commits)
[NET]: Fix more per-cpu typos
[SECURITY]: Fix build with CONFIG_SECURITY disabled.
[I/OAT]: Remove CPU hotplug lock from net_dma_rebalance
[DECNET]: Fix for routing bug
[AF_UNIX]: Kernel memory leak fix for af_unix datagram getpeersec patch
[NET]: skb_queue_lock_key() is no longer used.
[NET]: Remove lockdep_set_class() call from skb_queue_head_init().
[IPV6]: SNMPv2 "ipv6IfStatsOutFragCreates" counter error
[IPV6]: SNMPv2 "ipv6IfStatsInHdrErrors" counter error
[NET]: Kill the WARN_ON() calls for checksum fixups.
[NETFILTER]: xt_hashlimit/xt_string: missing string validation
[NETFILTER]: SIP helper: expect RTP streams in both directions
[E1000]: Convert to netdev_alloc_skb
[TG3]: Convert to netdev_alloc_skb
[NET]: Add netdev_alloc_skb().
[TCP]: Process linger2 timeout consistently.
[SECURITY] secmark: nul-terminate secdata
[NET] infiniband: Cleanup ib_addr module to use the netevents
[NET]: Core net changes to generate netevents
[NET]: Network Event Notifier Mechanism.
...

+632 -187
+14 -16
drivers/infiniband/core/addr.c
··· 35 #include <net/arp.h> 36 #include <net/neighbour.h> 37 #include <net/route.h> 38 #include <rdma/ib_addr.h> 39 40 MODULE_AUTHOR("Sean Hefty"); ··· 327 } 328 EXPORT_SYMBOL(rdma_addr_cancel); 329 330 - static int addr_arp_recv(struct sk_buff *skb, struct net_device *dev, 331 - struct packet_type *pkt, struct net_device *orig_dev) 332 { 333 - struct arphdr *arp_hdr; 334 335 - arp_hdr = (struct arphdr *) skb->nh.raw; 336 - 337 - if (arp_hdr->ar_op == htons(ARPOP_REQUEST) || 338 - arp_hdr->ar_op == htons(ARPOP_REPLY)) 339 - set_timeout(jiffies); 340 - 341 - kfree_skb(skb); 342 return 0; 343 } 344 345 - static struct packet_type addr_arp = { 346 - .type = __constant_htons(ETH_P_ARP), 347 - .func = addr_arp_recv, 348 - .af_packet_priv = (void*) 1, 349 }; 350 351 static int addr_init(void) ··· 351 if (!addr_wq) 352 return -ENOMEM; 353 354 - dev_add_pack(&addr_arp); 355 return 0; 356 } 357 358 static void addr_cleanup(void) 359 { 360 - dev_remove_pack(&addr_arp); 361 destroy_workqueue(addr_wq); 362 } 363
··· 35 #include <net/arp.h> 36 #include <net/neighbour.h> 37 #include <net/route.h> 38 + #include <net/netevent.h> 39 #include <rdma/ib_addr.h> 40 41 MODULE_AUTHOR("Sean Hefty"); ··· 326 } 327 EXPORT_SYMBOL(rdma_addr_cancel); 328 329 + static int netevent_callback(struct notifier_block *self, unsigned long event, 330 + void *ctx) 331 { 332 + if (event == NETEVENT_NEIGH_UPDATE) { 333 + struct neighbour *neigh = ctx; 334 335 + if (neigh->dev->type == ARPHRD_INFINIBAND && 336 + (neigh->nud_state & NUD_VALID)) { 337 + set_timeout(jiffies); 338 + } 339 + } 340 return 0; 341 } 342 343 + static struct notifier_block nb = { 344 + .notifier_call = netevent_callback 345 }; 346 347 static int addr_init(void) ··· 353 if (!addr_wq) 354 return -ENOMEM; 355 356 + register_netevent_notifier(&nb); 357 return 0; 358 } 359 360 static void addr_cleanup(void) 361 { 362 + unregister_netevent_notifier(&nb); 363 destroy_workqueue(addr_wq); 364 } 365
+1 -1
drivers/net/appletalk/Kconfig
··· 29 even politically correct people are allowed to say Y here. 30 31 config DEV_APPLETALK 32 - bool "Appletalk interfaces support" 33 depends on ATALK 34 help 35 AppleTalk is the protocol that Apple computers can use to communicate
··· 29 even politically correct people are allowed to say Y here. 30 31 config DEV_APPLETALK 32 + tristate "Appletalk interfaces support" 33 depends on ATALK 34 help 35 AppleTalk is the protocol that Apple computers can use to communicate
+6 -5
drivers/net/e1000/e1000_main.c
··· 3127 break; 3128 } 3129 3130 - /* NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3131 * means we reserve 2 more, this pushes us to allocate from the next 3132 * larger slab size 3133 * i.e. RXBUFFER_2048 --> size-4096 slab */ ··· 3708 #define E1000_CB_LENGTH 256 3709 if (length < E1000_CB_LENGTH) { 3710 struct sk_buff *new_skb = 3711 - dev_alloc_skb(length + NET_IP_ALIGN); 3712 if (new_skb) { 3713 skb_reserve(new_skb, NET_IP_ALIGN); 3714 new_skb->dev = netdev; ··· 3979 3980 while (cleaned_count--) { 3981 if (!(skb = buffer_info->skb)) 3982 - skb = dev_alloc_skb(bufsz); 3983 else { 3984 skb_trim(skb, 0); 3985 goto map_skb; ··· 3997 DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes " 3998 "at %p\n", bufsz, skb->data); 3999 /* Try again, without freeing the previous */ 4000 - skb = dev_alloc_skb(bufsz); 4001 /* Failed allocation, critical failure */ 4002 if (!skb) { 4003 dev_kfree_skb(oldskb); ··· 4121 rx_desc->read.buffer_addr[j+1] = ~0; 4122 } 4123 4124 - skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN); 4125 4126 if (unlikely(!skb)) { 4127 adapter->alloc_rx_buff_failed++;
··· 3127 break; 3128 } 3129 3130 + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3131 * means we reserve 2 more, this pushes us to allocate from the next 3132 * larger slab size 3133 * i.e. RXBUFFER_2048 --> size-4096 slab */ ··· 3708 #define E1000_CB_LENGTH 256 3709 if (length < E1000_CB_LENGTH) { 3710 struct sk_buff *new_skb = 3711 + netdev_alloc_skb(netdev, length + NET_IP_ALIGN); 3712 if (new_skb) { 3713 skb_reserve(new_skb, NET_IP_ALIGN); 3714 new_skb->dev = netdev; ··· 3979 3980 while (cleaned_count--) { 3981 if (!(skb = buffer_info->skb)) 3982 + skb = netdev_alloc_skb(netdev, bufsz); 3983 else { 3984 skb_trim(skb, 0); 3985 goto map_skb; ··· 3997 DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes " 3998 "at %p\n", bufsz, skb->data); 3999 /* Try again, without freeing the previous */ 4000 + skb = netdev_alloc_skb(netdev, bufsz); 4001 /* Failed allocation, critical failure */ 4002 if (!skb) { 4003 dev_kfree_skb(oldskb); ··· 4121 rx_desc->read.buffer_addr[j+1] = ~0; 4122 } 4123 4124 + skb = netdev_alloc_skb(netdev, 4125 + adapter->rx_ps_bsize0 + NET_IP_ALIGN); 4126 4127 if (unlikely(!skb)) { 4128 adapter->alloc_rx_buff_failed++;
+5 -5
drivers/net/tg3.c
··· 68 69 #define DRV_MODULE_NAME "tg3" 70 #define PFX DRV_MODULE_NAME ": " 71 - #define DRV_MODULE_VERSION "3.63" 72 - #define DRV_MODULE_RELDATE "July 25, 2006" 73 74 #define TG3_DEF_MAC_MODE 0 75 #define TG3_DEF_RX_MODE 0 ··· 3097 * Callers depend upon this behavior and assume that 3098 * we leave everything unchanged if we fail. 3099 */ 3100 - skb = dev_alloc_skb(skb_size); 3101 if (skb == NULL) 3102 return -ENOMEM; 3103 ··· 3270 tg3_recycle_rx(tp, opaque_key, 3271 desc_idx, *post_ptr); 3272 3273 - copy_skb = dev_alloc_skb(len + 2); 3274 if (copy_skb == NULL) 3275 goto drop_it_no_recycle; 3276 ··· 8618 err = -EIO; 8619 8620 tx_len = 1514; 8621 - skb = dev_alloc_skb(tx_len); 8622 if (!skb) 8623 return -ENOMEM; 8624
··· 68 69 #define DRV_MODULE_NAME "tg3" 70 #define PFX DRV_MODULE_NAME ": " 71 + #define DRV_MODULE_VERSION "3.64" 72 + #define DRV_MODULE_RELDATE "July 31, 2006" 73 74 #define TG3_DEF_MAC_MODE 0 75 #define TG3_DEF_RX_MODE 0 ··· 3097 * Callers depend upon this behavior and assume that 3098 * we leave everything unchanged if we fail. 3099 */ 3100 + skb = netdev_alloc_skb(tp->dev, skb_size); 3101 if (skb == NULL) 3102 return -ENOMEM; 3103 ··· 3270 tg3_recycle_rx(tp, opaque_key, 3271 desc_idx, *post_ptr); 3272 3273 + copy_skb = netdev_alloc_skb(tp->dev, len + 2); 3274 if (copy_skb == NULL) 3275 goto drop_it_no_recycle; 3276 ··· 8618 err = -EIO; 8619 8620 tx_len = 1514; 8621 + skb = netdev_alloc_skb(tp->dev, tx_len); 8622 if (!skb) 8623 return -ENOMEM; 8624
-1
include/linux/netfilter_bridge.h
··· 6 7 #include <linux/netfilter.h> 8 #if defined(__KERNEL__) && defined(CONFIG_BRIDGE_NETFILTER) 9 - #include <asm/atomic.h> 10 #include <linux/if_ether.h> 11 #endif 12
··· 6 7 #include <linux/netfilter.h> 8 #if defined(__KERNEL__) && defined(CONFIG_BRIDGE_NETFILTER) 9 #include <linux/if_ether.h> 10 #endif 11
+34 -6
include/linux/security.h
··· 1109 * @name contains the name of the security module being unstacked. 1110 * @ops contains a pointer to the struct security_operations of the module to unstack. 1111 * 1112 * This is the main security structure. 1113 */ 1114 struct security_operations { ··· 1299 1300 int (*getprocattr)(struct task_struct *p, char *name, void *value, size_t size); 1301 int (*setprocattr)(struct task_struct *p, char *name, void *value, size_t size); 1302 1303 #ifdef CONFIG_SECURITY_NETWORK 1304 int (*unix_stream_connect) (struct socket * sock, ··· 1329 int (*socket_shutdown) (struct socket * sock, int how); 1330 int (*socket_sock_rcv_skb) (struct sock * sk, struct sk_buff * skb); 1331 int (*socket_getpeersec_stream) (struct socket *sock, char __user *optval, int __user *optlen, unsigned len); 1332 - int (*socket_getpeersec_dgram) (struct sk_buff *skb, char **secdata, u32 *seclen); 1333 int (*sk_alloc_security) (struct sock *sk, int family, gfp_t priority); 1334 void (*sk_free_security) (struct sock *sk); 1335 unsigned int (*sk_getsid) (struct sock *sk, struct flowi *fl, u8 dir); ··· 2071 return security_ops->netlink_recv(skb, cap); 2072 } 2073 2074 /* prototypes */ 2075 extern int security_init (void); 2076 extern int register_security (struct security_operations *ops); ··· 2747 { 2748 } 2749 2750 #endif /* CONFIG_SECURITY */ 2751 2752 #ifdef CONFIG_SECURITY_NETWORK ··· 2870 return security_ops->socket_getpeersec_stream(sock, optval, optlen, len); 2871 } 2872 2873 - static inline int security_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata, 2874 - u32 *seclen) 2875 { 2876 - return security_ops->socket_getpeersec_dgram(skb, secdata, seclen); 2877 } 2878 2879 static inline int security_sk_alloc(struct sock *sk, int family, gfp_t priority) ··· 2997 return -ENOPROTOOPT; 2998 } 2999 3000 - static inline int security_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata, 3001 - u32 *seclen) 3002 { 3003 return -ENOPROTOOPT; 3004 }
··· 1109 * @name contains the name of the security module being unstacked. 1110 * @ops contains a pointer to the struct security_operations of the module to unstack. 1111 * 1112 + * @secid_to_secctx: 1113 + * Convert secid to security context. 1114 + * @secid contains the security ID. 1115 + * @secdata contains the pointer that stores the converted security context. 1116 + * 1117 + * @release_secctx: 1118 + * Release the security context. 1119 + * @secdata contains the security context. 1120 + * @seclen contains the length of the security context. 1121 + * 1122 * This is the main security structure. 1123 */ 1124 struct security_operations { ··· 1289 1290 int (*getprocattr)(struct task_struct *p, char *name, void *value, size_t size); 1291 int (*setprocattr)(struct task_struct *p, char *name, void *value, size_t size); 1292 + int (*secid_to_secctx)(u32 secid, char **secdata, u32 *seclen); 1293 + void (*release_secctx)(char *secdata, u32 seclen); 1294 1295 #ifdef CONFIG_SECURITY_NETWORK 1296 int (*unix_stream_connect) (struct socket * sock, ··· 1317 int (*socket_shutdown) (struct socket * sock, int how); 1318 int (*socket_sock_rcv_skb) (struct sock * sk, struct sk_buff * skb); 1319 int (*socket_getpeersec_stream) (struct socket *sock, char __user *optval, int __user *optlen, unsigned len); 1320 + int (*socket_getpeersec_dgram) (struct socket *sock, struct sk_buff *skb, u32 *secid); 1321 int (*sk_alloc_security) (struct sock *sk, int family, gfp_t priority); 1322 void (*sk_free_security) (struct sock *sk); 1323 unsigned int (*sk_getsid) (struct sock *sk, struct flowi *fl, u8 dir); ··· 2059 return security_ops->netlink_recv(skb, cap); 2060 } 2061 2062 + static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) 2063 + { 2064 + return security_ops->secid_to_secctx(secid, secdata, seclen); 2065 + } 2066 + 2067 + static inline void security_release_secctx(char *secdata, u32 seclen) 2068 + { 2069 + return security_ops->release_secctx(secdata, seclen); 2070 + } 2071 + 2072 + /* prototypes */ 2073 extern int security_init (void); 2074 extern int register_security (struct security_operations *ops); ··· 2725 { 2726 } 2727 2728 + static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) 2729 + { 2730 + return -EOPNOTSUPP; 2731 + } 2732 + 2733 + static inline void security_release_secctx(char *secdata, u32 seclen) 2734 + { 2735 + } 2736 #endif /* CONFIG_SECURITY */ 2737 2738 #ifdef CONFIG_SECURITY_NETWORK ··· 2840 return security_ops->socket_getpeersec_stream(sock, optval, optlen, len); 2841 } 2842 2843 + static inline int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid) 2844 { 2845 + return security_ops->socket_getpeersec_dgram(sock, skb, secid); 2846 } 2847 2848 static inline int security_sk_alloc(struct sock *sk, int family, gfp_t priority) ··· 2968 return -ENOPROTOOPT; 2969 } 2970 2971 + static inline int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid) 2972 { 2973 return -ENOPROTOOPT; 2974 }
+30 -3
include/linux/skbuff.h
··· 604 return list_->qlen; 605 } 606 607 - extern struct lock_class_key skb_queue_lock_key; 608 - 609 static inline void skb_queue_head_init(struct sk_buff_head *list) 610 { 611 spin_lock_init(&list->lock); 612 - lockdep_set_class(&list->lock, &skb_queue_lock_key); 613 list->prev = list->next = (struct sk_buff *)list; 614 list->qlen = 0; 615 } ··· 1107 static inline struct sk_buff *dev_alloc_skb(unsigned int length) 1108 { 1109 return __dev_alloc_skb(length, GFP_ATOMIC); 1110 } 1111 1112 /**
··· 604 return list_->qlen; 605 } 606 607 + /* 608 + * This function creates a split out lock class for each invocation; 609 + * this is needed for now since a whole lot of users of the skb-queue 610 + * infrastructure in drivers have different locking usage (in hardirq) 611 + * than the networking core (in softirq only). In the long run either the 612 + * network layer or drivers should need annotation to consolidate the 613 + * main types of usage into 3 classes. 614 + */ 615 static inline void skb_queue_head_init(struct sk_buff_head *list) 616 { 617 spin_lock_init(&list->lock); 618 list->prev = list->next = (struct sk_buff *)list; 619 list->qlen = 0; 620 } ··· 1102 static inline struct sk_buff *dev_alloc_skb(unsigned int length) 1103 { 1104 return __dev_alloc_skb(length, GFP_ATOMIC); 1105 + } 1106 + 1107 + extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev, 1108 + unsigned int length, gfp_t gfp_mask); 1109 + 1110 + /** 1111 + * netdev_alloc_skb - allocate an skbuff for rx on a specific device 1112 + * @dev: network device to receive on 1113 + * @length: length to allocate 1114 + * 1115 + * Allocate a new &sk_buff and assign it a usage count of one. The 1116 + * buffer has unspecified headroom built in. Users should allocate 1117 + * the headroom they think they need without accounting for the 1118 + * built in space. The built in space is used for optimisations. 1119 + * 1120 + * %NULL is returned if there is no free memory. Although this function 1121 + * allocates memory it can be called from an interrupt. 1122 + */ 1123 + static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev, 1124 + unsigned int length) 1125 + { 1126 + return __netdev_alloc_skb(dev, length, GFP_ATOMIC); 1127 } 1128 1129 /**
+2 -4
include/net/af_unix.h
··· 54 struct ucred creds; /* Skb credentials */ 55 struct scm_fp_list *fp; /* Passed files */ 56 #ifdef CONFIG_SECURITY_NETWORK 57 - char *secdata; /* Security context */ 58 - u32 seclen; /* Security length */ 59 #endif 60 }; 61 62 #define UNIXCB(skb) (*(struct unix_skb_parms*)&((skb)->cb)) 63 #define UNIXCREDS(skb) (&UNIXCB((skb)).creds) 64 - #define UNIXSECDATA(skb) (&UNIXCB((skb)).secdata) 65 - #define UNIXSECLEN(skb) (&UNIXCB((skb)).seclen) 66 67 #define unix_state_rlock(s) spin_lock(&unix_sk(s)->lock) 68 #define unix_state_runlock(s) spin_unlock(&unix_sk(s)->lock)
··· 54 struct ucred creds; /* Skb credentials */ 55 struct scm_fp_list *fp; /* Passed files */ 56 #ifdef CONFIG_SECURITY_NETWORK 57 + u32 secid; /* Security ID */ 58 #endif 59 }; 60 61 #define UNIXCB(skb) (*(struct unix_skb_parms*)&((skb)->cb)) 62 #define UNIXCREDS(skb) (&UNIXCB((skb)).creds) 63 + #define UNIXSID(skb) (&UNIXCB((skb)).secid) 64 65 #define unix_state_rlock(s) spin_lock(&unix_sk(s)->lock) 66 #define unix_state_runlock(s) spin_unlock(&unix_sk(s)->lock)
+9 -3
include/net/ip6_route.h
··· 139 /* 140 * Store a destination cache entry in a socket 141 */ 142 - static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst, 143 - struct in6_addr *daddr) 144 { 145 struct ipv6_pinfo *np = inet6_sk(sk); 146 struct rt6_info *rt = (struct rt6_info *) dst; 147 148 - write_lock(&sk->sk_dst_lock); 149 sk_setup_caps(sk, dst); 150 np->daddr_cache = daddr; 151 np->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; 152 write_unlock(&sk->sk_dst_lock); 153 } 154
··· 139 /* 140 * Store a destination cache entry in a socket 141 */ 142 + static inline void __ip6_dst_store(struct sock *sk, struct dst_entry *dst, 143 + struct in6_addr *daddr) 144 { 145 struct ipv6_pinfo *np = inet6_sk(sk); 146 struct rt6_info *rt = (struct rt6_info *) dst; 147 148 sk_setup_caps(sk, dst); 149 np->daddr_cache = daddr; 150 np->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; 151 + } 152 + 153 + static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst, 154 + struct in6_addr *daddr) 155 + { 156 + write_lock(&sk->sk_dst_lock); 157 + __ip6_dst_store(sk, dst, daddr); 158 write_unlock(&sk->sk_dst_lock); 159 } 160
+3
include/net/ipv6.h
··· 468 extern int ip6_dst_lookup(struct sock *sk, 469 struct dst_entry **dst, 470 struct flowi *fl); 471 472 /* 473 * skb processing functions
··· 468 extern int ip6_dst_lookup(struct sock *sk, 469 struct dst_entry **dst, 470 struct flowi *fl); 471 + extern int ip6_sk_dst_lookup(struct sock *sk, 472 + struct dst_entry **dst, 473 + struct flowi *fl); 474 475 /* 476 * skb processing functions
+1 -1
include/net/netdma.h
··· 29 { 30 struct dma_chan *chan; 31 rcu_read_lock(); 32 - chan = rcu_dereference(__get_cpu_var(softnet_data.net_dma)); 33 if (chan) 34 dma_chan_get(chan); 35 rcu_read_unlock();
··· 29 { 30 struct dma_chan *chan; 31 rcu_read_lock(); 32 + chan = rcu_dereference(__get_cpu_var(softnet_data).net_dma); 33 if (chan) 34 dma_chan_get(chan); 35 rcu_read_unlock();
+33
include/net/netevent.h
···
··· 1 + #ifndef _NET_EVENT_H 2 + #define _NET_EVENT_H 3 + 4 + /* 5 + * Generic netevent notifiers 6 + * 7 + * Authors: 8 + * Tom Tucker <tom@opengridcomputing.com> 9 + * Steve Wise <swise@opengridcomputing.com> 10 + * 11 + * Changes: 12 + */ 13 + #ifdef __KERNEL__ 14 + 15 + #include <net/dst.h> 16 + 17 + struct netevent_redirect { 18 + struct dst_entry *old; 19 + struct dst_entry *new; 20 + }; 21 + 22 + enum netevent_notif_type { 23 + NETEVENT_NEIGH_UPDATE = 1, /* arg is struct neighbour ptr */ 24 + NETEVENT_PMTU_UPDATE, /* arg is struct dst_entry ptr */ 25 + NETEVENT_REDIRECT, /* arg is struct netevent_redirect ptr */ 26 + }; 27 + 28 + extern int register_netevent_notifier(struct notifier_block *nb); 29 + extern int unregister_netevent_notifier(struct notifier_block *nb); 30 + extern int call_netevent_notifiers(unsigned long val, void *v); 31 + 32 + #endif 33 + #endif
+25 -4
include/net/scm.h
··· 3 4 #include <linux/limits.h> 5 #include <linux/net.h> 6 7 /* Well, we should have at least one descriptor open 8 * to accept passed FDs 8) ··· 21 struct ucred creds; /* Skb credentials */ 22 struct scm_fp_list *fp; /* Passed files */ 23 #ifdef CONFIG_SECURITY_NETWORK 24 - char *secdata; /* Security context */ 25 - u32 seclen; /* Security length */ 26 #endif 27 unsigned long seq; /* Connection seqno */ 28 }; ··· 31 extern int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm); 32 extern void __scm_destroy(struct scm_cookie *scm); 33 extern struct scm_fp_list * scm_fp_dup(struct scm_fp_list *fpl); 34 35 static __inline__ void scm_destroy(struct scm_cookie *scm) 36 { ··· 57 scm->creds.pid = p->tgid; 58 scm->fp = NULL; 59 scm->seq = 0; 60 if (msg->msg_controllen <= 0) 61 return 0; 62 return __scm_send(sock, msg, scm); ··· 66 #ifdef CONFIG_SECURITY_NETWORK 67 static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm) 68 { 69 - if (test_bit(SOCK_PASSSEC, &sock->flags) && scm->secdata != NULL) 70 - put_cmsg(msg, SOL_SOCKET, SCM_SECURITY, scm->seclen, scm->secdata); 71 } 72 #else 73 static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm)
··· 3 4 #include <linux/limits.h> 5 #include <linux/net.h> 6 + #include <linux/security.h> 7 8 /* Well, we should have at least one descriptor open 9 * to accept passed FDs 8) ··· 20 struct ucred creds; /* Skb credentials */ 21 struct scm_fp_list *fp; /* Passed files */ 22 #ifdef CONFIG_SECURITY_NETWORK 23 + u32 secid; /* Passed security ID */ 24 #endif 25 unsigned long seq; /* Connection seqno */ 26 }; ··· 31 extern int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm); 32 extern void __scm_destroy(struct scm_cookie *scm); 33 extern struct scm_fp_list * scm_fp_dup(struct scm_fp_list *fpl); 34 + 35 + #ifdef CONFIG_SECURITY_NETWORK 36 + static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_cookie *scm) 37 + { 38 + security_socket_getpeersec_dgram(sock, NULL, &scm->secid); 39 + } 40 + #else 41 + static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_cookie *scm) 42 + { } 43 + #endif /* CONFIG_SECURITY_NETWORK */ 44 45 static __inline__ void scm_destroy(struct scm_cookie *scm) 46 { ··· 47 scm->creds.pid = p->tgid; 48 scm->fp = NULL; 49 scm->seq = 0; 50 + unix_get_peersec_dgram(sock, scm); 51 if (msg->msg_controllen <= 0) 52 return 0; 53 return __scm_send(sock, msg, scm); ··· 55 #ifdef CONFIG_SECURITY_NETWORK 56 static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm) 57 { 58 + char *secdata; 59 + u32 seclen; 60 + int err; 61 + 62 + if (test_bit(SOCK_PASSSEC, &sock->flags)) { 63 + err = security_secid_to_secctx(scm->secid, &secdata, &seclen); 64 + 65 + if (!err) { 66 + put_cmsg(msg, SOL_SOCKET, SCM_SECURITY, seclen, secdata); 67 + security_release_secctx(secdata, seclen); 68 + } 69 + } 70 } 71 #else 72 static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm)
+3
include/net/tcp.h
··· 914 915 static inline void tcp_done(struct sock *sk) 916 { 917 tcp_set_state(sk, TCP_CLOSE); 918 tcp_clear_xmit_timers(sk); 919
··· 914 915 static inline void tcp_done(struct sock *sk) 916 { 917 + if(sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) 918 + TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); 919 + 920 tcp_set_state(sk, TCP_CLOSE); 921 tcp_clear_xmit_timers(sk); 922
+1 -1
net/core/Makefile
··· 7 8 obj-$(CONFIG_SYSCTL) += sysctl_net_core.o 9 10 - obj-y += dev.o ethtool.o dev_mcast.o dst.o \ 11 neighbour.o rtnetlink.o utils.o link_watch.o filter.o 12 13 obj-$(CONFIG_XFRM) += flow.o
··· 7 8 obj-$(CONFIG_SYSCTL) += sysctl_net_core.o 9 10 + obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \ 11 neighbour.o rtnetlink.o utils.o link_watch.o filter.o 12 13 obj-$(CONFIG_XFRM) += flow.o
+2 -17
net/core/dev.c
··· 1166 goto out_set_summed; 1167 1168 if (unlikely(skb_shinfo(skb)->gso_size)) { 1169 - static int warned; 1170 - 1171 - WARN_ON(!warned); 1172 - warned = 1; 1173 - 1174 /* Let GSO fix up the checksum. */ 1175 goto out_set_summed; 1176 } ··· 1215 __skb_pull(skb, skb->mac_len); 1216 1217 if (unlikely(skb->ip_summed != CHECKSUM_HW)) { 1218 - static int warned; 1219 - 1220 - WARN_ON(!warned); 1221 - warned = 1; 1222 - 1223 if (skb_header_cloned(skb) && 1224 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 1225 return ERR_PTR(err); ··· 3419 unsigned int cpu, i, n; 3420 struct dma_chan *chan; 3421 3422 - lock_cpu_hotplug(); 3423 - 3424 if (net_dma_count == 0) { 3425 for_each_online_cpu(cpu) 3426 - rcu_assign_pointer(per_cpu(softnet_data.net_dma, cpu), NULL); 3427 - unlock_cpu_hotplug(); 3428 return; 3429 } 3430 ··· 3434 + (i < (num_online_cpus() % net_dma_count) ? 1 : 0)); 3435 3436 while(n) { 3437 - per_cpu(softnet_data.net_dma, cpu) = chan; 3438 cpu = next_cpu(cpu, cpu_online_map); 3439 n--; 3440 } 3441 i++; 3442 } 3443 rcu_read_unlock(); 3444 - 3445 - unlock_cpu_hotplug(); 3446 } 3447 3448 /**
··· 1166 goto out_set_summed; 1167 1168 if (unlikely(skb_shinfo(skb)->gso_size)) { 1169 /* Let GSO fix up the checksum. */ 1170 goto out_set_summed; 1171 } ··· 1220 __skb_pull(skb, skb->mac_len); 1221 1222 if (unlikely(skb->ip_summed != CHECKSUM_HW)) { 1223 if (skb_header_cloned(skb) && 1224 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 1225 return ERR_PTR(err); ··· 3429 unsigned int cpu, i, n; 3430 struct dma_chan *chan; 3431 3432 if (net_dma_count == 0) { 3433 for_each_online_cpu(cpu) 3434 + rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL); 3435 return; 3436 } 3437 ··· 3447 + (i < (num_online_cpus() % net_dma_count) ? 1 : 0)); 3448 3449 while(n) { 3450 + per_cpu(softnet_data, cpu).net_dma = chan; 3451 cpu = next_cpu(cpu, cpu_online_map); 3452 n--; 3453 } 3454 i++; 3455 } 3456 rcu_read_unlock(); 3457 } 3458 3459 /**
+8 -6
net/core/neighbour.c
··· 29 #include <net/neighbour.h> 30 #include <net/dst.h> 31 #include <net/sock.h> 32 #include <linux/rtnetlink.h> 33 #include <linux/random.h> 34 #include <linux/string.h> ··· 755 neigh->nud_state = NUD_STALE; 756 neigh->updated = jiffies; 757 neigh_suspect(neigh); 758 } 759 } else if (state & NUD_DELAY) { 760 if (time_before_eq(now, ··· 764 neigh->nud_state = NUD_REACHABLE; 765 neigh->updated = jiffies; 766 neigh_connect(neigh); 767 next = neigh->confirmed + neigh->parms->reachable_time; 768 } else { 769 NEIGH_PRINTK2("neigh %p is probed.\n", neigh); ··· 822 out: 823 write_unlock(&neigh->lock); 824 } 825 826 #ifdef CONFIG_ARPD 827 if (notify && neigh->parms->app_probes) ··· 931 { 932 u8 old; 933 int err; 934 - #ifdef CONFIG_ARPD 935 int notify = 0; 936 - #endif 937 struct net_device *dev; 938 int update_isrouter = 0; 939 ··· 951 neigh_suspect(neigh); 952 neigh->nud_state = new; 953 err = 0; 954 - #ifdef CONFIG_ARPD 955 notify = old & NUD_VALID; 956 - #endif 957 goto out; 958 } 959 ··· 1023 if (!(new & NUD_CONNECTED)) 1024 neigh->confirmed = jiffies - 1025 (neigh->parms->base_reachable_time << 1); 1026 - #ifdef CONFIG_ARPD 1027 notify = 1; 1028 - #endif 1029 } 1030 if (new == old) 1031 goto out; ··· 1055 (neigh->flags & ~NTF_ROUTER); 1056 } 1057 write_unlock_bh(&neigh->lock); 1058 #ifdef CONFIG_ARPD 1059 if (notify && neigh->parms->app_probes) 1060 neigh_app_notify(neigh);
··· 29 #include <net/neighbour.h> 30 #include <net/dst.h> 31 #include <net/sock.h> 32 + #include <net/netevent.h> 33 #include <linux/rtnetlink.h> 34 #include <linux/random.h> 35 #include <linux/string.h> ··· 754 neigh->nud_state = NUD_STALE; 755 neigh->updated = jiffies; 756 neigh_suspect(neigh); 757 + notify = 1; 758 } 759 } else if (state & NUD_DELAY) { 760 if (time_before_eq(now, ··· 762 neigh->nud_state = NUD_REACHABLE; 763 neigh->updated = jiffies; 764 neigh_connect(neigh); 765 + notify = 1; 766 next = neigh->confirmed + neigh->parms->reachable_time; 767 } else { 768 NEIGH_PRINTK2("neigh %p is probed.\n", neigh); ··· 819 out: 820 write_unlock(&neigh->lock); 821 } 822 + if (notify) 823 + call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh); 824 825 #ifdef CONFIG_ARPD 826 if (notify && neigh->parms->app_probes) ··· 926 { 927 u8 old; 928 int err; 929 int notify = 0; 930 struct net_device *dev; 931 int update_isrouter = 0; 932 ··· 948 neigh_suspect(neigh); 949 neigh->nud_state = new; 950 err = 0; 951 notify = old & NUD_VALID; 952 goto out; 953 } 954 ··· 1022 if (!(new & NUD_CONNECTED)) 1023 neigh->confirmed = jiffies - 1024 (neigh->parms->base_reachable_time << 1); 1025 notify = 1; 1026 } 1027 if (new == old) 1028 goto out; ··· 1056 (neigh->flags & ~NTF_ROUTER); 1057 } 1058 write_unlock_bh(&neigh->lock); 1059 + 1060 + if (notify) 1061 + call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh); 1062 #ifdef CONFIG_ARPD 1063 if (notify && neigh->parms->app_probes) 1064 neigh_app_notify(neigh);
+69
net/core/netevent.c
···
··· 1 + /* 2 + * Network event notifiers 3 + * 4 + * Authors: 5 + * Tom Tucker <tom@opengridcomputing.com> 6 + * Steve Wise <swise@opengridcomputing.com> 7 + * 8 + * This program is free software; you can redistribute it and/or 9 + * modify it under the terms of the GNU General Public License 10 + * as published by the Free Software Foundation; either version 11 + * 2 of the License, or (at your option) any later version. 12 + * 13 + * Fixes: 14 + */ 15 + 16 + #include <linux/rtnetlink.h> 17 + #include <linux/notifier.h> 18 + 19 + static ATOMIC_NOTIFIER_HEAD(netevent_notif_chain); 20 + 21 + /** 22 + * register_netevent_notifier - register a netevent notifier block 23 + * @nb: notifier 24 + * 25 + * Register a notifier to be called when a netevent occurs. 26 + * The notifier passed is linked into the kernel structures and must 27 + * not be reused until it has been unregistered. A negative errno code 28 + * is returned on a failure. 29 + */ 30 + int register_netevent_notifier(struct notifier_block *nb) 31 + { 32 + int err; 33 + 34 + err = atomic_notifier_chain_register(&netevent_notif_chain, nb); 35 + return err; 36 + } 37 + 38 + /** 39 + * netevent_unregister_notifier - unregister a netevent notifier block 40 + * @nb: notifier 41 + * 42 + * Unregister a notifier previously registered by 43 + * register_neigh_notifier(). The notifier is unlinked into the 44 + * kernel structures and may then be reused. A negative errno code 45 + * is returned on a failure. 46 + */ 47 + 48 + int unregister_netevent_notifier(struct notifier_block *nb) 49 + { 50 + return atomic_notifier_chain_unregister(&netevent_notif_chain, nb); 51 + } 52 + 53 + /** 54 + * call_netevent_notifiers - call all netevent notifier blocks 55 + * @val: value passed unmodified to notifier function 56 + * @v: pointer passed unmodified to notifier function 57 + * 58 + * Call all neighbour notifier blocks. Parameters and return value 59 + * are as for notifier_call_chain(). 60 + */ 61 + 62 + int call_netevent_notifiers(unsigned long val, void *v) 63 + { 64 + return atomic_notifier_call_chain(&netevent_notif_chain, val, v); 65 + } 66 + 67 + EXPORT_SYMBOL_GPL(register_netevent_notifier); 68 + EXPORT_SYMBOL_GPL(unregister_netevent_notifier); 69 + EXPORT_SYMBOL_GPL(call_netevent_notifiers);
+34 -11
net/core/skbuff.c
··· 71 static kmem_cache_t *skbuff_fclone_cache __read_mostly; 72 73 /* 74 - * lockdep: lock class key used by skb_queue_head_init(): 75 - */ 76 - struct lock_class_key skb_queue_lock_key; 77 - 78 - EXPORT_SYMBOL(skb_queue_lock_key); 79 - 80 - /* 81 * Keep out-of-line to prevent kernel bloat. 82 * __builtin_return_address is not used because it is not always 83 * reliable. ··· 249 goto out; 250 } 251 252 253 static void skb_drop_list(struct sk_buff **listp) 254 { ··· 862 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) 863 return err; 864 865 - for (i = 0; i < nfrags; i++) { 866 int end = offset + skb_shinfo(skb)->frags[i].size; 867 868 if (end < len) { ··· 874 continue; 875 } 876 877 - if (len > offset) 878 - skb_shinfo(skb)->frags[i++].size = len - offset; 879 880 skb_shinfo(skb)->nr_frags = i; 881 882 for (; i < nfrags; i++) ··· 884 885 if (skb_shinfo(skb)->frag_list) 886 skb_drop_fraglist(skb); 887 - break; 888 } 889 890 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); ··· 899 return -ENOMEM; 900 901 nfrag->next = frag->next; 902 frag = nfrag; 903 *fragp = frag; 904 } ··· 918 break; 919 } 920 921 if (len > skb_headlen(skb)) { 922 skb->data_len -= skb->len - len; 923 skb->len = len; ··· 2064 EXPORT_SYMBOL(kfree_skb); 2065 EXPORT_SYMBOL(__pskb_pull_tail); 2066 EXPORT_SYMBOL(__alloc_skb); 2067 EXPORT_SYMBOL(pskb_copy); 2068 EXPORT_SYMBOL(pskb_expand_head); 2069 EXPORT_SYMBOL(skb_checksum);
··· 71 static kmem_cache_t *skbuff_fclone_cache __read_mostly; 72 73 /* 74 * Keep out-of-line to prevent kernel bloat. 75 * __builtin_return_address is not used because it is not always 76 * reliable. ··· 256 goto out; 257 } 258 259 + /** 260 + * __netdev_alloc_skb - allocate an skbuff for rx on a specific device 261 + * @dev: network device to receive on 262 + * @length: length to allocate 263 + * @gfp_mask: get_free_pages mask, passed to alloc_skb 264 + * 265 + * Allocate a new &sk_buff and assign it a usage count of one. The 266 + * buffer has unspecified headroom built in. Users should allocate 267 + * the headroom they think they need without accounting for the 268 + * built in space. The built in space is used for optimisations. 269 + * 270 + * %NULL is returned if there is no free memory. 271 + */ 272 + struct sk_buff *__netdev_alloc_skb(struct net_device *dev, 273 + unsigned int length, gfp_t gfp_mask) 274 + { 275 + struct sk_buff *skb; 276 + 277 + skb = alloc_skb(length + NET_SKB_PAD, gfp_mask); 278 + if (likely(skb)) 279 + skb_reserve(skb, NET_SKB_PAD); 280 + return skb; 281 + } 282 283 static void skb_drop_list(struct sk_buff **listp) 284 { ··· 846 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) 847 return err; 848 849 + i = 0; 850 + if (offset >= len) 851 + goto drop_pages; 852 + 853 + for (; i < nfrags; i++) { 854 int end = offset + skb_shinfo(skb)->frags[i].size; 855 856 if (end < len) { ··· 854 continue; 855 } 856 857 + skb_shinfo(skb)->frags[i++].size = len - offset; 858 859 + drop_pages: 860 skb_shinfo(skb)->nr_frags = i; 861 862 for (; i < nfrags; i++) ··· 864 865 if (skb_shinfo(skb)->frag_list) 866 skb_drop_fraglist(skb); 867 + goto done; 868 } 869 870 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); ··· 879 return -ENOMEM; 880 881 nfrag->next = frag->next; 882 + kfree_skb(frag); 883 frag = nfrag; 884 *fragp = frag; 885 } ··· 897 break; 898 } 899 900 + done: 901 if (len > skb_headlen(skb)) { 902 skb->data_len -= skb->len - len; 903 skb->len = len; ··· 2042 EXPORT_SYMBOL(kfree_skb); 2043 EXPORT_SYMBOL(__pskb_pull_tail); 2044 EXPORT_SYMBOL(__alloc_skb); 2045 + EXPORT_SYMBOL(__netdev_alloc_skb); 2046 EXPORT_SYMBOL(pskb_copy); 2047 EXPORT_SYMBOL(pskb_expand_head); 2048 EXPORT_SYMBOL(skb_checksum);
+2 -2
net/dccp/ipv6.c
··· 230 ipv6_addr_copy(&np->saddr, saddr); 231 inet->rcv_saddr = LOOPBACK4_IPV6; 232 233 - ip6_dst_store(sk, dst, NULL); 234 235 icsk->icsk_ext_hdr_len = 0; 236 if (np->opt != NULL) ··· 863 * comment in that function for the gory details. -acme 864 */ 865 866 - ip6_dst_store(newsk, dst, NULL); 867 newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM | 868 NETIF_F_TSO); 869 newdp6 = (struct dccp6_sock *)newsk;
··· 230 ipv6_addr_copy(&np->saddr, saddr); 231 inet->rcv_saddr = LOOPBACK4_IPV6; 232 233 + __ip6_dst_store(sk, dst, NULL); 234 235 icsk->icsk_ext_hdr_len = 0; 236 if (np->opt != NULL) ··· 863 * comment in that function for the gory details. -acme 864 */ 865 866 + __ip6_dst_store(newsk, dst, NULL); 867 newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM | 868 NETIF_F_TSO); 869 newdp6 = (struct dccp6_sock *)newsk;
+7 -2
net/decnet/dn_route.c
··· 925 for(dev_out = dev_base; dev_out; dev_out = dev_out->next) { 926 if (!dev_out->dn_ptr) 927 continue; 928 - if (dn_dev_islocal(dev_out, oldflp->fld_src)) 929 - break; 930 } 931 read_unlock(&dev_base_lock); 932 if (dev_out == NULL)
··· 925 for(dev_out = dev_base; dev_out; dev_out = dev_out->next) { 926 if (!dev_out->dn_ptr) 927 continue; 928 + if (!dn_dev_islocal(dev_out, oldflp->fld_src)) 929 + continue; 930 + if ((dev_out->flags & IFF_LOOPBACK) && 931 + oldflp->fld_dst && 932 + !dn_dev_islocal(dev_out, oldflp->fld_dst)) 933 + continue; 934 + break; 935 } 936 read_unlock(&dev_base_lock); 937 if (dev_out == NULL)
+4 -3
net/ipv4/ip_output.c
··· 526 527 err = output(skb); 528 529 if (err || !frag) 530 break; 531 ··· 651 /* 652 * Put this fragment into the sending queue. 653 */ 654 - 655 - IP_INC_STATS(IPSTATS_MIB_FRAGCREATES); 656 - 657 iph->tot_len = htons(len + hlen); 658 659 ip_send_check(iph); ··· 658 err = output(skb2); 659 if (err) 660 goto fail; 661 } 662 kfree_skb(skb); 663 IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
··· 526 527 err = output(skb); 528 529 + if (!err) 530 + IP_INC_STATS(IPSTATS_MIB_FRAGCREATES); 531 if (err || !frag) 532 break; 533 ··· 649 /* 650 * Put this fragment into the sending queue. 651 */ 652 iph->tot_len = htons(len + hlen); 653 654 ip_send_check(iph); ··· 659 err = output(skb2); 660 if (err) 661 goto fail; 662 + 663 + IP_INC_STATS(IPSTATS_MIB_FRAGCREATES); 664 } 665 kfree_skb(skb); 666 IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
+7 -2
net/ipv4/ip_sockglue.c
··· 112 static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb) 113 { 114 char *secdata; 115 - u32 seclen; 116 int err; 117 118 - err = security_socket_getpeersec_dgram(skb, &secdata, &seclen); 119 if (err) 120 return; 121 122 put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata); 123 } 124 125
··· 112 static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb) 113 { 114 char *secdata; 115 + u32 seclen, secid; 116 int err; 117 118 + err = security_socket_getpeersec_dgram(NULL, skb, &secid); 119 + if (err) 120 + return; 121 + 122 + err = security_secid_to_secctx(secid, &secdata, &seclen); 123 if (err) 124 return; 125 126 put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata); 127 + security_release_secctx(secdata, seclen); 128 } 129 130
+1 -1
net/ipv4/netfilter/ip_conntrack_sip.c
··· 442 sip[i].tuple.src.u.udp.port = htons(ports[i]); 443 sip[i].mask.src.u.udp.port = 0xFFFF; 444 sip[i].mask.dst.protonum = 0xFF; 445 - sip[i].max_expected = 1; 446 sip[i].timeout = 3 * 60; /* 3 minutes */ 447 sip[i].me = THIS_MODULE; 448 sip[i].help = sip_help;
··· 442 sip[i].tuple.src.u.udp.port = htons(ports[i]); 443 sip[i].mask.src.u.udp.port = 0xFFFF; 444 sip[i].mask.dst.protonum = 0xFF; 445 + sip[i].max_expected = 2; 446 sip[i].timeout = 3 * 60; /* 3 minutes */ 447 sip[i].me = THIS_MODULE; 448 sip[i].help = sip_help;
+3
net/ipv4/netfilter/ipt_hashlimit.c
··· 508 if (!r->cfg.expire) 509 return 0; 510 511 /* This is the best we've got: We cannot release and re-grab lock, 512 * since checkentry() is called before ip_tables.c grabs ipt_mutex. 513 * We also cannot grab the hashtable spinlock, since htable_create will
··· 508 if (!r->cfg.expire) 509 return 0; 510 511 + if (r->name[sizeof(r->name) - 1] != '\0') 512 + return 0; 513 + 514 /* This is the best we've got: We cannot release and re-grab lock, 515 * since checkentry() is called before ip_tables.c grabs ipt_mutex. 516 * We also cannot grab the hashtable spinlock, since htable_create will
+8
net/ipv4/route.c
··· 104 #include <net/icmp.h> 105 #include <net/xfrm.h> 106 #include <net/ip_mp_alg.h> 107 #ifdef CONFIG_SYSCTL 108 #include <linux/sysctl.h> 109 #endif ··· 1126 struct rtable *rth, **rthp; 1127 u32 skeys[2] = { saddr, 0 }; 1128 int ikeys[2] = { dev->ifindex, 0 }; 1129 1130 if (!in_dev) 1131 return; ··· 1218 rt_drop(rt); 1219 goto do_next; 1220 } 1221 1222 rt_del(hash, rth); 1223 if (!rt_intern_hash(hash, rt, &rt)) ··· 1459 } 1460 dst->metrics[RTAX_MTU-1] = mtu; 1461 dst_set_expires(dst, ip_rt_mtu_expires); 1462 } 1463 } 1464
··· 104 #include <net/icmp.h> 105 #include <net/xfrm.h> 106 #include <net/ip_mp_alg.h> 107 + #include <net/netevent.h> 108 #ifdef CONFIG_SYSCTL 109 #include <linux/sysctl.h> 110 #endif ··· 1125 struct rtable *rth, **rthp; 1126 u32 skeys[2] = { saddr, 0 }; 1127 int ikeys[2] = { dev->ifindex, 0 }; 1128 + struct netevent_redirect netevent; 1129 1130 if (!in_dev) 1131 return; ··· 1216 rt_drop(rt); 1217 goto do_next; 1218 } 1219 + 1220 + netevent.old = &rth->u.dst; 1221 + netevent.new = &rt->u.dst; 1222 + call_netevent_notifiers(NETEVENT_REDIRECT, 1223 + &netevent); 1224 1225 rt_del(hash, rth); 1226 if (!rt_intern_hash(hash, rt, &rt)) ··· 1452 } 1453 dst->metrics[RTAX_MTU-1] = mtu; 1454 dst_set_expires(dst, ip_rt_mtu_expires); 1455 + call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst); 1456 } 1457 } 1458
+3 -2
net/ipv4/tcp.c
··· 1132 tp->ucopy.dma_chan = NULL; 1133 preempt_disable(); 1134 if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && 1135 - !sysctl_tcp_low_latency && __get_cpu_var(softnet_data.net_dma)) { 1136 preempt_enable_no_resched(); 1137 tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len); 1138 } else ··· 1659 const int tmo = tcp_fin_time(sk); 1660 1661 if (tmo > TCP_TIMEWAIT_LEN) { 1662 - inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk)); 1663 } else { 1664 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 1665 goto out;
··· 1132 tp->ucopy.dma_chan = NULL; 1133 preempt_disable(); 1134 if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && 1135 + !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) { 1136 preempt_enable_no_resched(); 1137 tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len); 1138 } else ··· 1659 const int tmo = tcp_fin_time(sk); 1660 1661 if (tmo > TCP_TIMEWAIT_LEN) { 1662 + inet_csk_reset_keepalive_timer(sk, 1663 + tmo - TCP_TIMEWAIT_LEN); 1664 } else { 1665 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 1666 goto out;
-2
net/ipv4/tcp_ipv4.c
··· 438 It can f.e. if SYNs crossed. 439 */ 440 if (!sock_owned_by_user(sk)) { 441 - TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); 442 sk->sk_err = err; 443 444 sk->sk_error_report(sk); ··· 873 drop_and_free: 874 reqsk_free(req); 875 drop: 876 - TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); 877 return 0; 878 } 879
··· 438 It can f.e. if SYNs crossed. 439 */ 440 if (!sock_owned_by_user(sk)) { 441 sk->sk_err = err; 442 443 sk->sk_error_report(sk); ··· 874 drop_and_free: 875 reqsk_free(req); 876 drop: 877 return 0; 878 } 879
+3 -1
net/ipv4/tcp_minisocks.c
··· 589 /* RFC793: "second check the RST bit" and 590 * "fourth, check the SYN bit" 591 */ 592 - if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) 593 goto embryonic_reset; 594 595 /* ACK sequence verified above, just make sure ACK is 596 * set. If ACK not set, just silently drop the packet.
··· 589 /* RFC793: "second check the RST bit" and 590 * "fourth, check the SYN bit" 591 */ 592 + if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) { 593 + TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); 594 goto embryonic_reset; 595 + } 596 597 /* ACK sequence verified above, just make sure ACK is 598 * set. If ACK not set, just silently drop the packet.
+1 -1
net/ipv4/tcp_probe.c
··· 114 static ssize_t tcpprobe_read(struct file *file, char __user *buf, 115 size_t len, loff_t *ppos) 116 { 117 - int error = 0, cnt; 118 unsigned char *tbuf; 119 120 if (!buf || len < 0)
··· 114 static ssize_t tcpprobe_read(struct file *file, char __user *buf, 115 size_t len, loff_t *ppos) 116 { 117 + int error = 0, cnt = 0; 118 unsigned char *tbuf; 119 120 if (!buf || len < 0)
+166 -8
net/ipv6/addrconf.c
··· 1869 /* 1870 * Manual configuration of address on an interface 1871 */ 1872 - static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen) 1873 { 1874 struct inet6_ifaddr *ifp; 1875 struct inet6_dev *idev; 1876 struct net_device *dev; 1877 int scope; 1878 1879 ASSERT_RTNL(); 1880 1881 if ((dev = __dev_get_by_index(ifindex)) == NULL) 1882 return -ENODEV; 1883 ··· 1895 1896 scope = ipv6_addr_scope(pfx); 1897 1898 - ifp = ipv6_add_addr(idev, pfx, plen, scope, IFA_F_PERMANENT); 1899 if (!IS_ERR(ifp)) { 1900 addrconf_dad_start(ifp, 0); 1901 in6_ifa_put(ifp); 1902 return 0; 1903 } 1904 ··· 1970 return -EFAULT; 1971 1972 rtnl_lock(); 1973 - err = inet6_addr_add(ireq.ifr6_ifindex, &ireq.ifr6_addr, ireq.ifr6_prefixlen); 1974 rtnl_unlock(); 1975 return err; 1976 } ··· 2797 ifp->idev->nd_parms->retrans_time / HZ; 2798 #endif 2799 2800 - if (age >= ifp->valid_lft) { 2801 spin_unlock(&ifp->lock); 2802 in6_ifa_hold(ifp); 2803 read_unlock(&addrconf_hash_lock); 2804 ipv6_del_addr(ifp); 2805 goto restart; 2806 } else if (age >= ifp->prefered_lft) { 2807 /* jiffies - ifp->tsamp > age >= ifp->prefered_lft */ 2808 int deprecate = 0; ··· 2883 pfx = RTA_DATA(rta[IFA_ADDRESS-1]); 2884 } 2885 if (rta[IFA_LOCAL-1]) { 2886 - if (pfx && memcmp(pfx, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*pfx))) 2887 return -EINVAL; 2888 pfx = RTA_DATA(rta[IFA_LOCAL-1]); 2889 } ··· 2895 } 2896 2897 static int 2898 inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 2899 { 2900 struct rtattr **rta = arg; 2901 struct ifaddrmsg *ifm = NLMSG_DATA(nlh); 2902 struct in6_addr *pfx; 2903 2904 pfx = NULL; 2905 if (rta[IFA_ADDRESS-1]) { ··· 2958 pfx = RTA_DATA(rta[IFA_ADDRESS-1]); 2959 } 2960 if (rta[IFA_LOCAL-1]) { 2961 - if (pfx && memcmp(pfx, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*pfx))) 2962 return -EINVAL; 2963 pfx = RTA_DATA(rta[IFA_LOCAL-1]); 2964 } 2965 if (pfx == NULL) 2966 return -EINVAL; 2967 2968 - return inet6_addr_add(ifm->ifa_index, pfx, ifm->ifa_prefixlen); 2969 } 2970 2971 /* 
Maximum length of ifa_cacheinfo attributes */ ··· 3220 { 3221 enum addr_type_t type = ANYCAST_ADDR; 3222 return inet6_dump_addr(skb, cb, type); 3223 } 3224 3225 static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa) ··· 3520 [RTM_GETLINK - RTM_BASE] = { .dumpit = inet6_dump_ifinfo, }, 3521 [RTM_NEWADDR - RTM_BASE] = { .doit = inet6_rtm_newaddr, }, 3522 [RTM_DELADDR - RTM_BASE] = { .doit = inet6_rtm_deladdr, }, 3523 - [RTM_GETADDR - RTM_BASE] = { .dumpit = inet6_dump_ifaddr, }, 3524 [RTM_GETMULTICAST - RTM_BASE] = { .dumpit = inet6_dump_ifmcaddr, }, 3525 [RTM_GETANYCAST - RTM_BASE] = { .dumpit = inet6_dump_ifacaddr, }, 3526 [RTM_NEWROUTE - RTM_BASE] = { .doit = inet6_rtm_newroute, },
··· 1869 /* 1870 * Manual configuration of address on an interface 1871 */ 1872 + static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen, 1873 + __u32 prefered_lft, __u32 valid_lft) 1874 { 1875 struct inet6_ifaddr *ifp; 1876 struct inet6_dev *idev; 1877 struct net_device *dev; 1878 + __u8 ifa_flags = 0; 1879 int scope; 1880 1881 ASSERT_RTNL(); 1882 1883 + /* check the lifetime */ 1884 + if (!valid_lft || prefered_lft > valid_lft) 1885 + return -EINVAL; 1886 + 1887 if ((dev = __dev_get_by_index(ifindex)) == NULL) 1888 return -ENODEV; 1889 ··· 1889 1890 scope = ipv6_addr_scope(pfx); 1891 1892 + if (valid_lft == INFINITY_LIFE_TIME) 1893 + ifa_flags |= IFA_F_PERMANENT; 1894 + else if (valid_lft >= 0x7FFFFFFF/HZ) 1895 + valid_lft = 0x7FFFFFFF/HZ; 1896 + 1897 + if (prefered_lft == 0) 1898 + ifa_flags |= IFA_F_DEPRECATED; 1899 + else if ((prefered_lft >= 0x7FFFFFFF/HZ) && 1900 + (prefered_lft != INFINITY_LIFE_TIME)) 1901 + prefered_lft = 0x7FFFFFFF/HZ; 1902 + 1903 + ifp = ipv6_add_addr(idev, pfx, plen, scope, ifa_flags); 1904 + 1905 if (!IS_ERR(ifp)) { 1906 + spin_lock(&ifp->lock); 1907 + ifp->valid_lft = valid_lft; 1908 + ifp->prefered_lft = prefered_lft; 1909 + ifp->tstamp = jiffies; 1910 + spin_unlock(&ifp->lock); 1911 + 1912 addrconf_dad_start(ifp, 0); 1913 in6_ifa_put(ifp); 1914 + addrconf_verify(0); 1915 return 0; 1916 } 1917 ··· 1945 return -EFAULT; 1946 1947 rtnl_lock(); 1948 + err = inet6_addr_add(ireq.ifr6_ifindex, &ireq.ifr6_addr, ireq.ifr6_prefixlen, 1949 + INFINITY_LIFE_TIME, INFINITY_LIFE_TIME); 1950 rtnl_unlock(); 1951 return err; 1952 } ··· 2771 ifp->idev->nd_parms->retrans_time / HZ; 2772 #endif 2773 2774 + if (ifp->valid_lft != INFINITY_LIFE_TIME && 2775 + age >= ifp->valid_lft) { 2776 spin_unlock(&ifp->lock); 2777 in6_ifa_hold(ifp); 2778 read_unlock(&addrconf_hash_lock); 2779 ipv6_del_addr(ifp); 2780 goto restart; 2781 + } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) { 2782 + spin_unlock(&ifp->lock); 2783 + continue; 2784 } else if 
(age >= ifp->prefered_lft) { 2785 /* jiffies - ifp->tsamp > age >= ifp->prefered_lft */ 2786 int deprecate = 0; ··· 2853 pfx = RTA_DATA(rta[IFA_ADDRESS-1]); 2854 } 2855 if (rta[IFA_LOCAL-1]) { 2856 + if (RTA_PAYLOAD(rta[IFA_LOCAL-1]) < sizeof(*pfx) || 2857 + (pfx && memcmp(pfx, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*pfx)))) 2858 return -EINVAL; 2859 pfx = RTA_DATA(rta[IFA_LOCAL-1]); 2860 } ··· 2864 } 2865 2866 static int 2867 + inet6_addr_modify(int ifindex, struct in6_addr *pfx, 2868 + __u32 prefered_lft, __u32 valid_lft) 2869 + { 2870 + struct inet6_ifaddr *ifp = NULL; 2871 + struct net_device *dev; 2872 + int ifa_flags = 0; 2873 + 2874 + if ((dev = __dev_get_by_index(ifindex)) == NULL) 2875 + return -ENODEV; 2876 + 2877 + if (!(dev->flags&IFF_UP)) 2878 + return -ENETDOWN; 2879 + 2880 + if (!valid_lft || (prefered_lft > valid_lft)) 2881 + return -EINVAL; 2882 + 2883 + ifp = ipv6_get_ifaddr(pfx, dev, 1); 2884 + if (ifp == NULL) 2885 + return -ENOENT; 2886 + 2887 + if (valid_lft == INFINITY_LIFE_TIME) 2888 + ifa_flags = IFA_F_PERMANENT; 2889 + else if (valid_lft >= 0x7FFFFFFF/HZ) 2890 + valid_lft = 0x7FFFFFFF/HZ; 2891 + 2892 + if (prefered_lft == 0) 2893 + ifa_flags = IFA_F_DEPRECATED; 2894 + else if ((prefered_lft >= 0x7FFFFFFF/HZ) && 2895 + (prefered_lft != INFINITY_LIFE_TIME)) 2896 + prefered_lft = 0x7FFFFFFF/HZ; 2897 + 2898 + spin_lock_bh(&ifp->lock); 2899 + ifp->flags = (ifp->flags & ~(IFA_F_DEPRECATED|IFA_F_PERMANENT)) | ifa_flags; 2900 + 2901 + ifp->tstamp = jiffies; 2902 + ifp->valid_lft = valid_lft; 2903 + ifp->prefered_lft = prefered_lft; 2904 + 2905 + spin_unlock_bh(&ifp->lock); 2906 + if (!(ifp->flags&IFA_F_TENTATIVE)) 2907 + ipv6_ifa_notify(0, ifp); 2908 + in6_ifa_put(ifp); 2909 + 2910 + addrconf_verify(0); 2911 + 2912 + return 0; 2913 + } 2914 + 2915 + static int 2916 inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 2917 { 2918 struct rtattr **rta = arg; 2919 struct ifaddrmsg *ifm = NLMSG_DATA(nlh); 2920 struct in6_addr *pfx; 2921 
+ __u32 valid_lft = INFINITY_LIFE_TIME, prefered_lft = INFINITY_LIFE_TIME; 2922 2923 pfx = NULL; 2924 if (rta[IFA_ADDRESS-1]) { ··· 2877 pfx = RTA_DATA(rta[IFA_ADDRESS-1]); 2878 } 2879 if (rta[IFA_LOCAL-1]) { 2880 + if (RTA_PAYLOAD(rta[IFA_LOCAL-1]) < sizeof(*pfx) || 2881 + (pfx && memcmp(pfx, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*pfx)))) 2882 return -EINVAL; 2883 pfx = RTA_DATA(rta[IFA_LOCAL-1]); 2884 } 2885 if (pfx == NULL) 2886 return -EINVAL; 2887 2888 + if (rta[IFA_CACHEINFO-1]) { 2889 + struct ifa_cacheinfo *ci; 2890 + if (RTA_PAYLOAD(rta[IFA_CACHEINFO-1]) < sizeof(*ci)) 2891 + return -EINVAL; 2892 + ci = RTA_DATA(rta[IFA_CACHEINFO-1]); 2893 + valid_lft = ci->ifa_valid; 2894 + prefered_lft = ci->ifa_prefered; 2895 + } 2896 + 2897 + if (nlh->nlmsg_flags & NLM_F_REPLACE) { 2898 + int ret; 2899 + ret = inet6_addr_modify(ifm->ifa_index, pfx, 2900 + prefered_lft, valid_lft); 2901 + if (ret == 0 || !(nlh->nlmsg_flags & NLM_F_CREATE)) 2902 + return ret; 2903 + } 2904 + 2905 + return inet6_addr_add(ifm->ifa_index, pfx, ifm->ifa_prefixlen, 2906 + prefered_lft, valid_lft); 2907 + 2908 } 2909 2910 /* Maximum length of ifa_cacheinfo attributes */ ··· 3119 { 3120 enum addr_type_t type = ANYCAST_ADDR; 3121 return inet6_dump_addr(skb, cb, type); 3122 + } 3123 + 3124 + static int inet6_rtm_getaddr(struct sk_buff *in_skb, 3125 + struct nlmsghdr* nlh, void *arg) 3126 + { 3127 + struct rtattr **rta = arg; 3128 + struct ifaddrmsg *ifm = NLMSG_DATA(nlh); 3129 + struct in6_addr *addr = NULL; 3130 + struct net_device *dev = NULL; 3131 + struct inet6_ifaddr *ifa; 3132 + struct sk_buff *skb; 3133 + int size = NLMSG_SPACE(sizeof(struct ifaddrmsg) + INET6_IFADDR_RTA_SPACE); 3134 + int err; 3135 + 3136 + if (rta[IFA_ADDRESS-1]) { 3137 + if (RTA_PAYLOAD(rta[IFA_ADDRESS-1]) < sizeof(*addr)) 3138 + return -EINVAL; 3139 + addr = RTA_DATA(rta[IFA_ADDRESS-1]); 3140 + } 3141 + if (rta[IFA_LOCAL-1]) { 3142 + if (RTA_PAYLOAD(rta[IFA_LOCAL-1]) < sizeof(*addr) || 3143 + (addr && memcmp(addr, 
RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*addr)))) 3144 + return -EINVAL; 3145 + addr = RTA_DATA(rta[IFA_LOCAL-1]); 3146 + } 3147 + if (addr == NULL) 3148 + return -EINVAL; 3149 + 3150 + if (ifm->ifa_index) 3151 + dev = __dev_get_by_index(ifm->ifa_index); 3152 + 3153 + if ((ifa = ipv6_get_ifaddr(addr, dev, 1)) == NULL) 3154 + return -EADDRNOTAVAIL; 3155 + 3156 + if ((skb = alloc_skb(size, GFP_KERNEL)) == NULL) { 3157 + err = -ENOBUFS; 3158 + goto out; 3159 + } 3160 + 3161 + NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid; 3162 + err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).pid, 3163 + nlh->nlmsg_seq, RTM_NEWADDR, 0); 3164 + if (err < 0) { 3165 + err = -EMSGSIZE; 3166 + goto out_free; 3167 + } 3168 + 3169 + err = netlink_unicast(rtnl, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT); 3170 + if (err > 0) 3171 + err = 0; 3172 + out: 3173 + in6_ifa_put(ifa); 3174 + return err; 3175 + out_free: 3176 + kfree_skb(skb); 3177 + goto out; 3178 } 3179 3180 static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa) ··· 3363 [RTM_GETLINK - RTM_BASE] = { .dumpit = inet6_dump_ifinfo, }, 3364 [RTM_NEWADDR - RTM_BASE] = { .doit = inet6_rtm_newaddr, }, 3365 [RTM_DELADDR - RTM_BASE] = { .doit = inet6_rtm_deladdr, }, 3366 + [RTM_GETADDR - RTM_BASE] = { .doit = inet6_rtm_getaddr, 3367 + .dumpit = inet6_dump_ifaddr, }, 3368 [RTM_GETMULTICAST - RTM_BASE] = { .dumpit = inet6_dump_ifmcaddr, }, 3369 [RTM_GETANYCAST - RTM_BASE] = { .dumpit = inet6_dump_ifacaddr, }, 3370 [RTM_NEWROUTE - RTM_BASE] = { .doit = inet6_rtm_newroute, },
+1 -1
net/ipv6/af_inet6.c
··· 658 return err; 659 } 660 661 - ip6_dst_store(sk, dst, NULL); 662 } 663 664 return 0;
··· 658 return err; 659 } 660 661 + __ip6_dst_store(sk, dst, NULL); 662 } 663 664 return 0;
+1 -1
net/ipv6/inet6_connection_sock.c
··· 185 return err; 186 } 187 188 - ip6_dst_store(sk, dst, NULL); 189 } 190 191 skb->dst = dst_clone(dst);
··· 185 return err; 186 } 187 188 + __ip6_dst_store(sk, dst, NULL); 189 } 190 191 skb->dst = dst_clone(dst);
+87 -42
net/ipv6/ip6_output.c
··· 356 skb->dev = dst->dev; 357 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 358 0, skb->dev); 359 360 kfree_skb(skb); 361 return -ETIMEDOUT; ··· 596 } 597 598 err = output(skb); 599 if (err || !frag) 600 break; 601 ··· 710 /* 711 * Put this fragment into the sending queue. 712 */ 713 - 714 - IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES); 715 - 716 err = output(frag); 717 if (err) 718 goto fail; 719 } 720 kfree_skb(skb); 721 IP6_INC_STATS(IPSTATS_MIB_FRAGOKS); ··· 726 return err; 727 } 728 729 - int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl) 730 { 731 - int err = 0; 732 733 - *dst = NULL; 734 - if (sk) { 735 - struct ipv6_pinfo *np = inet6_sk(sk); 736 - 737 - *dst = sk_dst_check(sk, np->dst_cookie); 738 - if (*dst) { 739 - struct rt6_info *rt = (struct rt6_info*)*dst; 740 - 741 - /* Yes, checking route validity in not connected 742 - * case is not very simple. Take into account, 743 - * that we do not support routing by source, TOS, 744 - * and MSG_DONTROUTE --ANK (980726) 745 - * 746 - * 1. If route was host route, check that 747 - * cached destination is current. 748 - * If it is network route, we still may 749 - * check its validity using saved pointer 750 - * to the last used address: daddr_cache. 751 - * We do not want to save whole address now, 752 - * (because main consumer of this service 753 - * is tcp, which has not this problem), 754 - * so that the last trick works only on connected 755 - * sockets. 756 - * 2. oif also should be the same. 
757 - */ 758 - if (((rt->rt6i_dst.plen != 128 || 759 - !ipv6_addr_equal(&fl->fl6_dst, 760 - &rt->rt6i_dst.addr)) 761 - && (np->daddr_cache == NULL || 762 - !ipv6_addr_equal(&fl->fl6_dst, 763 - np->daddr_cache))) 764 - || (fl->oif && fl->oif != (*dst)->dev->ifindex)) { 765 - dst_release(*dst); 766 - *dst = NULL; 767 - } 768 - } 769 } 770 771 if (*dst == NULL) 772 *dst = ip6_route_output(sk, fl); ··· 779 780 if (ipv6_addr_any(&fl->fl6_src)) { 781 err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src); 782 - 783 if (err) 784 goto out_err_release; 785 } ··· 791 return err; 792 } 793 794 EXPORT_SYMBOL_GPL(ip6_dst_lookup); 795 796 static inline int ip6_ufo_append_data(struct sock *sk, 797 int getfrag(void *from, char *to, int offset, int len,
··· 356 skb->dev = dst->dev; 357 icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 358 0, skb->dev); 359 + IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS); 360 361 kfree_skb(skb); 362 return -ETIMEDOUT; ··· 595 } 596 597 err = output(skb); 598 + if(!err) 599 + IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES); 600 + 601 if (err || !frag) 602 break; 603 ··· 706 /* 707 * Put this fragment into the sending queue. 708 */ 709 err = output(frag); 710 if (err) 711 goto fail; 712 + 713 + IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES); 714 } 715 kfree_skb(skb); 716 IP6_INC_STATS(IPSTATS_MIB_FRAGOKS); ··· 723 return err; 724 } 725 726 + static struct dst_entry *ip6_sk_dst_check(struct sock *sk, 727 + struct dst_entry *dst, 728 + struct flowi *fl) 729 { 730 + struct ipv6_pinfo *np = inet6_sk(sk); 731 + struct rt6_info *rt = (struct rt6_info *)dst; 732 733 + if (!dst) 734 + goto out; 735 + 736 + /* Yes, checking route validity in not connected 737 + * case is not very simple. Take into account, 738 + * that we do not support routing by source, TOS, 739 + * and MSG_DONTROUTE --ANK (980726) 740 + * 741 + * 1. If route was host route, check that 742 + * cached destination is current. 743 + * If it is network route, we still may 744 + * check its validity using saved pointer 745 + * to the last used address: daddr_cache. 746 + * We do not want to save whole address now, 747 + * (because main consumer of this service 748 + * is tcp, which has not this problem), 749 + * so that the last trick works only on connected 750 + * sockets. 751 + * 2. oif also should be the same. 
752 + */ 753 + if (((rt->rt6i_dst.plen != 128 || 754 + !ipv6_addr_equal(&fl->fl6_dst, &rt->rt6i_dst.addr)) 755 + && (np->daddr_cache == NULL || 756 + !ipv6_addr_equal(&fl->fl6_dst, np->daddr_cache))) 757 + || (fl->oif && fl->oif != dst->dev->ifindex)) { 758 + dst_release(dst); 759 + dst = NULL; 760 } 761 + 762 + out: 763 + return dst; 764 + } 765 + 766 + static int ip6_dst_lookup_tail(struct sock *sk, 767 + struct dst_entry **dst, struct flowi *fl) 768 + { 769 + int err; 770 771 if (*dst == NULL) 772 *dst = ip6_route_output(sk, fl); ··· 773 774 if (ipv6_addr_any(&fl->fl6_src)) { 775 err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src); 776 if (err) 777 goto out_err_release; 778 } ··· 786 return err; 787 } 788 789 + /** 790 + * ip6_dst_lookup - perform route lookup on flow 791 + * @sk: socket which provides route info 792 + * @dst: pointer to dst_entry * for result 793 + * @fl: flow to lookup 794 + * 795 + * This function performs a route lookup on the given flow. 796 + * 797 + * It returns zero on success, or a standard errno code on error. 798 + */ 799 + int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl) 800 + { 801 + *dst = NULL; 802 + return ip6_dst_lookup_tail(sk, dst, fl); 803 + } 804 EXPORT_SYMBOL_GPL(ip6_dst_lookup); 805 + 806 + /** 807 + * ip6_sk_dst_lookup - perform socket cached route lookup on flow 808 + * @sk: socket which provides the dst cache and route info 809 + * @dst: pointer to dst_entry * for result 810 + * @fl: flow to lookup 811 + * 812 + * This function performs a route lookup on the given flow with the 813 + * possibility of using the cached route in the socket if it is valid. 814 + * It will take the socket dst lock when operating on the dst cache. 815 + * As a result, this function can only be used in process context. 816 + * 817 + * It returns zero on success, or a standard errno code on error. 
818 + */ 819 + int ip6_sk_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl) 820 + { 821 + *dst = NULL; 822 + if (sk) { 823 + *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie); 824 + *dst = ip6_sk_dst_check(sk, *dst, fl); 825 + } 826 + 827 + return ip6_dst_lookup_tail(sk, dst, fl); 828 + } 829 + EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup); 830 831 static inline int ip6_ufo_append_data(struct sock *sk, 832 int getfrag(void *from, char *to, int offset, int len,
+7
net/ipv6/route.c
··· 53 #include <linux/rtnetlink.h> 54 #include <net/dst.h> 55 #include <net/xfrm.h> 56 57 #include <asm/uaccess.h> 58 ··· 743 dst->metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG; 744 } 745 dst->metrics[RTAX_MTU-1] = mtu; 746 } 747 } 748 ··· 1157 struct rt6_info *rt, *nrt = NULL; 1158 int strict; 1159 struct fib6_node *fn; 1160 1161 /* 1162 * Get the "current" route for this destination and ··· 1254 1255 if (ip6_ins_rt(nrt, NULL, NULL, NULL)) 1256 goto out; 1257 1258 if (rt->rt6i_flags&RTF_CACHE) { 1259 ip6_del_rt(rt, NULL, NULL, NULL);
··· 53 #include <linux/rtnetlink.h> 54 #include <net/dst.h> 55 #include <net/xfrm.h> 56 + #include <net/netevent.h> 57 58 #include <asm/uaccess.h> 59 ··· 742 dst->metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG; 743 } 744 dst->metrics[RTAX_MTU-1] = mtu; 745 + call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst); 746 } 747 } 748 ··· 1155 struct rt6_info *rt, *nrt = NULL; 1156 int strict; 1157 struct fib6_node *fn; 1158 + struct netevent_redirect netevent; 1159 1160 /* 1161 * Get the "current" route for this destination and ··· 1251 1252 if (ip6_ins_rt(nrt, NULL, NULL, NULL)) 1253 goto out; 1254 + 1255 + netevent.old = &rt->u.dst; 1256 + netevent.new = &nrt->u.dst; 1257 + call_netevent_notifiers(NETEVENT_REDIRECT, &netevent); 1258 1259 if (rt->rt6i_flags&RTF_CACHE) { 1260 ip6_del_rt(rt, NULL, NULL, NULL);
+2 -4
net/ipv6/tcp_ipv6.c
··· 270 inet->rcv_saddr = LOOPBACK4_IPV6; 271 272 sk->sk_gso_type = SKB_GSO_TCPV6; 273 - ip6_dst_store(sk, dst, NULL); 274 275 icsk->icsk_ext_hdr_len = 0; 276 if (np->opt) ··· 427 case TCP_SYN_RECV: /* Cannot happen. 428 It can, it SYNs are crossed. --ANK */ 429 if (!sock_owned_by_user(sk)) { 430 - TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); 431 sk->sk_err = err; 432 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */ 433 ··· 830 if (req) 831 reqsk_free(req); 832 833 - TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS); 834 return 0; /* don't send reset */ 835 } 836 ··· 945 */ 946 947 sk->sk_gso_type = SKB_GSO_TCPV6; 948 - ip6_dst_store(newsk, dst, NULL); 949 950 newtcp6sk = (struct tcp6_sock *)newsk; 951 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
··· 270 inet->rcv_saddr = LOOPBACK4_IPV6; 271 272 sk->sk_gso_type = SKB_GSO_TCPV6; 273 + __ip6_dst_store(sk, dst, NULL); 274 275 icsk->icsk_ext_hdr_len = 0; 276 if (np->opt) ··· 427 case TCP_SYN_RECV: /* Cannot happen. 428 It can, it SYNs are crossed. --ANK */ 429 if (!sock_owned_by_user(sk)) { 430 sk->sk_err = err; 431 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */ 432 ··· 831 if (req) 832 reqsk_free(req); 833 834 return 0; /* don't send reset */ 835 } 836 ··· 947 */ 948 949 sk->sk_gso_type = SKB_GSO_TCPV6; 950 + __ip6_dst_store(newsk, dst, NULL); 951 952 newtcp6sk = (struct tcp6_sock *)newsk; 953 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
+1 -1
net/ipv6/udp.c
··· 782 connected = 0; 783 } 784 785 - err = ip6_dst_lookup(sk, &dst, fl); 786 if (err) 787 goto out; 788 if (final_p)
··· 782 connected = 0; 783 } 784 785 + err = ip6_sk_dst_lookup(sk, &dst, fl); 786 if (err) 787 goto out; 788 if (final_p)
+1 -1
net/ipv6/xfrm6_output.c
··· 125 if (!skb_is_gso(skb)) 126 return xfrm6_output_finish2(skb); 127 128 - skb->protocol = htons(ETH_P_IP); 129 segs = skb_gso_segment(skb, 0); 130 kfree_skb(skb); 131 if (unlikely(IS_ERR(segs)))
··· 125 if (!skb_is_gso(skb)) 126 return xfrm6_output_finish2(skb); 127 128 + skb->protocol = htons(ETH_P_IPV6); 129 segs = skb_gso_segment(skb, 0); 130 kfree_skb(skb); 131 if (unlikely(IS_ERR(segs)))
+2
net/netfilter/xt_SECMARK.c
··· 57 { 58 int err; 59 struct xt_secmark_target_selinux_info *sel = &info->u.sel; 60 61 err = selinux_string_to_sid(sel->selctx, &sel->selsid); 62 if (err) {
··· 57 { 58 int err; 59 struct xt_secmark_target_selinux_info *sel = &info->u.sel; 60 + 61 + sel->selctx[SECMARK_SELCTX_MAX - 1] = '\0'; 62 63 err = selinux_string_to_sid(sel->selctx, &sel->selsid); 64 if (err) {
+4 -1
net/netfilter/xt_string.c
··· 55 /* Damn, can't handle this case properly with iptables... */ 56 if (conf->from_offset > conf->to_offset) 57 return 0; 58 - 59 ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen, 60 GFP_KERNEL, TS_AUTOLOAD); 61 if (IS_ERR(ts_conf))
··· 55 /* Damn, can't handle this case properly with iptables... */ 56 if (conf->from_offset > conf->to_offset) 57 return 0; 58 + if (conf->algo[XT_STRING_MAX_ALGO_NAME_SIZE - 1] != '\0') 59 + return 0; 60 + if (conf->patlen > XT_STRING_MAX_PATTERN_SIZE) 61 + return 0; 62 ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen, 63 GFP_KERNEL, TS_AUTOLOAD); 64 if (IS_ERR(ts_conf))
+5 -12
net/unix/af_unix.c
··· 128 #define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE) 129 130 #ifdef CONFIG_SECURITY_NETWORK 131 - static void unix_get_peersec_dgram(struct sk_buff *skb) 132 { 133 - int err; 134 - 135 - err = security_socket_getpeersec_dgram(skb, UNIXSECDATA(skb), 136 - UNIXSECLEN(skb)); 137 - if (err) 138 - *(UNIXSECDATA(skb)) = NULL; 139 } 140 141 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) 142 { 143 - scm->secdata = *UNIXSECDATA(skb); 144 - scm->seclen = *UNIXSECLEN(skb); 145 } 146 #else 147 - static inline void unix_get_peersec_dgram(struct sk_buff *skb) 148 { } 149 150 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) ··· 1316 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); 1317 if (siocb->scm->fp) 1318 unix_attach_fds(siocb->scm, skb); 1319 - 1320 - unix_get_peersec_dgram(skb); 1321 1322 skb->h.raw = skb->data; 1323 err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
··· 128 #define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE) 129 130 #ifdef CONFIG_SECURITY_NETWORK 131 + static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb) 132 { 133 + memcpy(UNIXSID(skb), &scm->secid, sizeof(u32)); 134 } 135 136 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) 137 { 138 + scm->secid = *UNIXSID(skb); 139 } 140 #else 141 + static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb) 142 { } 143 144 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) ··· 1322 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); 1323 if (siocb->scm->fp) 1324 unix_attach_fds(siocb->scm, skb); 1325 + unix_get_secdata(siocb->scm, skb); 1326 1327 skb->h.raw = skb->data; 1328 err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
+12 -2
security/dummy.c
··· 791 return -ENOPROTOOPT; 792 } 793 794 - static int dummy_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata, 795 - u32 *seclen) 796 { 797 return -ENOPROTOOPT; 798 } ··· 873 static int dummy_setprocattr(struct task_struct *p, char *name, void *value, size_t size) 874 { 875 return -EINVAL; 876 } 877 878 #ifdef CONFIG_KEYS ··· 1036 set_to_dummy_if_null(ops, d_instantiate); 1037 set_to_dummy_if_null(ops, getprocattr); 1038 set_to_dummy_if_null(ops, setprocattr); 1039 #ifdef CONFIG_SECURITY_NETWORK 1040 set_to_dummy_if_null(ops, unix_stream_connect); 1041 set_to_dummy_if_null(ops, unix_may_send);
··· 791 return -ENOPROTOOPT; 792 } 793 794 + static int dummy_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid) 795 { 796 return -ENOPROTOOPT; 797 } ··· 874 static int dummy_setprocattr(struct task_struct *p, char *name, void *value, size_t size) 875 { 876 return -EINVAL; 877 + } 878 + 879 + static int dummy_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) 880 + { 881 + return -EOPNOTSUPP; 882 + } 883 + 884 + static void dummy_release_secctx(char *secdata, u32 seclen) 885 + { 886 } 887 888 #ifdef CONFIG_KEYS ··· 1028 set_to_dummy_if_null(ops, d_instantiate); 1029 set_to_dummy_if_null(ops, getprocattr); 1030 set_to_dummy_if_null(ops, setprocattr); 1031 + set_to_dummy_if_null(ops, secid_to_secctx); 1032 + set_to_dummy_if_null(ops, release_secctx); 1033 #ifdef CONFIG_SECURITY_NETWORK 1034 set_to_dummy_if_null(ops, unix_stream_connect); 1035 set_to_dummy_if_null(ops, unix_may_send);
+24 -14
security/selinux/hooks.c
··· 3524 return err; 3525 } 3526 3527 - static int selinux_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata, u32 *seclen) 3528 { 3529 int err = 0; 3530 - u32 peer_sid; 3531 3532 - if (skb->sk->sk_family == PF_UNIX) 3533 - selinux_get_inode_sid(SOCK_INODE(skb->sk->sk_socket), 3534 - &peer_sid); 3535 - else 3536 - peer_sid = selinux_socket_getpeer_dgram(skb); 3537 3538 - if (peer_sid == SECSID_NULL) 3539 - return -EINVAL; 3540 3541 - err = security_sid_to_context(peer_sid, secdata, seclen); 3542 - if (err) 3543 - return err; 3544 - 3545 - return 0; 3546 } 3547 3548 static int selinux_sk_alloc_security(struct sock *sk, int family, gfp_t priority) ··· 4403 return size; 4404 } 4405 4406 #ifdef CONFIG_KEYS 4407 4408 static int selinux_key_alloc(struct key *k, struct task_struct *tsk, ··· 4593 4594 .getprocattr = selinux_getprocattr, 4595 .setprocattr = selinux_setprocattr, 4596 4597 .unix_stream_connect = selinux_socket_unix_stream_connect, 4598 .unix_may_send = selinux_socket_unix_may_send,
··· 3524 return err; 3525 } 3526 3527 + static int selinux_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid) 3528 { 3529 + u32 peer_secid = SECSID_NULL; 3530 int err = 0; 3531 3532 + if (sock && (sock->sk->sk_family == PF_UNIX)) 3533 + selinux_get_inode_sid(SOCK_INODE(sock), &peer_secid); 3534 + else if (skb) 3535 + peer_secid = selinux_socket_getpeer_dgram(skb); 3536 3537 + if (peer_secid == SECSID_NULL) 3538 + err = -EINVAL; 3539 + *secid = peer_secid; 3540 3541 + return err; 3542 } 3543 3544 static int selinux_sk_alloc_security(struct sock *sk, int family, gfp_t priority) ··· 4407 return size; 4408 } 4409 4410 + static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) 4411 + { 4412 + return security_sid_to_context(secid, secdata, seclen); 4413 + } 4414 + 4415 + static void selinux_release_secctx(char *secdata, u32 seclen) 4416 + { 4417 + if (secdata) 4418 + kfree(secdata); 4419 + } 4420 + 4421 #ifdef CONFIG_KEYS 4422 4423 static int selinux_key_alloc(struct key *k, struct task_struct *tsk, ··· 4586 4587 .getprocattr = selinux_getprocattr, 4588 .setprocattr = selinux_setprocattr, 4589 + 4590 + .secid_to_secctx = selinux_secid_to_secctx, 4591 + .release_secctx = selinux_release_secctx, 4592 4593 .unix_stream_connect = selinux_socket_unix_stream_connect, 4594 .unix_may_send = selinux_socket_unix_may_send,