Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (32 commits)
[NET]: Fix more per-cpu typos
[SECURITY]: Fix build with CONFIG_SECURITY disabled.
[I/OAT]: Remove CPU hotplug lock from net_dma_rebalance
[DECNET]: Fix for routing bug
[AF_UNIX]: Kernel memory leak fix for af_unix datagram getpeersec patch
[NET]: skb_queue_lock_key() is no longer used.
[NET]: Remove lockdep_set_class() call from skb_queue_head_init().
[IPV6]: SNMPv2 "ipv6IfStatsOutFragCreates" counter error
[IPV6]: SNMPv2 "ipv6IfStatsInHdrErrors" counter error
[NET]: Kill the WARN_ON() calls for checksum fixups.
[NETFILTER]: xt_hashlimit/xt_string: missing string validation
[NETFILTER]: SIP helper: expect RTP streams in both directions
[E1000]: Convert to netdev_alloc_skb
[TG3]: Convert to netdev_alloc_skb
[NET]: Add netdev_alloc_skb().
[TCP]: Process linger2 timeout consistently.
[SECURITY] secmark: nul-terminate secdata
[NET] infiniband: Cleanup ib_addr module to use the netevents
[NET]: Core net changes to generate netevents
[NET]: Network Event Notifier Mechanism.
...

+632 -187
+14 -16
drivers/infiniband/core/addr.c
···
 #include <net/arp.h>
 #include <net/neighbour.h>
 #include <net/route.h>
+#include <net/netevent.h>
 #include <rdma/ib_addr.h>

 MODULE_AUTHOR("Sean Hefty");
···
 }
 EXPORT_SYMBOL(rdma_addr_cancel);

-static int addr_arp_recv(struct sk_buff *skb, struct net_device *dev,
-                         struct packet_type *pkt, struct net_device *orig_dev)
+static int netevent_callback(struct notifier_block *self, unsigned long event,
+                             void *ctx)
 {
-    struct arphdr *arp_hdr;
+    if (event == NETEVENT_NEIGH_UPDATE) {
+        struct neighbour *neigh = ctx;

-    arp_hdr = (struct arphdr *) skb->nh.raw;
-
-    if (arp_hdr->ar_op == htons(ARPOP_REQUEST) ||
-        arp_hdr->ar_op == htons(ARPOP_REPLY))
-        set_timeout(jiffies);
-
-    kfree_skb(skb);
+        if (neigh->dev->type == ARPHRD_INFINIBAND &&
+            (neigh->nud_state & NUD_VALID)) {
+            set_timeout(jiffies);
+        }
+    }
     return 0;
 }

-static struct packet_type addr_arp = {
-    .type = __constant_htons(ETH_P_ARP),
-    .func = addr_arp_recv,
-    .af_packet_priv = (void*) 1,
+static struct notifier_block nb = {
+    .notifier_call = netevent_callback
 };

 static int addr_init(void)
···
     if (!addr_wq)
         return -ENOMEM;

-    dev_add_pack(&addr_arp);
+    register_netevent_notifier(&nb);
     return 0;
 }

 static void addr_cleanup(void)
 {
-    dev_remove_pack(&addr_arp);
+    unregister_netevent_notifier(&nb);
     destroy_workqueue(addr_wq);
 }
+1 -1
drivers/net/appletalk/Kconfig
···
       even politically correct people are allowed to say Y here.

 config DEV_APPLETALK
-    bool "Appletalk interfaces support"
+    tristate "Appletalk interfaces support"
     depends on ATALK
     help
       AppleTalk is the protocol that Apple computers can use to communicate
+6 -5
drivers/net/e1000/e1000_main.c
···
         break;
     }

-    /* NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+    /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
      * means we reserve 2 more, this pushes us to allocate from the next
      * larger slab size
      * i.e. RXBUFFER_2048 --> size-4096 slab */
···
 #define E1000_CB_LENGTH 256
         if (length < E1000_CB_LENGTH) {
             struct sk_buff *new_skb =
-                dev_alloc_skb(length + NET_IP_ALIGN);
+                netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
             if (new_skb) {
                 skb_reserve(new_skb, NET_IP_ALIGN);
                 new_skb->dev = netdev;
···
     while (cleaned_count--) {
         if (!(skb = buffer_info->skb))
-            skb = dev_alloc_skb(bufsz);
+            skb = netdev_alloc_skb(netdev, bufsz);
         else {
             skb_trim(skb, 0);
             goto map_skb;
···
             DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
                     "at %p\n", bufsz, skb->data);
             /* Try again, without freeing the previous */
-            skb = dev_alloc_skb(bufsz);
+            skb = netdev_alloc_skb(netdev, bufsz);
             /* Failed allocation, critical failure */
             if (!skb) {
                 dev_kfree_skb(oldskb);
···
             rx_desc->read.buffer_addr[j+1] = ~0;
         }

-        skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
+        skb = netdev_alloc_skb(netdev,
+                               adapter->rx_ps_bsize0 + NET_IP_ALIGN);

         if (unlikely(!skb)) {
             adapter->alloc_rx_buff_failed++;
+5 -5
drivers/net/tg3.c
···
 #define DRV_MODULE_NAME     "tg3"
 #define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION  "3.63"
-#define DRV_MODULE_RELDATE  "July 25, 2006"
+#define DRV_MODULE_VERSION  "3.64"
+#define DRV_MODULE_RELDATE  "July 31, 2006"

 #define TG3_DEF_MAC_MODE    0
 #define TG3_DEF_RX_MODE     0
···
      * Callers depend upon this behavior and assume that
      * we leave everything unchanged if we fail.
      */
-    skb = dev_alloc_skb(skb_size);
+    skb = netdev_alloc_skb(tp->dev, skb_size);
     if (skb == NULL)
         return -ENOMEM;
···
             tg3_recycle_rx(tp, opaque_key,
                            desc_idx, *post_ptr);

-            copy_skb = dev_alloc_skb(len + 2);
+            copy_skb = netdev_alloc_skb(tp->dev, len + 2);
             if (copy_skb == NULL)
                 goto drop_it_no_recycle;
···
     err = -EIO;

     tx_len = 1514;
-    skb = dev_alloc_skb(tx_len);
+    skb = netdev_alloc_skb(tp->dev, tx_len);
     if (!skb)
         return -ENOMEM;
-1
include/linux/netfilter_bridge.h
···
 #include <linux/netfilter.h>
 #if defined(__KERNEL__) && defined(CONFIG_BRIDGE_NETFILTER)
-#include <asm/atomic.h>
 #include <linux/if_ether.h>
 #endif
+34 -6
include/linux/security.h
···
  * @name contains the name of the security module being unstacked.
  * @ops contains a pointer to the struct security_operations of the module to unstack.
  *
+ * @secid_to_secctx:
+ *  Convert secid to security context.
+ *  @secid contains the security ID.
+ *  @secdata contains the pointer that stores the converted security context.
+ *
+ * @release_secctx:
+ *  Release the security context.
+ *  @secdata contains the security context.
+ *  @seclen contains the length of the security context.
+ *
  * This is the main security structure.
  */
 struct security_operations {
···
     int (*getprocattr)(struct task_struct *p, char *name, void *value, size_t size);
     int (*setprocattr)(struct task_struct *p, char *name, void *value, size_t size);
+    int (*secid_to_secctx)(u32 secid, char **secdata, u32 *seclen);
+    void (*release_secctx)(char *secdata, u32 seclen);

 #ifdef CONFIG_SECURITY_NETWORK
     int (*unix_stream_connect) (struct socket * sock,
···
     int (*socket_shutdown) (struct socket * sock, int how);
     int (*socket_sock_rcv_skb) (struct sock * sk, struct sk_buff * skb);
     int (*socket_getpeersec_stream) (struct socket *sock, char __user *optval, int __user *optlen, unsigned len);
-    int (*socket_getpeersec_dgram) (struct sk_buff *skb, char **secdata, u32 *seclen);
+    int (*socket_getpeersec_dgram) (struct socket *sock, struct sk_buff *skb, u32 *secid);
     int (*sk_alloc_security) (struct sock *sk, int family, gfp_t priority);
     void (*sk_free_security) (struct sock *sk);
     unsigned int (*sk_getsid) (struct sock *sk, struct flowi *fl, u8 dir);
···
     return security_ops->netlink_recv(skb, cap);
 }

+static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+{
+    return security_ops->secid_to_secctx(secid, secdata, seclen);
+}
+
+static inline void security_release_secctx(char *secdata, u32 seclen)
+{
+    return security_ops->release_secctx(secdata, seclen);
+}
+
 /* prototypes */
 extern int security_init (void);
 extern int register_security (struct security_operations *ops);
···
 {
 }

+static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+{
+    return -EOPNOTSUPP;
+}
+
+static inline void security_release_secctx(char *secdata, u32 seclen)
+{
+}
 #endif /* CONFIG_SECURITY */

 #ifdef CONFIG_SECURITY_NETWORK
···
     return security_ops->socket_getpeersec_stream(sock, optval, optlen, len);
 }

-static inline int security_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata,
-                                                   u32 *seclen)
+static inline int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
 {
-    return security_ops->socket_getpeersec_dgram(skb, secdata, seclen);
+    return security_ops->socket_getpeersec_dgram(sock, skb, secid);
 }

 static inline int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
···
     return -ENOPROTOOPT;
 }

-static inline int security_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata,
-                                                   u32 *seclen)
+static inline int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
 {
     return -ENOPROTOOPT;
 }
+30 -3
include/linux/skbuff.h
···
     return list_->qlen;
 }

-extern struct lock_class_key skb_queue_lock_key;
-
+/*
+ * This function creates a split out lock class for each invocation;
+ * this is needed for now since a whole lot of users of the skb-queue
+ * infrastructure in drivers have different locking usage (in hardirq)
+ * than the networking core (in softirq only). In the long run either the
+ * network layer or drivers should need annotation to consolidate the
+ * main types of usage into 3 classes.
+ */
 static inline void skb_queue_head_init(struct sk_buff_head *list)
 {
     spin_lock_init(&list->lock);
-    lockdep_set_class(&list->lock, &skb_queue_lock_key);
     list->prev = list->next = (struct sk_buff *)list;
     list->qlen = 0;
 }
···
 static inline struct sk_buff *dev_alloc_skb(unsigned int length)
 {
     return __dev_alloc_skb(length, GFP_ATOMIC);
+}
+
+extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
+        unsigned int length, gfp_t gfp_mask);
+
+/**
+ *  netdev_alloc_skb - allocate an skbuff for rx on a specific device
+ *  @dev: network device to receive on
+ *  @length: length to allocate
+ *
+ *  Allocate a new &sk_buff and assign it a usage count of one. The
+ *  buffer has unspecified headroom built in. Users should allocate
+ *  the headroom they think they need without accounting for the
+ *  built in space. The built in space is used for optimisations.
+ *
+ *  %NULL is returned if there is no free memory. Although this function
+ *  allocates memory it can be called from an interrupt.
+ */
+static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
+        unsigned int length)
+{
+    return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
 }

 /**
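For drivers, converting from dev_alloc_skb() is mechanical: pass the receiving net_device as the first argument, as the e1000 and tg3 hunks above do. A minimal RX-allocation sketch follows; the helper name and its parameter are invented for illustration, not part of this merge:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical RX-buffer helper illustrating the netdev_alloc_skb() API;
 * "mydev_rx_alloc" and "buf_len" are made up for this sketch. */
static struct sk_buff *mydev_rx_alloc(struct net_device *dev,
                                      unsigned int buf_len)
{
    /* GFP_ATOMIC allocation; NET_SKB_PAD headroom is reserved inside
     * __netdev_alloc_skb() (see net/core/skbuff.c below). */
    struct sk_buff *skb = netdev_alloc_skb(dev, buf_len + NET_IP_ALIGN);

    if (!skb)
        return NULL;                /* caller counts the failure and retries */

    skb_reserve(skb, NET_IP_ALIGN); /* align the IP header, as e1000 does */
    skb->dev = dev;                 /* drivers still set this themselves */
    return skb;
}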
+2 -4
include/net/af_unix.h
···
     struct ucred        creds;      /* Skb credentials      */
     struct scm_fp_list  *fp;        /* Passed files         */
 #ifdef CONFIG_SECURITY_NETWORK
-    char                *secdata;   /* Security context     */
-    u32                 seclen;     /* Security length      */
+    u32                 secid;      /* Security ID          */
 #endif
 };

 #define UNIXCB(skb)     (*(struct unix_skb_parms*)&((skb)->cb))
 #define UNIXCREDS(skb)  (&UNIXCB((skb)).creds)
-#define UNIXSECDATA(skb)    (&UNIXCB((skb)).secdata)
-#define UNIXSECLEN(skb)     (&UNIXCB((skb)).seclen)
+#define UNIXSID(skb)    (&UNIXCB((skb)).secid)

 #define unix_state_rlock(s)   spin_lock(&unix_sk(s)->lock)
 #define unix_state_runlock(s) spin_unlock(&unix_sk(s)->lock)
+9 -3
include/net/ip6_route.h
···
 /*
  *  Store a destination cache entry in a socket
  */
-static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
-                                 struct in6_addr *daddr)
+static inline void __ip6_dst_store(struct sock *sk, struct dst_entry *dst,
+                                   struct in6_addr *daddr)
 {
     struct ipv6_pinfo *np = inet6_sk(sk);
     struct rt6_info *rt = (struct rt6_info *) dst;

-    write_lock(&sk->sk_dst_lock);
     sk_setup_caps(sk, dst);
     np->daddr_cache = daddr;
     np->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
+}
+
+static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
+                                 struct in6_addr *daddr)
+{
+    write_lock(&sk->sk_dst_lock);
+    __ip6_dst_store(sk, dst, daddr);
     write_unlock(&sk->sk_dst_lock);
 }
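The unlocked __ip6_dst_store() variant is for callers that already hold sk_dst_lock or that own the socket exclusively; only the ip6_dst_store() wrapper takes the lock itself. A hedged sketch of the calling convention (both function names below are invented):

#include <net/ip6_route.h>

/* Connect/accept-style path: the socket is not yet visible to other
 * threads, so the unlocked variant is safe (cf. tcp_ipv6.c below). */
static void store_on_private_sock(struct sock *sk, struct dst_entry *dst)
{
    __ip6_dst_store(sk, dst, NULL);
}

/* Shared-socket path: let the wrapper take sk->sk_dst_lock. */
static void store_on_shared_sock(struct sock *sk, struct dst_entry *dst)
{
    ip6_dst_store(sk, dst, NULL);
}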
+3
include/net/ipv6.h
···
 extern int          ip6_dst_lookup(struct sock *sk,
                                    struct dst_entry **dst,
                                    struct flowi *fl);
+extern int          ip6_sk_dst_lookup(struct sock *sk,
+                                      struct dst_entry **dst,
+                                      struct flowi *fl);

 /*
  *  skb processing functions
+1 -1
include/net/netdma.h
···
 {
     struct dma_chan *chan;
     rcu_read_lock();
-    chan = rcu_dereference(__get_cpu_var(softnet_data.net_dma));
+    chan = rcu_dereference(__get_cpu_var(softnet_data).net_dma);
     if (chan)
         dma_chan_get(chan);
     rcu_read_unlock();
+33
include/net/netevent.h
···
+#ifndef _NET_EVENT_H
+#define _NET_EVENT_H
+
+/*
+ *  Generic netevent notifiers
+ *
+ *  Authors:
+ *      Tom Tucker <tom@opengridcomputing.com>
+ *      Steve Wise <swise@opengridcomputing.com>
+ *
+ *  Changes:
+ */
+#ifdef __KERNEL__
+
+#include <net/dst.h>
+
+struct netevent_redirect {
+    struct dst_entry *old;
+    struct dst_entry *new;
+};
+
+enum netevent_notif_type {
+    NETEVENT_NEIGH_UPDATE = 1, /* arg is struct neighbour ptr */
+    NETEVENT_PMTU_UPDATE,      /* arg is struct dst_entry ptr */
+    NETEVENT_REDIRECT,         /* arg is struct netevent_redirect ptr */
+};
+
+extern int register_netevent_notifier(struct notifier_block *nb);
+extern int unregister_netevent_notifier(struct notifier_block *nb);
+extern int call_netevent_notifiers(unsigned long val, void *v);
+
+#endif
+#endif
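A consumer registers an ordinary notifier_block on this chain, exactly as the drivers/infiniband/core/addr.c conversion above does. A minimal sketch of a hypothetical module watching all three event types (every "my_*" name is invented):

#include <linux/notifier.h>
#include <net/netevent.h>

static int my_netevent_cb(struct notifier_block *self, unsigned long event,
                          void *ctx)
{
    switch (event) {
    case NETEVENT_NEIGH_UPDATE:
        /* ctx is a struct neighbour *; nud_state is valid to inspect */
        break;
    case NETEVENT_PMTU_UPDATE:
        /* ctx is the struct dst_entry * whose MTU metric changed */
        break;
    case NETEVENT_REDIRECT:
        /* ctx is a struct netevent_redirect * carrying old/new dsts */
        break;
    }
    return 0;
}

static struct notifier_block my_nb = {
    .notifier_call = my_netevent_cb,
};

/* call register_netevent_notifier(&my_nb) at module init and
 * unregister_netevent_notifier(&my_nb) at module exit. */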
+25 -4
include/net/scm.h
···
 #include <linux/limits.h>
 #include <linux/net.h>
+#include <linux/security.h>

 /* Well, we should have at least one descriptor open
  * to accept passed FDs 8)
···
     struct ucred        creds;      /* Skb credentials      */
     struct scm_fp_list  *fp;        /* Passed files         */
 #ifdef CONFIG_SECURITY_NETWORK
-    char                *secdata;   /* Security context     */
-    u32                 seclen;     /* Security length      */
+    u32                 secid;      /* Passed security ID   */
 #endif
     unsigned long       seq;        /* Connection seqno     */
 };
···
 extern int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm);
 extern void __scm_destroy(struct scm_cookie *scm);
 extern struct scm_fp_list * scm_fp_dup(struct scm_fp_list *fpl);
+
+#ifdef CONFIG_SECURITY_NETWORK
+static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_cookie *scm)
+{
+    security_socket_getpeersec_dgram(sock, NULL, &scm->secid);
+}
+#else
+static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_cookie *scm)
+{ }
+#endif /* CONFIG_SECURITY_NETWORK */

 static __inline__ void scm_destroy(struct scm_cookie *scm)
 {
···
     scm->creds.pid = p->tgid;
     scm->fp = NULL;
     scm->seq = 0;
+    unix_get_peersec_dgram(sock, scm);
     if (msg->msg_controllen <= 0)
         return 0;
     return __scm_send(sock, msg, scm);
···
 #ifdef CONFIG_SECURITY_NETWORK
 static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm)
 {
-    if (test_bit(SOCK_PASSSEC, &sock->flags) && scm->secdata != NULL)
-        put_cmsg(msg, SOL_SOCKET, SCM_SECURITY, scm->seclen, scm->secdata);
+    char *secdata;
+    u32 seclen;
+    int err;
+
+    if (test_bit(SOCK_PASSSEC, &sock->flags)) {
+        err = security_secid_to_secctx(scm->secid, &secdata, &seclen);
+
+        if (!err) {
+            put_cmsg(msg, SOL_SOCKET, SCM_SECURITY, seclen, secdata);
+            security_release_secctx(secdata, seclen);
+        }
+    }
 }
 #else
 static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm)
+3
include/net/tcp.h
···
 static inline void tcp_done(struct sock *sk)
 {
+    if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
+        TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
+
     tcp_set_state(sk, TCP_CLOSE);
     tcp_clear_xmit_timers(sk);
+1 -1
net/core/Makefile
···
 obj-$(CONFIG_SYSCTL) += sysctl_net_core.o

-obj-y            += dev.o ethtool.o dev_mcast.o dst.o \
+obj-y            += dev.o ethtool.o dev_mcast.o dst.o netevent.o \
             neighbour.o rtnetlink.o utils.o link_watch.o filter.o

 obj-$(CONFIG_XFRM) += flow.o
+2 -17
net/core/dev.c
···
         goto out_set_summed;

     if (unlikely(skb_shinfo(skb)->gso_size)) {
-        static int warned;
-
-        WARN_ON(!warned);
-        warned = 1;
-
         /* Let GSO fix up the checksum. */
         goto out_set_summed;
     }
···
     __skb_pull(skb, skb->mac_len);

     if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
-        static int warned;
-
-        WARN_ON(!warned);
-        warned = 1;
-
         if (skb_header_cloned(skb) &&
             (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
             return ERR_PTR(err);
···
     unsigned int cpu, i, n;
     struct dma_chan *chan;

-    lock_cpu_hotplug();
-
     if (net_dma_count == 0) {
         for_each_online_cpu(cpu)
-            rcu_assign_pointer(per_cpu(softnet_data.net_dma, cpu), NULL);
-        unlock_cpu_hotplug();
+            rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
         return;
     }
···
           + (i < (num_online_cpus() % net_dma_count) ? 1 : 0));

         while(n) {
-            per_cpu(softnet_data.net_dma, cpu) = chan;
+            per_cpu(softnet_data, cpu).net_dma = chan;
             cpu = next_cpu(cpu, cpu_online_map);
             n--;
         }
         i++;
     }
     rcu_read_unlock();
-
-    unlock_cpu_hotplug();
 }

 /**
+8 -6
net/core/neighbour.c
···
 #include <net/neighbour.h>
 #include <net/dst.h>
 #include <net/sock.h>
+#include <net/netevent.h>
 #include <linux/rtnetlink.h>
 #include <linux/random.h>
 #include <linux/string.h>
···
             neigh->nud_state = NUD_STALE;
             neigh->updated = jiffies;
             neigh_suspect(neigh);
+            notify = 1;
         }
     } else if (state & NUD_DELAY) {
         if (time_before_eq(now,
···
             neigh->nud_state = NUD_REACHABLE;
             neigh->updated = jiffies;
             neigh_connect(neigh);
+            notify = 1;
             next = neigh->confirmed + neigh->parms->reachable_time;
         } else {
             NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
···
 out:
         write_unlock(&neigh->lock);
     }
+    if (notify)
+        call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);

 #ifdef CONFIG_ARPD
     if (notify && neigh->parms->app_probes)
···
 {
     u8 old;
     int err;
-#ifdef CONFIG_ARPD
     int notify = 0;
-#endif
     struct net_device *dev;
     int update_isrouter = 0;
···
         neigh_suspect(neigh);
         neigh->nud_state = new;
         err = 0;
-#ifdef CONFIG_ARPD
         notify = old & NUD_VALID;
-#endif
         goto out;
     }
···
         if (!(new & NUD_CONNECTED))
             neigh->confirmed = jiffies -
                       (neigh->parms->base_reachable_time << 1);
-#ifdef CONFIG_ARPD
         notify = 1;
-#endif
     }
     if (new == old)
         goto out;
···
             (neigh->flags & ~NTF_ROUTER);
     }
     write_unlock_bh(&neigh->lock);
+
+    if (notify)
+        call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
 #ifdef CONFIG_ARPD
     if (notify && neigh->parms->app_probes)
         neigh_app_notify(neigh);
+69
net/core/netevent.c
···
+/*
+ *  Network event notifiers
+ *
+ *  Authors:
+ *      Tom Tucker <tom@opengridcomputing.com>
+ *      Steve Wise <swise@opengridcomputing.com>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ *  Fixes:
+ */
+
+#include <linux/rtnetlink.h>
+#include <linux/notifier.h>
+
+static ATOMIC_NOTIFIER_HEAD(netevent_notif_chain);
+
+/**
+ *  register_netevent_notifier - register a netevent notifier block
+ *  @nb: notifier
+ *
+ *  Register a notifier to be called when a netevent occurs.
+ *  The notifier passed is linked into the kernel structures and must
+ *  not be reused until it has been unregistered. A negative errno code
+ *  is returned on a failure.
+ */
+int register_netevent_notifier(struct notifier_block *nb)
+{
+    int err;
+
+    err = atomic_notifier_chain_register(&netevent_notif_chain, nb);
+    return err;
+}
+
+/**
+ *  unregister_netevent_notifier - unregister a netevent notifier block
+ *  @nb: notifier
+ *
+ *  Unregister a notifier previously registered by
+ *  register_netevent_notifier(). The notifier is unlinked from the
+ *  kernel structures and may then be reused. A negative errno code
+ *  is returned on a failure.
+ */
+
+int unregister_netevent_notifier(struct notifier_block *nb)
+{
+    return atomic_notifier_chain_unregister(&netevent_notif_chain, nb);
+}
+
+/**
+ *  call_netevent_notifiers - call all netevent notifier blocks
+ *  @val: value passed unmodified to notifier function
+ *  @v: pointer passed unmodified to notifier function
+ *
+ *  Call all netevent notifier blocks. Parameters and return value
+ *  are as for notifier_call_chain().
+ */
+
+int call_netevent_notifiers(unsigned long val, void *v)
+{
+    return atomic_notifier_call_chain(&netevent_notif_chain, val, v);
+}
+
+EXPORT_SYMBOL_GPL(register_netevent_notifier);
+EXPORT_SYMBOL_GPL(unregister_netevent_notifier);
+EXPORT_SYMBOL_GPL(call_netevent_notifiers);
+34 -11
net/core/skbuff.c
···
 static kmem_cache_t *skbuff_fclone_cache __read_mostly;

 /*
- *  lockdep: lock class key used by skb_queue_head_init():
- */
-struct lock_class_key skb_queue_lock_key;
-
-EXPORT_SYMBOL(skb_queue_lock_key);
-
-/*
  *  Keep out-of-line to prevent kernel bloat.
  *  __builtin_return_address is not used because it is not always
  *  reliable.
···
     goto out;
 }

+/**
+ *  __netdev_alloc_skb - allocate an skbuff for rx on a specific device
+ *  @dev: network device to receive on
+ *  @length: length to allocate
+ *  @gfp_mask: get_free_pages mask, passed to alloc_skb
+ *
+ *  Allocate a new &sk_buff and assign it a usage count of one. The
+ *  buffer has unspecified headroom built in. Users should allocate
+ *  the headroom they think they need without accounting for the
+ *  built in space. The built in space is used for optimisations.
+ *
+ *  %NULL is returned if there is no free memory.
+ */
+struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
+        unsigned int length, gfp_t gfp_mask)
+{
+    struct sk_buff *skb;
+
+    skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
+    if (likely(skb))
+        skb_reserve(skb, NET_SKB_PAD);
+    return skb;
+}

 static void skb_drop_list(struct sk_buff **listp)
 {
···
         unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
         return err;

-    for (i = 0; i < nfrags; i++) {
+    i = 0;
+    if (offset >= len)
+        goto drop_pages;
+
+    for (; i < nfrags; i++) {
         int end = offset + skb_shinfo(skb)->frags[i].size;

         if (end < len) {
···
             continue;
         }

-        if (len > offset)
-            skb_shinfo(skb)->frags[i++].size = len - offset;
+        skb_shinfo(skb)->frags[i++].size = len - offset;

+drop_pages:
         skb_shinfo(skb)->nr_frags = i;

         for (; i < nfrags; i++)
···
         if (skb_shinfo(skb)->frag_list)
             skb_drop_fraglist(skb);
-        break;
+        goto done;
     }

     for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
···
                 return -ENOMEM;

             nfrag->next = frag->next;
+            kfree_skb(frag);
             frag = nfrag;
             *fragp = frag;
         }
···
         break;
     }

+done:
     if (len > skb_headlen(skb)) {
         skb->data_len -= skb->len - len;
         skb->len = len;
···
 EXPORT_SYMBOL(kfree_skb);
 EXPORT_SYMBOL(__pskb_pull_tail);
 EXPORT_SYMBOL(__alloc_skb);
+EXPORT_SYMBOL(__netdev_alloc_skb);
 EXPORT_SYMBOL(pskb_copy);
 EXPORT_SYMBOL(pskb_expand_head);
 EXPORT_SYMBOL(skb_checksum);
+2 -2
net/dccp/ipv6.c
···
     ipv6_addr_copy(&np->saddr, saddr);
     inet->rcv_saddr = LOOPBACK4_IPV6;

-    ip6_dst_store(sk, dst, NULL);
+    __ip6_dst_store(sk, dst, NULL);

     icsk->icsk_ext_hdr_len = 0;
     if (np->opt != NULL)
···
      * comment in that function for the gory details. -acme
      */

-    ip6_dst_store(newsk, dst, NULL);
+    __ip6_dst_store(newsk, dst, NULL);
     newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
                                                   NETIF_F_TSO);
     newdp6 = (struct dccp6_sock *)newsk;
+7 -2
net/decnet/dn_route.c
···
         for(dev_out = dev_base; dev_out; dev_out = dev_out->next) {
             if (!dev_out->dn_ptr)
                 continue;
-            if (dn_dev_islocal(dev_out, oldflp->fld_src))
-                break;
+            if (!dn_dev_islocal(dev_out, oldflp->fld_src))
+                continue;
+            if ((dev_out->flags & IFF_LOOPBACK) &&
+                oldflp->fld_dst &&
+                !dn_dev_islocal(dev_out, oldflp->fld_dst))
+                continue;
+            break;
         }
         read_unlock(&dev_base_lock);
         if (dev_out == NULL)
+4 -3
net/ipv4/ip_output.c
···
         err = output(skb);

+        if (!err)
+            IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
         if (err || !frag)
             break;
···
         /*
          *  Put this fragment into the sending queue.
          */
-
-        IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
-
         iph->tot_len = htons(len + hlen);

         ip_send_check(iph);
···
         err = output(skb2);
         if (err)
             goto fail;
+
+        IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
     }
     kfree_skb(skb);
     IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
+7 -2
net/ipv4/ip_sockglue.c
···
 static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
 {
     char *secdata;
-    u32 seclen;
+    u32 seclen, secid;
     int err;

-    err = security_socket_getpeersec_dgram(skb, &secdata, &seclen);
+    err = security_socket_getpeersec_dgram(NULL, skb, &secid);
+    if (err)
+        return;
+
+    err = security_secid_to_secctx(secid, &secdata, &seclen);
     if (err)
         return;

     put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
+    security_release_secctx(secdata, seclen);
 }
+1 -1
net/ipv4/netfilter/ip_conntrack_sip.c
···
         sip[i].tuple.src.u.udp.port = htons(ports[i]);
         sip[i].mask.src.u.udp.port = 0xFFFF;
         sip[i].mask.dst.protonum = 0xFF;
-        sip[i].max_expected = 1;
+        sip[i].max_expected = 2;
         sip[i].timeout = 3 * 60; /* 3 minutes */
         sip[i].me = THIS_MODULE;
         sip[i].help = sip_help;
+3
net/ipv4/netfilter/ipt_hashlimit.c
···
     if (!r->cfg.expire)
         return 0;

+    if (r->name[sizeof(r->name) - 1] != '\0')
+        return 0;
+
     /* This is the best we've got: We cannot release and re-grab lock,
      * since checkentry() is called before ip_tables.c grabs ipt_mutex.
      * We also cannot grab the hashtable spinlock, since htable_create will
+8
net/ipv4/route.c
···
 #include <net/icmp.h>
 #include <net/xfrm.h>
 #include <net/ip_mp_alg.h>
+#include <net/netevent.h>
 #ifdef CONFIG_SYSCTL
 #include <linux/sysctl.h>
 #endif
···
     struct rtable *rth, **rthp;
     u32  skeys[2] = { saddr, 0 };
     int  ikeys[2] = { dev->ifindex, 0 };
+    struct netevent_redirect netevent;

     if (!in_dev)
         return;
···
                     rt_drop(rt);
                     goto do_next;
                 }
+
+                netevent.old = &rth->u.dst;
+                netevent.new = &rt->u.dst;
+                call_netevent_notifiers(NETEVENT_REDIRECT,
+                                        &netevent);

                 rt_del(hash, rth);
                 if (!rt_intern_hash(hash, rt, &rt))
···
             }
             dst->metrics[RTAX_MTU-1] = mtu;
             dst_set_expires(dst, ip_rt_mtu_expires);
+            call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
         }
     }
+3 -2
net/ipv4/tcp.c
···
     tp->ucopy.dma_chan = NULL;
     preempt_disable();
     if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
-        !sysctl_tcp_low_latency && __get_cpu_var(softnet_data.net_dma)) {
+        !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) {
         preempt_enable_no_resched();
         tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
     } else
···
             const int tmo = tcp_fin_time(sk);

             if (tmo > TCP_TIMEWAIT_LEN) {
-                inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk));
+                inet_csk_reset_keepalive_timer(sk,
+                        tmo - TCP_TIMEWAIT_LEN);
             } else {
                 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
                 goto out;
-2
net/ipv4/tcp_ipv4.c
···
            It can f.e. if SYNs crossed.
          */
         if (!sock_owned_by_user(sk)) {
-            TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
             sk->sk_err = err;

             sk->sk_error_report(sk);
···
 drop_and_free:
     reqsk_free(req);
 drop:
-    TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
     return 0;
 }
+3 -1
net/ipv4/tcp_minisocks.c
···
     /* RFC793: "second check the RST bit" and
      *         "fourth, check the SYN bit"
      */
-    if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN))
+    if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
+        TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
         goto embryonic_reset;
+    }

     /* ACK sequence verified above, just make sure ACK is
      * set. If ACK not set, just silently drop the packet.
+1 -1
net/ipv4/tcp_probe.c
···
 static ssize_t tcpprobe_read(struct file *file, char __user *buf,
                              size_t len, loff_t *ppos)
 {
-    int error = 0, cnt;
+    int error = 0, cnt = 0;
     unsigned char *tbuf;

     if (!buf || len < 0)
+166 -8
net/ipv6/addrconf.c
···
 /*
  *  Manual configuration of address on an interface
  */
-static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen)
+static int inet6_addr_add(int ifindex, struct in6_addr *pfx, int plen,
+                          __u32 prefered_lft, __u32 valid_lft)
 {
     struct inet6_ifaddr *ifp;
     struct inet6_dev *idev;
     struct net_device *dev;
+    __u8 ifa_flags = 0;
     int scope;

     ASSERT_RTNL();

+    /* check the lifetime */
+    if (!valid_lft || prefered_lft > valid_lft)
+        return -EINVAL;
+
     if ((dev = __dev_get_by_index(ifindex)) == NULL)
         return -ENODEV;
···
     scope = ipv6_addr_scope(pfx);

-    ifp = ipv6_add_addr(idev, pfx, plen, scope, IFA_F_PERMANENT);
+    if (valid_lft == INFINITY_LIFE_TIME)
+        ifa_flags |= IFA_F_PERMANENT;
+    else if (valid_lft >= 0x7FFFFFFF/HZ)
+        valid_lft = 0x7FFFFFFF/HZ;
+
+    if (prefered_lft == 0)
+        ifa_flags |= IFA_F_DEPRECATED;
+    else if ((prefered_lft >= 0x7FFFFFFF/HZ) &&
+             (prefered_lft != INFINITY_LIFE_TIME))
+        prefered_lft = 0x7FFFFFFF/HZ;
+
+    ifp = ipv6_add_addr(idev, pfx, plen, scope, ifa_flags);
+
     if (!IS_ERR(ifp)) {
+        spin_lock(&ifp->lock);
+        ifp->valid_lft = valid_lft;
+        ifp->prefered_lft = prefered_lft;
+        ifp->tstamp = jiffies;
+        spin_unlock(&ifp->lock);
+
         addrconf_dad_start(ifp, 0);
         in6_ifa_put(ifp);
+        addrconf_verify(0);
         return 0;
     }
···
         return -EFAULT;

     rtnl_lock();
-    err = inet6_addr_add(ireq.ifr6_ifindex, &ireq.ifr6_addr, ireq.ifr6_prefixlen);
+    err = inet6_addr_add(ireq.ifr6_ifindex, &ireq.ifr6_addr, ireq.ifr6_prefixlen,
+                         INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
     rtnl_unlock();
     return err;
 }
···
             ifp->idev->nd_parms->retrans_time / HZ;
 #endif

-        if (age >= ifp->valid_lft) {
+        if (ifp->valid_lft != INFINITY_LIFE_TIME &&
+            age >= ifp->valid_lft) {
             spin_unlock(&ifp->lock);
             in6_ifa_hold(ifp);
             read_unlock(&addrconf_hash_lock);
             ipv6_del_addr(ifp);
             goto restart;
+        } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
+            spin_unlock(&ifp->lock);
+            continue;
         } else if (age >= ifp->prefered_lft) {
             /* jiffies - ifp->tsamp > age >= ifp->prefered_lft */
             int deprecate = 0;
···
         pfx = RTA_DATA(rta[IFA_ADDRESS-1]);
     }
     if (rta[IFA_LOCAL-1]) {
-        if (pfx && memcmp(pfx, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*pfx)))
+        if (RTA_PAYLOAD(rta[IFA_LOCAL-1]) < sizeof(*pfx) ||
+            (pfx && memcmp(pfx, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*pfx))))
             return -EINVAL;
         pfx = RTA_DATA(rta[IFA_LOCAL-1]);
     }
···
 }

 static int
+inet6_addr_modify(int ifindex, struct in6_addr *pfx,
+                  __u32 prefered_lft, __u32 valid_lft)
+{
+    struct inet6_ifaddr *ifp = NULL;
+    struct net_device *dev;
+    int ifa_flags = 0;
+
+    if ((dev = __dev_get_by_index(ifindex)) == NULL)
+        return -ENODEV;
+
+    if (!(dev->flags&IFF_UP))
+        return -ENETDOWN;
+
+    if (!valid_lft || (prefered_lft > valid_lft))
+        return -EINVAL;
+
+    ifp = ipv6_get_ifaddr(pfx, dev, 1);
+    if (ifp == NULL)
+        return -ENOENT;
+
+    if (valid_lft == INFINITY_LIFE_TIME)
+        ifa_flags = IFA_F_PERMANENT;
+    else if (valid_lft >= 0x7FFFFFFF/HZ)
+        valid_lft = 0x7FFFFFFF/HZ;
+
+    if (prefered_lft == 0)
+        ifa_flags = IFA_F_DEPRECATED;
+    else if ((prefered_lft >= 0x7FFFFFFF/HZ) &&
+             (prefered_lft != INFINITY_LIFE_TIME))
+        prefered_lft = 0x7FFFFFFF/HZ;
+
+    spin_lock_bh(&ifp->lock);
+    ifp->flags = (ifp->flags & ~(IFA_F_DEPRECATED|IFA_F_PERMANENT)) | ifa_flags;
+
+    ifp->tstamp = jiffies;
+    ifp->valid_lft = valid_lft;
+    ifp->prefered_lft = prefered_lft;
+
+    spin_unlock_bh(&ifp->lock);
+    if (!(ifp->flags&IFA_F_TENTATIVE))
+        ipv6_ifa_notify(0, ifp);
+    in6_ifa_put(ifp);
+
+    addrconf_verify(0);
+
+    return 0;
+}
+
+static int
 inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 {
     struct rtattr **rta = arg;
     struct ifaddrmsg *ifm = NLMSG_DATA(nlh);
     struct in6_addr *pfx;
+    __u32 valid_lft = INFINITY_LIFE_TIME, prefered_lft = INFINITY_LIFE_TIME;

     pfx = NULL;
     if (rta[IFA_ADDRESS-1]) {
···
         pfx = RTA_DATA(rta[IFA_ADDRESS-1]);
     }
     if (rta[IFA_LOCAL-1]) {
-        if (pfx && memcmp(pfx, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*pfx)))
+        if (RTA_PAYLOAD(rta[IFA_LOCAL-1]) < sizeof(*pfx) ||
+            (pfx && memcmp(pfx, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*pfx))))
             return -EINVAL;
         pfx = RTA_DATA(rta[IFA_LOCAL-1]);
     }
     if (pfx == NULL)
         return -EINVAL;

-    return inet6_addr_add(ifm->ifa_index, pfx, ifm->ifa_prefixlen);
+    if (rta[IFA_CACHEINFO-1]) {
+        struct ifa_cacheinfo *ci;
+        if (RTA_PAYLOAD(rta[IFA_CACHEINFO-1]) < sizeof(*ci))
+            return -EINVAL;
+        ci = RTA_DATA(rta[IFA_CACHEINFO-1]);
+        valid_lft = ci->ifa_valid;
+        prefered_lft = ci->ifa_prefered;
+    }
+
+    if (nlh->nlmsg_flags & NLM_F_REPLACE) {
+        int ret;
+        ret = inet6_addr_modify(ifm->ifa_index, pfx,
+                                prefered_lft, valid_lft);
+        if (ret == 0 || !(nlh->nlmsg_flags & NLM_F_CREATE))
+            return ret;
+    }
+
+    return inet6_addr_add(ifm->ifa_index, pfx, ifm->ifa_prefixlen,
+                          prefered_lft, valid_lft);
 }

 /* Maximum length of ifa_cacheinfo attributes */
···
 {
     enum addr_type_t type = ANYCAST_ADDR;
     return inet6_dump_addr(skb, cb, type);
+}
+
+static int inet6_rtm_getaddr(struct sk_buff *in_skb,
+                             struct nlmsghdr* nlh, void *arg)
+{
+    struct rtattr **rta = arg;
+    struct ifaddrmsg *ifm = NLMSG_DATA(nlh);
+    struct in6_addr *addr = NULL;
+    struct net_device *dev = NULL;
+    struct inet6_ifaddr *ifa;
+    struct sk_buff *skb;
+    int size = NLMSG_SPACE(sizeof(struct ifaddrmsg) + INET6_IFADDR_RTA_SPACE);
+    int err;
+
+    if (rta[IFA_ADDRESS-1]) {
+        if (RTA_PAYLOAD(rta[IFA_ADDRESS-1]) < sizeof(*addr))
+            return -EINVAL;
+        addr = RTA_DATA(rta[IFA_ADDRESS-1]);
+    }
+    if (rta[IFA_LOCAL-1]) {
+        if (RTA_PAYLOAD(rta[IFA_LOCAL-1]) < sizeof(*addr) ||
+            (addr && memcmp(addr, RTA_DATA(rta[IFA_LOCAL-1]), sizeof(*addr))))
+            return -EINVAL;
+        addr = RTA_DATA(rta[IFA_LOCAL-1]);
+    }
+    if (addr == NULL)
+        return -EINVAL;
+
+    if (ifm->ifa_index)
+        dev = __dev_get_by_index(ifm->ifa_index);
+
+    if ((ifa = ipv6_get_ifaddr(addr, dev, 1)) == NULL)
+        return -EADDRNOTAVAIL;
+
+    if ((skb = alloc_skb(size, GFP_KERNEL)) == NULL) {
+        err = -ENOBUFS;
+        goto out;
+    }
+
+    NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
+    err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).pid,
+                            nlh->nlmsg_seq, RTM_NEWADDR, 0);
+    if (err < 0) {
+        err = -EMSGSIZE;
+        goto out_free;
+    }
+
+    err = netlink_unicast(rtnl, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
+    if (err > 0)
+        err = 0;
+out:
+    in6_ifa_put(ifa);
+    return err;
+out_free:
+    kfree_skb(skb);
+    goto out;
 }

 static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
···
     [RTM_GETLINK - RTM_BASE] = { .dumpit = inet6_dump_ifinfo, },
     [RTM_NEWADDR - RTM_BASE] = { .doit   = inet6_rtm_newaddr, },
     [RTM_DELADDR - RTM_BASE] = { .doit   = inet6_rtm_deladdr, },
-    [RTM_GETADDR - RTM_BASE] = { .dumpit = inet6_dump_ifaddr, },
+    [RTM_GETADDR - RTM_BASE] = { .doit   = inet6_rtm_getaddr,
+                                 .dumpit = inet6_dump_ifaddr, },
     [RTM_GETMULTICAST - RTM_BASE] = { .dumpit = inet6_dump_ifmcaddr, },
     [RTM_GETANYCAST - RTM_BASE] = { .dumpit = inet6_dump_ifacaddr, },
     [RTM_NEWROUTE - RTM_BASE] = { .doit = inet6_rtm_newroute, },
+1 -1
net/ipv6/af_inet6.c
···
             return err;
         }

-        ip6_dst_store(sk, dst, NULL);
+        __ip6_dst_store(sk, dst, NULL);
     }

     return 0;
+1 -1
net/ipv6/inet6_connection_sock.c
···
             return err;
         }

-        ip6_dst_store(sk, dst, NULL);
+        __ip6_dst_store(sk, dst, NULL);
     }

     skb->dst = dst_clone(dst);
+87 -42
net/ipv6/ip6_output.c
···
     skb->dev = dst->dev;
     icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
                 0, skb->dev);
+    IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);

     kfree_skb(skb);
     return -ETIMEDOUT;
···
         }

         err = output(skb);
+        if (!err)
+            IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES);
+
         if (err || !frag)
             break;
···
         /*
          *  Put this fragment into the sending queue.
          */
-
-        IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES);
-
         err = output(frag);
         if (err)
             goto fail;
+
+        IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES);
     }
     kfree_skb(skb);
     IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);
···
     return err;
 }

-int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
+static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
+                                          struct dst_entry *dst,
+                                          struct flowi *fl)
 {
-    int err = 0;
+    struct ipv6_pinfo *np = inet6_sk(sk);
+    struct rt6_info *rt = (struct rt6_info *)dst;

-    *dst = NULL;
-    if (sk) {
-        struct ipv6_pinfo *np = inet6_sk(sk);
-
-        *dst = sk_dst_check(sk, np->dst_cookie);
-        if (*dst) {
-            struct rt6_info *rt = (struct rt6_info*)*dst;
-
-            /* Yes, checking route validity in not connected
-             * case is not very simple. Take into account,
-             * that we do not support routing by source, TOS,
-             * and MSG_DONTROUTE      --ANK (980726)
-             *
-             * 1. If route was host route, check that
-             *    cached destination is current.
-             *    If it is network route, we still may
-             *    check its validity using saved pointer
-             *    to the last used address: daddr_cache.
-             *    We do not want to save whole address now,
-             *    (because main consumer of this service
-             *    is tcp, which has not this problem),
-             *    so that the last trick works only on connected
-             *    sockets.
-             * 2. oif also should be the same.
-             */
-            if (((rt->rt6i_dst.plen != 128 ||
-                  !ipv6_addr_equal(&fl->fl6_dst,
-                                   &rt->rt6i_dst.addr))
-                 && (np->daddr_cache == NULL ||
-                     !ipv6_addr_equal(&fl->fl6_dst,
-                                      np->daddr_cache)))
-                || (fl->oif && fl->oif != (*dst)->dev->ifindex)) {
-                dst_release(*dst);
-                *dst = NULL;
-            }
-        }
+    if (!dst)
+        goto out;
+
+    /* Yes, checking route validity in not connected
+     * case is not very simple. Take into account,
+     * that we do not support routing by source, TOS,
+     * and MSG_DONTROUTE      --ANK (980726)
+     *
+     * 1. If route was host route, check that
+     *    cached destination is current.
+     *    If it is network route, we still may
+     *    check its validity using saved pointer
+     *    to the last used address: daddr_cache.
+     *    We do not want to save whole address now,
+     *    (because main consumer of this service
+     *    is tcp, which has not this problem),
+     *    so that the last trick works only on connected
+     *    sockets.
+     * 2. oif also should be the same.
+     */
+    if (((rt->rt6i_dst.plen != 128 ||
+          !ipv6_addr_equal(&fl->fl6_dst, &rt->rt6i_dst.addr))
+         && (np->daddr_cache == NULL ||
+             !ipv6_addr_equal(&fl->fl6_dst, np->daddr_cache)))
+        || (fl->oif && fl->oif != dst->dev->ifindex)) {
+        dst_release(dst);
+        dst = NULL;
     }
+
+out:
+    return dst;
+}
+
+static int ip6_dst_lookup_tail(struct sock *sk,
+                               struct dst_entry **dst, struct flowi *fl)
+{
+    int err;

     if (*dst == NULL)
         *dst = ip6_route_output(sk, fl);
···
     if (ipv6_addr_any(&fl->fl6_src)) {
         err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src);
-
         if (err)
             goto out_err_release;
     }
···
     return err;
 }

+/**
+ *  ip6_dst_lookup - perform route lookup on flow
+ *  @sk: socket which provides route info
+ *  @dst: pointer to dst_entry * for result
+ *  @fl: flow to lookup
+ *
+ *  This function performs a route lookup on the given flow.
+ *
+ *  It returns zero on success, or a standard errno code on error.
+ */
+int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
+{
+    *dst = NULL;
+    return ip6_dst_lookup_tail(sk, dst, fl);
+}
 EXPORT_SYMBOL_GPL(ip6_dst_lookup);
+
+/**
+ *  ip6_sk_dst_lookup - perform socket cached route lookup on flow
+ *  @sk: socket which provides the dst cache and route info
+ *  @dst: pointer to dst_entry * for result
+ *  @fl: flow to lookup
+ *
+ *  This function performs a route lookup on the given flow with the
+ *  possibility of using the cached route in the socket if it is valid.
+ *  It will take the socket dst lock when operating on the dst cache.
+ *  As a result, this function can only be used in process context.
+ *
+ *  It returns zero on success, or a standard errno code on error.
+ */
+int ip6_sk_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
+{
+    *dst = NULL;
+    if (sk) {
+        *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
+        *dst = ip6_sk_dst_check(sk, *dst, fl);
+    }
+
+    return ip6_dst_lookup_tail(sk, dst, fl);
+}
+EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup);

 static inline int ip6_ufo_append_data(struct sock *sk,
             int getfrag(void *from, char *to, int offset, int len,
+7
net/ipv6/route.c
···
 #include <linux/rtnetlink.h>
 #include <net/dst.h>
 #include <net/xfrm.h>
+#include <net/netevent.h>

 #include <asm/uaccess.h>
···
         dst->metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
     }
     dst->metrics[RTAX_MTU-1] = mtu;
+    call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
     }
 }
···
     struct rt6_info *rt, *nrt = NULL;
     int strict;
     struct fib6_node *fn;
+    struct netevent_redirect netevent;

     /*
      * Get the "current" route for this destination and
···
     if (ip6_ins_rt(nrt, NULL, NULL, NULL))
         goto out;
+
+    netevent.old = &rt->u.dst;
+    netevent.new = &nrt->u.dst;
+    call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);

     if (rt->rt6i_flags&RTF_CACHE) {
         ip6_del_rt(rt, NULL, NULL, NULL);
+2 -4
net/ipv6/tcp_ipv6.c
···
     inet->rcv_saddr = LOOPBACK4_IPV6;

     sk->sk_gso_type = SKB_GSO_TCPV6;
-    ip6_dst_store(sk, dst, NULL);
+    __ip6_dst_store(sk, dst, NULL);

     icsk->icsk_ext_hdr_len = 0;
     if (np->opt)
···
     case TCP_SYN_RECV:  /* Cannot happen.
                            It can, it SYNs are crossed. --ANK */
         if (!sock_owned_by_user(sk)) {
-            TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
             sk->sk_err = err;
             sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
···
     if (req)
         reqsk_free(req);

-    TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
     return 0; /* don't send reset */
 }
···
      */

     newsk->sk_gso_type = SKB_GSO_TCPV6;
-    ip6_dst_store(newsk, dst, NULL);
+    __ip6_dst_store(newsk, dst, NULL);

     newtcp6sk = (struct tcp6_sock *)newsk;
     inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
+1 -1
net/ipv6/udp.c
···
         connected = 0;
     }

-    err = ip6_dst_lookup(sk, &dst, fl);
+    err = ip6_sk_dst_lookup(sk, &dst, fl);
     if (err)
         goto out;
     if (final_p)
+1 -1
net/ipv6/xfrm6_output.c
···
     if (!skb_is_gso(skb))
         return xfrm6_output_finish2(skb);

-    skb->protocol = htons(ETH_P_IP);
+    skb->protocol = htons(ETH_P_IPV6);
     segs = skb_gso_segment(skb, 0);
     kfree_skb(skb);
     if (unlikely(IS_ERR(segs)))
+2
net/netfilter/xt_SECMARK.c
···
 {
     int err;
     struct xt_secmark_target_selinux_info *sel = &info->u.sel;
+
+    sel->selctx[SECMARK_SELCTX_MAX - 1] = '\0';

     err = selinux_string_to_sid(sel->selctx, &sel->selsid);
     if (err) {
+4 -1
net/netfilter/xt_string.c
···
     /* Damn, can't handle this case properly with iptables... */
     if (conf->from_offset > conf->to_offset)
         return 0;
-
+    if (conf->algo[XT_STRING_MAX_ALGO_NAME_SIZE - 1] != '\0')
+        return 0;
+    if (conf->patlen > XT_STRING_MAX_PATTERN_SIZE)
+        return 0;
     ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen,
                                  GFP_KERNEL, TS_AUTOLOAD);
     if (IS_ERR(ts_conf))
+5 -12
net/unix/af_unix.c
···
 #define UNIX_ABSTRACT(sk)  (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)

 #ifdef CONFIG_SECURITY_NETWORK
-static void unix_get_peersec_dgram(struct sk_buff *skb)
+static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 {
-    int err;
-
-    err = security_socket_getpeersec_dgram(skb, UNIXSECDATA(skb),
-                                           UNIXSECLEN(skb));
-    if (err)
-        *(UNIXSECDATA(skb)) = NULL;
+    memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
 }

 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 {
-    scm->secdata = *UNIXSECDATA(skb);
-    scm->seclen = *UNIXSECLEN(skb);
+    scm->secid = *UNIXSID(skb);
 }
 #else
-static inline void unix_get_peersec_dgram(struct sk_buff *skb)
+static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 { }

 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
···
     memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
     if (siocb->scm->fp)
         unix_attach_fds(siocb->scm, skb);
-
-    unix_get_peersec_dgram(skb);
+    unix_get_secdata(siocb->scm, skb);

     skb->h.raw = skb->data;
     err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
+12 -2
security/dummy.c
···
     return -ENOPROTOOPT;
 }

-static int dummy_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata,
-                                         u32 *seclen)
+static int dummy_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
 {
     return -ENOPROTOOPT;
 }
···
 static int dummy_setprocattr(struct task_struct *p, char *name, void *value, size_t size)
 {
     return -EINVAL;
 }

+static int dummy_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+{
+    return -EOPNOTSUPP;
+}
+
+static void dummy_release_secctx(char *secdata, u32 seclen)
+{
+}
+
 #ifdef CONFIG_KEYS
···
     set_to_dummy_if_null(ops, d_instantiate);
     set_to_dummy_if_null(ops, getprocattr);
     set_to_dummy_if_null(ops, setprocattr);
+    set_to_dummy_if_null(ops, secid_to_secctx);
+    set_to_dummy_if_null(ops, release_secctx);
 #ifdef CONFIG_SECURITY_NETWORK
     set_to_dummy_if_null(ops, unix_stream_connect);
     set_to_dummy_if_null(ops, unix_may_send);
+24 -14
security/selinux/hooks.c
···
     return err;
 }

-static int selinux_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata, u32 *seclen)
+static int selinux_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
 {
+    u32 peer_secid = SECSID_NULL;
     int err = 0;
-    u32 peer_sid;

-    if (skb->sk->sk_family == PF_UNIX)
-        selinux_get_inode_sid(SOCK_INODE(skb->sk->sk_socket),
-                              &peer_sid);
-    else
-        peer_sid = selinux_socket_getpeer_dgram(skb);
+    if (sock && (sock->sk->sk_family == PF_UNIX))
+        selinux_get_inode_sid(SOCK_INODE(sock), &peer_secid);
+    else if (skb)
+        peer_secid = selinux_socket_getpeer_dgram(skb);

-    if (peer_sid == SECSID_NULL)
-        return -EINVAL;
+    if (peer_secid == SECSID_NULL)
+        err = -EINVAL;
+    *secid = peer_secid;

-    err = security_sid_to_context(peer_sid, secdata, seclen);
-    if (err)
-        return err;
-
-    return 0;
+    return err;
 }

 static int selinux_sk_alloc_security(struct sock *sk, int family, gfp_t priority)
···
     return size;
 }

+static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+{
+    return security_sid_to_context(secid, secdata, seclen);
+}
+
+static void selinux_release_secctx(char *secdata, u32 seclen)
+{
+    if (secdata)
+        kfree(secdata);
+}
+
 #ifdef CONFIG_KEYS

 static int selinux_key_alloc(struct key *k, struct task_struct *tsk,
···
     .getprocattr =       selinux_getprocattr,
     .setprocattr =       selinux_setprocattr,
+
+    .secid_to_secctx =   selinux_secid_to_secctx,
+    .release_secctx =    selinux_release_secctx,

     .unix_stream_connect = selinux_socket_unix_stream_connect,
     .unix_may_send =       selinux_socket_unix_may_send,