Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
[PKT_SCHED] sch_htb: use rb_first() cleanup
[RTNETLINK]: Fix use of wrong skb in do_getlink()
[DECNET]: Fix sfuzz hanging on 2.6.18
[NET]: Do not memcmp() over pad bytes of struct flowi.
[NET]: Introduce protocol-specific destructor for time-wait sockets.
[NET]: Use typesafe inet_twsk() inline function instead of cast.
[NET]: Use hton{l,s}() for non-initializers.
[TCP]: Use TCPOLEN_TSTAMP_ALIGNED macro instead of magic number.
[IPV6]: Separate sit driver to extra module (addrconf.c changes)
[IPV6]: Separate sit driver to extra module
[NET]: File descriptor loss while receiving SCM_RIGHTS
[SCTP]: Fix the RX queue size shown in /proc/net/sctp/assocs output.
[SCTP]: Fix receive buffer accounting.
SELinux: Bug fix in policydb_destroy
IPsec: fix handling of errors for socket policies
IPsec: correct semantics for SELinux policy matching
IPsec: propagate security module errors up from flow_cache_lookup
NetLabel: use SECINITSID_UNLABELED for a base SID
NetLabel: fix a cache race condition

+409 -217
+9 -15
include/linux/security.h
··· 882 * Check permission when a flow selects a xfrm_policy for processing 883 * XFRMs on a packet. The hook is called when selecting either a 884 * per-socket policy or a generic xfrm policy. 885 - * Return 0 if permission is granted. 886 * @xfrm_state_pol_flow_match: 887 * @x contains the state to match. 888 * @xp contains the policy to check for a match. ··· 892 * @xfrm_flow_state_match: 893 * @fl contains the flow key to match. 894 * @xfrm points to the xfrm_state to match. 895 * Return 1 if there is a match. 896 * @xfrm_decode_session: 897 * @skb points to skb to decode. ··· 1390 int (*xfrm_policy_lookup)(struct xfrm_policy *xp, u32 fl_secid, u8 dir); 1391 int (*xfrm_state_pol_flow_match)(struct xfrm_state *x, 1392 struct xfrm_policy *xp, struct flowi *fl); 1393 - int (*xfrm_flow_state_match)(struct flowi *fl, struct xfrm_state *xfrm); 1394 int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall); 1395 #endif /* CONFIG_SECURITY_NETWORK_XFRM */ 1396 ··· 3123 return security_ops->xfrm_policy_alloc_security(xp, sec_ctx, NULL); 3124 } 3125 3126 - static inline int security_xfrm_sock_policy_alloc(struct xfrm_policy *xp, struct sock *sk) 3127 - { 3128 - return security_ops->xfrm_policy_alloc_security(xp, NULL, sk); 3129 - } 3130 - 3131 static inline int security_xfrm_policy_clone(struct xfrm_policy *old, struct xfrm_policy *new) 3132 { 3133 return security_ops->xfrm_policy_clone_security(old, new); ··· 3173 return security_ops->xfrm_state_pol_flow_match(x, xp, fl); 3174 } 3175 3176 - static inline int security_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm) 3177 { 3178 - return security_ops->xfrm_flow_state_match(fl, xfrm); 3179 } 3180 3181 static inline int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid) ··· 3192 } 3193 #else /* CONFIG_SECURITY_NETWORK_XFRM */ 3194 static inline int security_xfrm_policy_alloc(struct xfrm_policy *xp, struct xfrm_user_sec_ctx *sec_ctx) 3195 - { 3196 - return 0; 3197 - } 3198 - 3199 - static inline int security_xfrm_sock_policy_alloc(struct xfrm_policy *xp, struct sock *sk) 3200 { 3201 return 0; 3202 } ··· 3243 } 3244 3245 static inline int security_xfrm_flow_state_match(struct flowi *fl, 3246 - struct xfrm_state *xfrm) 3247 { 3248 return 1; 3249 }
··· 882 * Check permission when a flow selects a xfrm_policy for processing 883 * XFRMs on a packet. The hook is called when selecting either a 884 * per-socket policy or a generic xfrm policy. 885 + * Return 0 if permission is granted, -ESRCH otherwise, or -errno 886 + * on other errors. 887 * @xfrm_state_pol_flow_match: 888 * @x contains the state to match. 889 * @xp contains the policy to check for a match. ··· 891 * @xfrm_flow_state_match: 892 * @fl contains the flow key to match. 893 * @xfrm points to the xfrm_state to match. 894 + * @xp points to the xfrm_policy to match. 895 * Return 1 if there is a match. 896 * @xfrm_decode_session: 897 * @skb points to skb to decode. ··· 1388 int (*xfrm_policy_lookup)(struct xfrm_policy *xp, u32 fl_secid, u8 dir); 1389 int (*xfrm_state_pol_flow_match)(struct xfrm_state *x, 1390 struct xfrm_policy *xp, struct flowi *fl); 1391 + int (*xfrm_flow_state_match)(struct flowi *fl, struct xfrm_state *xfrm, 1392 + struct xfrm_policy *xp); 1393 int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall); 1394 #endif /* CONFIG_SECURITY_NETWORK_XFRM */ 1395 ··· 3120 return security_ops->xfrm_policy_alloc_security(xp, sec_ctx, NULL); 3121 } 3122 3123 static inline int security_xfrm_policy_clone(struct xfrm_policy *old, struct xfrm_policy *new) 3124 { 3125 return security_ops->xfrm_policy_clone_security(old, new); ··· 3175 return security_ops->xfrm_state_pol_flow_match(x, xp, fl); 3176 } 3177 3178 + static inline int security_xfrm_flow_state_match(struct flowi *fl, 3179 + struct xfrm_state *xfrm, struct xfrm_policy *xp) 3180 { 3181 + return security_ops->xfrm_flow_state_match(fl, xfrm, xp); 3182 } 3183 3184 static inline int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid) ··· 3193 } 3194 #else /* CONFIG_SECURITY_NETWORK_XFRM */ 3195 static inline int security_xfrm_policy_alloc(struct xfrm_policy *xp, struct xfrm_user_sec_ctx *sec_ctx) 3196 { 3197 return 0; 3198 } ··· 3249 } 3250 3251 static inline int security_xfrm_flow_state_match(struct flowi *fl, 3252 + struct xfrm_state *xfrm, struct xfrm_policy *xp) 3253 { 3254 return 1; 3255 }
+1 -1
include/net/flow.h
··· 97 #define FLOW_DIR_FWD 2 98 99 struct sock; 100 - typedef void (*flow_resolve_t)(struct flowi *key, u16 family, u8 dir, 101 void **objp, atomic_t **obj_refp); 102 103 extern void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
··· 97 #define FLOW_DIR_FWD 2 98 99 struct sock; 100 + typedef int (*flow_resolve_t)(struct flowi *key, u16 family, u8 dir, 101 void **objp, atomic_t **obj_refp); 102 103 extern void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
+1
include/net/inet_timewait_sock.h
··· 196 { 197 if (atomic_dec_and_test(&tw->tw_refcnt)) { 198 struct module *owner = tw->tw_prot->owner; 199 #ifdef SOCK_REFCNT_DEBUG 200 printk(KERN_DEBUG "%s timewait_sock %p released\n", 201 tw->tw_prot->name, tw);
··· 196 { 197 if (atomic_dec_and_test(&tw->tw_refcnt)) { 198 struct module *owner = tw->tw_prot->owner; 199 + twsk_destructor((struct sock *)tw); 200 #ifdef SOCK_REFCNT_DEBUG 201 printk(KERN_DEBUG "%s timewait_sock %p released\n", 202 tw->tw_prot->name, tw);
+47 -15
include/net/netlabel.h
··· 34 #include <linux/net.h> 35 #include <linux/skbuff.h> 36 #include <net/netlink.h> 37 38 /* 39 * NetLabel - A management interface for maintaining network packet label ··· 107 108 /* LSM security attributes */ 109 struct netlbl_lsm_cache { 110 void (*free) (const void *data); 111 void *data; 112 }; ··· 119 unsigned char *mls_cat; 120 size_t mls_cat_len; 121 122 - struct netlbl_lsm_cache cache; 123 }; 124 125 /* 126 * LSM security attribute operations 127 */ 128 129 130 /** 131 * netlbl_secattr_init - Initialize a netlbl_lsm_secattr struct ··· 182 /** 183 * netlbl_secattr_destroy - Clears a netlbl_lsm_secattr struct 184 * @secattr: the struct to clear 185 - * @clear_cache: cache clear flag 186 * 187 * Description: 188 * Destroys the @secattr struct, including freeing all of the internal buffers. 189 - * If @clear_cache is true then free the cache fields, otherwise leave them 190 - * intact. The struct must be reset with a call to netlbl_secattr_init() 191 - * before reuse. 192 * 193 */ 194 - static inline void netlbl_secattr_destroy(struct netlbl_lsm_secattr *secattr, 195 - u32 clear_cache) 196 { 197 - if (clear_cache && secattr->cache.data != NULL && secattr->cache.free) 198 - secattr->cache.free(secattr->cache.data); 199 kfree(secattr->domain); 200 kfree(secattr->mls_cat); 201 } ··· 213 /** 214 * netlbl_secattr_free - Frees a netlbl_lsm_secattr struct 215 * @secattr: the struct to free 216 - * @clear_cache: cache clear flag 217 * 218 * Description: 219 - * Frees @secattr including all of the internal buffers. If @clear_cache is 220 - * true then free the cache fields, otherwise leave them intact. 221 * 222 */ 223 - static inline void netlbl_secattr_free(struct netlbl_lsm_secattr *secattr, 224 - u32 clear_cache) 225 { 226 - netlbl_secattr_destroy(secattr, clear_cache); 227 kfree(secattr); 228 } 229
··· 34 #include <linux/net.h> 35 #include <linux/skbuff.h> 36 #include <net/netlink.h> 37 + #include <asm/atomic.h> 38 39 /* 40 * NetLabel - A management interface for maintaining network packet label ··· 106 107 /* LSM security attributes */ 108 struct netlbl_lsm_cache { 109 + atomic_t refcount; 110 void (*free) (const void *data); 111 void *data; 112 }; ··· 117 unsigned char *mls_cat; 118 size_t mls_cat_len; 119 120 + struct netlbl_lsm_cache *cache; 121 }; 122 123 /* 124 * LSM security attribute operations 125 */ 126 127 + 128 + /** 129 + * netlbl_secattr_cache_alloc - Allocate and initialize a secattr cache 130 + * @flags: the memory allocation flags 131 + * 132 + * Description: 133 + * Allocate and initialize a netlbl_lsm_cache structure. Returns a pointer 134 + * on success, NULL on failure. 135 + * 136 + */ 137 + static inline struct netlbl_lsm_cache *netlbl_secattr_cache_alloc(int flags) 138 + { 139 + struct netlbl_lsm_cache *cache; 140 + 141 + cache = kzalloc(sizeof(*cache), flags); 142 + if (cache) 143 + atomic_set(&cache->refcount, 1); 144 + return cache; 145 + } 146 + 147 + /** 148 + * netlbl_secattr_cache_free - Frees a netlbl_lsm_cache struct 149 + * @cache: the struct to free 150 + * 151 + * Description: 152 + * Frees @secattr including all of the internal buffers. 153 + * 154 + */ 155 + static inline void netlbl_secattr_cache_free(struct netlbl_lsm_cache *cache) 156 + { 157 + if (!atomic_dec_and_test(&cache->refcount)) 158 + return; 159 + 160 + if (cache->free) 161 + cache->free(cache->data); 162 + kfree(cache); 163 + } 164 165 /** 166 * netlbl_secattr_init - Initialize a netlbl_lsm_secattr struct ··· 143 /** 144 * netlbl_secattr_destroy - Clears a netlbl_lsm_secattr struct 145 * @secattr: the struct to clear 146 * 147 * Description: 148 * Destroys the @secattr struct, including freeing all of the internal buffers. 149 + * The struct must be reset with a call to netlbl_secattr_init() before reuse. 150 * 151 */ 152 + static inline void netlbl_secattr_destroy(struct netlbl_lsm_secattr *secattr) 153 { 154 + if (secattr->cache) 155 + netlbl_secattr_cache_free(secattr->cache); 156 kfree(secattr->domain); 157 kfree(secattr->mls_cat); 158 } ··· 178 /** 179 * netlbl_secattr_free - Frees a netlbl_lsm_secattr struct 180 * @secattr: the struct to free 181 * 182 * Description: 183 + * Frees @secattr including all of the internal buffers. 184 * 185 */ 186 + static inline void netlbl_secattr_free(struct netlbl_lsm_secattr *secattr) 187 { 188 + netlbl_secattr_destroy(secattr); 189 kfree(secattr); 190 } 191
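The netlbl_lsm_cache change above turns the embedded cache into a shared, reference-counted object, so a CIPSO cache entry and a secattr can point at the same LSM data without either of them freeing it out from under the other. A minimal userspace sketch of the same pattern, using C11 atomics in place of the kernel's atomic_t/kzalloc (names here are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdlib.h>
#include <stdio.h>

struct lsm_cache {
	atomic_int refcount;
	void (*free_fn)(void *data);	/* optional LSM-supplied destructor */
	void *data;
};

static struct lsm_cache *cache_alloc(void)
{
	struct lsm_cache *c = calloc(1, sizeof(*c));
	if (c)
		atomic_store(&c->refcount, 1);
	return c;
}

static void cache_get(struct lsm_cache *c)
{
	atomic_fetch_add(&c->refcount, 1);	/* new holder, e.g. a CIPSO cache entry */
}

static void cache_put(struct lsm_cache *c)
{
	if (atomic_fetch_sub(&c->refcount, 1) != 1)
		return;				/* other holders remain */
	if (c->free_fn)
		c->free_fn(c->data);
	free(c);
}

int main(void)
{
	struct lsm_cache *c = cache_alloc();
	cache_get(c);	/* second holder takes a reference */
	cache_put(c);	/* first holder drops */
	cache_put(c);	/* last holder drops; data freed exactly once */
	printf("done\n");
	return 0;
}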
+14
include/net/sctp/sctp.h
··· 139 void sctp_write_space(struct sock *sk); 140 unsigned int sctp_poll(struct file *file, struct socket *sock, 141 poll_table *wait); 142 143 /* 144 * sctp/primitive.c ··· 443 INIT_LIST_HEAD(result); 444 } 445 return result; 446 } 447 448 /* Tests if the list has one and only one entry. */
··· 139 void sctp_write_space(struct sock *sk); 140 unsigned int sctp_poll(struct file *file, struct socket *sock, 141 poll_table *wait); 142 + void sctp_sock_rfree(struct sk_buff *skb); 143 144 /* 145 * sctp/primitive.c ··· 442 INIT_LIST_HEAD(result); 443 } 444 return result; 445 + } 446 + 447 + /* SCTP version of skb_set_owner_r. We need this one because 448 + * of the way we have to do receive buffer accounting on bundled 449 + * chunks. 450 + */ 451 + static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) 452 + { 453 + struct sctp_ulpevent *event = sctp_skb2event(skb); 454 + 455 + skb->sk = sk; 456 + skb->destructor = sctp_sock_rfree; 457 + atomic_add(event->rmem_len, &sk->sk_rmem_alloc); 458 } 459 460 /* Tests if the list has one and only one entry. */
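The new sctp_skb_set_owner_r()/sctp_sock_rfree() pair charges the socket's receive-buffer counter with a per-event rmem_len instead of skb->truesize, so chunks bundled in one packet are each billed for their own share rather than all being billed the full truesize. A rough userspace sketch of that accounting symmetry (plain ints stand in for atomic_t; names are illustrative):

#include <stdio.h>

struct sock { int sk_rmem_alloc; };

struct event {
	struct sock *sk;
	unsigned int rmem_len;	/* what this event actually costs, not truesize */
};

/* mirror of sctp_skb_set_owner_r(): charge the socket when the event is queued */
static void set_owner_r(struct event *ev, struct sock *sk, unsigned int len)
{
	ev->sk = sk;
	ev->rmem_len = len;
	sk->sk_rmem_alloc += len;
}

/* mirror of sctp_sock_rfree(): release exactly what was charged */
static void sock_rfree(struct event *ev)
{
	ev->sk->sk_rmem_alloc -= ev->rmem_len;
}

int main(void)
{
	struct sock sk = { 0 };
	struct event chunk1, chunk2;

	/* two chunks bundled in one packet: each is charged its own share */
	set_owner_r(&chunk1, &sk, 256);
	set_owner_r(&chunk2, &sk, 128);
	printf("charged: %d\n", sk.sk_rmem_alloc);	/* 384 */

	sock_rfree(&chunk1);
	sock_rfree(&chunk2);
	printf("after free: %d\n", sk.sk_rmem_alloc);	/* back to 0 */
	return 0;
}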
+1
include/net/sctp/ulpevent.h
··· 63 __u32 cumtsn; 64 int msg_flags; 65 int iif; 66 }; 67 68 /* Retrieve the skb this event sits inside of. */
··· 63 __u32 cumtsn; 64 int msg_flags; 65 int iif; 66 + unsigned int rmem_len; 67 }; 68 69 /* Retrieve the skb this event sits inside of. */
+7
include/net/timewait_sock.h
··· 19 unsigned int twsk_obj_size; 20 int (*twsk_unique)(struct sock *sk, 21 struct sock *sktw, void *twp); 22 }; 23 24 static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp) ··· 27 if (sk->sk_prot->twsk_prot->twsk_unique != NULL) 28 return sk->sk_prot->twsk_prot->twsk_unique(sk, sktw, twp); 29 return 0; 30 } 31 32 #endif /* _TIMEWAIT_SOCK_H */
··· 19 unsigned int twsk_obj_size; 20 int (*twsk_unique)(struct sock *sk, 21 struct sock *sktw, void *twp); 22 + void (*twsk_destructor)(struct sock *sk); 23 }; 24 25 static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp) ··· 26 if (sk->sk_prot->twsk_prot->twsk_unique != NULL) 27 return sk->sk_prot->twsk_prot->twsk_unique(sk, sktw, twp); 28 return 0; 29 + } 30 + 31 + static inline void twsk_destructor(struct sock *sk) 32 + { 33 + if (sk->sk_prot->twsk_prot->twsk_destructor != NULL) 34 + sk->sk_prot->twsk_prot->twsk_destructor(sk); 35 } 36 37 #endif /* _TIMEWAIT_SOCK_H */
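The twsk_destructor hook added above lets a protocol run its own cleanup when the last reference to a time-wait socket is dropped; inet_twsk_put() now calls it just before the object is freed. A minimal sketch of the optional-callback pattern (userspace C, illustrative names, no kernel refcounting semantics implied):

#include <stdio.h>
#include <stdlib.h>

struct timewait_sock_ops {
	void (*twsk_destructor)(void *sk);	/* may be NULL */
};

struct tw_sock {
	int refcnt;
	const struct timewait_sock_ops *ops;
};

/* mirror of the new twsk_destructor() helper: call the hook only if set */
static void twsk_destructor(struct tw_sock *tw)
{
	if (tw->ops && tw->ops->twsk_destructor)
		tw->ops->twsk_destructor(tw);
}

/* mirror of inet_twsk_put(): run protocol cleanup before freeing */
static void twsk_put(struct tw_sock *tw)
{
	if (--tw->refcnt == 0) {
		twsk_destructor(tw);
		free(tw);
	}
}

static void my_proto_destructor(void *sk)
{
	printf("protocol cleanup for %p\n", sk);
}

int main(void)
{
	static const struct timewait_sock_ops ops = {
		.twsk_destructor = my_proto_destructor,
	};
	struct tw_sock *tw = malloc(sizeof(*tw));
	tw->refcnt = 1;
	tw->ops = &ops;
	twsk_put(tw);	/* last reference: destructor runs, then free */
	return 0;
}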
+2 -1
include/net/xfrm.h
··· 995 int create, unsigned short family); 996 extern void xfrm_policy_flush(u8 type); 997 extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol); 998 - extern int xfrm_bundle_ok(struct xfrm_dst *xdst, struct flowi *fl, int family, int strict); 999 extern void xfrm_init_pmtu(struct dst_entry *dst); 1000 1001 extern wait_queue_head_t km_waitq;
··· 995 int create, unsigned short family); 996 extern void xfrm_policy_flush(u8 type); 997 extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol); 998 + extern int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *xdst, 999 + struct flowi *fl, int family, int strict); 1000 extern void xfrm_init_pmtu(struct dst_entry *dst); 1001 1002 extern wait_queue_head_t km_waitq;
+1 -2
net/compat.c
··· 285 286 if (i > 0) { 287 int cmlen = CMSG_COMPAT_LEN(i * sizeof(int)); 288 - if (!err) 289 - err = put_user(SOL_SOCKET, &cm->cmsg_level); 290 if (!err) 291 err = put_user(SCM_RIGHTS, &cm->cmsg_type); 292 if (!err)
··· 285 286 if (i > 0) { 287 int cmlen = CMSG_COMPAT_LEN(i * sizeof(int)); 288 + err = put_user(SOL_SOCKET, &cm->cmsg_level); 289 if (!err) 290 err = put_user(SCM_RIGHTS, &cm->cmsg_type); 291 if (!err)
+26 -12
net/core/flow.c
··· 85 add_timer(&flow_hash_rnd_timer); 86 } 87 88 static void __flow_cache_shrink(int cpu, int shrink_to) 89 { 90 struct flow_cache_entry *fle, **flp; ··· 108 } 109 while ((fle = *flp) != NULL) { 110 *flp = fle->next; 111 - if (fle->object) 112 - atomic_dec(fle->object_ref); 113 - kmem_cache_free(flow_cachep, fle); 114 - flow_count(cpu)--; 115 } 116 } 117 } ··· 225 226 nocache: 227 { 228 void *obj; 229 atomic_t *obj_ref; 230 231 - resolver(key, family, dir, &obj, &obj_ref); 232 233 if (fle) { 234 - fle->genid = atomic_read(&flow_cache_genid); 235 236 - if (fle->object) 237 - atomic_dec(fle->object_ref); 238 239 - fle->object = obj; 240 - fle->object_ref = obj_ref; 241 - if (obj) 242 - atomic_inc(fle->object_ref); 243 } 244 local_bh_enable(); 245 246 return obj; 247 } 248 }
··· 85 add_timer(&flow_hash_rnd_timer); 86 } 87 88 + static void flow_entry_kill(int cpu, struct flow_cache_entry *fle) 89 + { 90 + if (fle->object) 91 + atomic_dec(fle->object_ref); 92 + kmem_cache_free(flow_cachep, fle); 93 + flow_count(cpu)--; 94 + } 95 + 96 static void __flow_cache_shrink(int cpu, int shrink_to) 97 { 98 struct flow_cache_entry *fle, **flp; ··· 100 } 101 while ((fle = *flp) != NULL) { 102 *flp = fle->next; 103 + flow_entry_kill(cpu, fle); 104 } 105 } 106 } ··· 220 221 nocache: 222 { 223 + int err; 224 void *obj; 225 atomic_t *obj_ref; 226 227 + err = resolver(key, family, dir, &obj, &obj_ref); 228 229 if (fle) { 230 + if (err) { 231 + /* Force security policy check on next lookup */ 232 + *head = fle->next; 233 + flow_entry_kill(cpu, fle); 234 + } else { 235 + fle->genid = atomic_read(&flow_cache_genid); 236 237 + if (fle->object) 238 + atomic_dec(fle->object_ref); 239 240 + fle->object = obj; 241 + fle->object_ref = obj_ref; 242 + if (obj) 243 + atomic_inc(fle->object_ref); 244 + } 245 } 246 local_bh_enable(); 247 248 + if (err) 249 + obj = ERR_PTR(err); 250 return obj; 251 } 252 }
+1 -1
net/core/rtnetlink.c
··· 602 goto errout; 603 } 604 605 - err = rtnl_unicast(skb, NETLINK_CB(skb).pid); 606 errout: 607 kfree(iw_buf); 608 dev_put(dev);
··· 602 goto errout; 603 } 604 605 + err = rtnl_unicast(nskb, NETLINK_CB(skb).pid); 606 errout: 607 kfree(iw_buf); 608 dev_put(dev);
+1 -2
net/core/scm.c
··· 245 if (i > 0) 246 { 247 int cmlen = CMSG_LEN(i*sizeof(int)); 248 - if (!err) 249 - err = put_user(SOL_SOCKET, &cm->cmsg_level); 250 if (!err) 251 err = put_user(SCM_RIGHTS, &cm->cmsg_type); 252 if (!err)
··· 245 if (i > 0) 246 { 247 int cmlen = CMSG_LEN(i*sizeof(int)); 248 + err = put_user(SOL_SOCKET, &cm->cmsg_level); 249 if (!err) 250 err = put_user(SCM_RIGHTS, &cm->cmsg_type); 251 if (!err)
+3 -3
net/dccp/ipv4.c
··· 311 } 312 313 if (sk->sk_state == DCCP_TIME_WAIT) { 314 - inet_twsk_put((struct inet_timewait_sock *)sk); 315 return; 316 } 317 ··· 614 bh_lock_sock(nsk); 615 return nsk; 616 } 617 - inet_twsk_put((struct inet_timewait_sock *)nsk); 618 return NULL; 619 } 620 ··· 980 goto discard_it; 981 982 do_time_wait: 983 - inet_twsk_put((struct inet_timewait_sock *)sk); 984 goto no_dccp_socket; 985 } 986
··· 311 } 312 313 if (sk->sk_state == DCCP_TIME_WAIT) { 314 + inet_twsk_put(inet_twsk(sk)); 315 return; 316 } 317 ··· 614 bh_lock_sock(nsk); 615 return nsk; 616 } 617 + inet_twsk_put(inet_twsk(nsk)); 618 return NULL; 619 } 620 ··· 980 goto discard_it; 981 982 do_time_wait: 983 + inet_twsk_put(inet_twsk(sk)); 984 goto no_dccp_socket; 985 } 986
+3 -3
net/dccp/ipv6.c
··· 285 } 286 287 if (sk->sk_state == DCCP_TIME_WAIT) { 288 - inet_twsk_put((struct inet_timewait_sock *)sk); 289 return; 290 } 291 ··· 663 bh_lock_sock(nsk); 664 return nsk; 665 } 666 - inet_twsk_put((struct inet_timewait_sock *)nsk); 667 return NULL; 668 } 669 ··· 1109 goto discard_it; 1110 1111 do_time_wait: 1112 - inet_twsk_put((struct inet_timewait_sock *)sk); 1113 goto no_dccp_socket; 1114 } 1115
··· 285 } 286 287 if (sk->sk_state == DCCP_TIME_WAIT) { 288 + inet_twsk_put(inet_twsk(sk)); 289 return; 290 } 291 ··· 663 bh_lock_sock(nsk); 664 return nsk; 665 } 666 + inet_twsk_put(inet_twsk(nsk)); 667 return NULL; 668 } 669 ··· 1109 goto discard_it; 1110 1111 do_time_wait: 1112 + inet_twsk_put(inet_twsk(sk)); 1113 goto no_dccp_socket; 1114 } 1115
+3 -1
net/decnet/af_decnet.c
··· 1178 if (peer) { 1179 if ((sock->state != SS_CONNECTED && 1180 sock->state != SS_CONNECTING) && 1181 - scp->accept_mode == ACC_IMMED) 1182 return -ENOTCONN; 1183 1184 memcpy(sa, &scp->peer, sizeof(struct sockaddr_dn)); 1185 } else {
··· 1178 if (peer) { 1179 if ((sock->state != SS_CONNECTED && 1180 sock->state != SS_CONNECTING) && 1181 + scp->accept_mode == ACC_IMMED) { 1182 + release_sock(sk); 1183 return -ENOTCONN; 1184 + } 1185 1186 memcpy(sa, &scp->peer, sizeof(struct sockaddr_dn)); 1187 } else {
+8 -3
net/decnet/dn_route.c
··· 267 268 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) 269 { 270 - return memcmp(&fl1->nl_u.dn_u, &fl2->nl_u.dn_u, sizeof(fl1->nl_u.dn_u)) == 0 && 271 - fl1->oif == fl2->oif && 272 - fl1->iif == fl2->iif; 273 } 274 275 static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
··· 267 268 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) 269 { 270 + return ((fl1->nl_u.dn_u.daddr ^ fl2->nl_u.dn_u.daddr) | 271 + (fl1->nl_u.dn_u.saddr ^ fl2->nl_u.dn_u.saddr) | 272 + #ifdef CONFIG_IP_ROUTE_FWMARK 273 + (fl1->nl_u.dn_u.fwmark ^ fl2->nl_u.dn_u.fwmark) | 274 + #endif 275 + (fl1->nl_u.dn_u.scope ^ fl2->nl_u.dn_u.scope) | 276 + (fl1->oif ^ fl2->oif) | 277 + (fl1->iif ^ fl2->iif)) == 0; 278 } 279 280 static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
+10 -8
net/ipv4/cipso_ipv4.c
··· 43 #include <net/tcp.h> 44 #include <net/netlabel.h> 45 #include <net/cipso_ipv4.h> 46 #include <asm/bug.h> 47 48 struct cipso_v4_domhsh_entry { ··· 80 unsigned char *key; 81 size_t key_len; 82 83 - struct netlbl_lsm_cache lsm_data; 84 85 u32 activity; 86 struct list_head list; ··· 189 * @entry: the entry to free 190 * 191 * Description: 192 - * This function frees the memory associated with a cache entry. 193 * 194 */ 195 static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry) 196 { 197 - if (entry->lsm_data.free) 198 - entry->lsm_data.free(entry->lsm_data.data); 199 kfree(entry->key); 200 kfree(entry); 201 } ··· 317 entry->key_len == key_len && 318 memcmp(entry->key, key, key_len) == 0) { 319 entry->activity += 1; 320 - secattr->cache.free = entry->lsm_data.free; 321 - secattr->cache.data = entry->lsm_data.data; 322 if (prev_entry == NULL) { 323 spin_unlock_bh(&cipso_v4_cache[bkt].lock); 324 return 0; ··· 385 memcpy(entry->key, cipso_ptr, cipso_ptr_len); 386 entry->key_len = cipso_ptr_len; 387 entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len); 388 - entry->lsm_data.free = secattr->cache.free; 389 - entry->lsm_data.data = secattr->cache.data; 390 391 bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETBITS - 1); 392 spin_lock_bh(&cipso_v4_cache[bkt].lock);
··· 43 #include <net/tcp.h> 44 #include <net/netlabel.h> 45 #include <net/cipso_ipv4.h> 46 + #include <asm/atomic.h> 47 #include <asm/bug.h> 48 49 struct cipso_v4_domhsh_entry { ··· 79 unsigned char *key; 80 size_t key_len; 81 82 + struct netlbl_lsm_cache *lsm_data; 83 84 u32 activity; 85 struct list_head list; ··· 188 * @entry: the entry to free 189 * 190 * Description: 191 + * This function frees the memory associated with a cache entry including the 192 + * LSM cache data if there are no longer any users, i.e. reference count == 0. 193 * 194 */ 195 static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry) 196 { 197 + if (entry->lsm_data) 198 + netlbl_secattr_cache_free(entry->lsm_data); 199 kfree(entry->key); 200 kfree(entry); 201 } ··· 315 entry->key_len == key_len && 316 memcmp(entry->key, key, key_len) == 0) { 317 entry->activity += 1; 318 + atomic_inc(&entry->lsm_data->refcount); 319 + secattr->cache = entry->lsm_data; 320 if (prev_entry == NULL) { 321 spin_unlock_bh(&cipso_v4_cache[bkt].lock); 322 return 0; ··· 383 memcpy(entry->key, cipso_ptr, cipso_ptr_len); 384 entry->key_len = cipso_ptr_len; 385 entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len); 386 + atomic_inc(&secattr->cache->refcount); 387 + entry->lsm_data = secattr->cache; 388 389 bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETBITS - 1); 390 spin_lock_bh(&cipso_v4_cache[bkt].lock);
+2 -2
net/ipv4/ip_gre.c
··· 611 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header 612 */ 613 if (flags == 0 && 614 - skb->protocol == __constant_htons(ETH_P_WCCP)) { 615 - skb->protocol = __constant_htons(ETH_P_IP); 616 if ((*(h + offset) & 0xF0) != 0x40) 617 offset += 4; 618 }
··· 611 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header 612 */ 613 if (flags == 0 && 614 + skb->protocol == htons(ETH_P_WCCP)) { 615 + skb->protocol = htons(ETH_P_IP); 616 if ((*(h + offset) & 0xF0) != 0x40) 617 offset += 4; 618 }
+9 -3
net/ipv4/route.c
··· 566 567 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) 568 { 569 - return memcmp(&fl1->nl_u.ip4_u, &fl2->nl_u.ip4_u, sizeof(fl1->nl_u.ip4_u)) == 0 && 570 - fl1->oif == fl2->oif && 571 - fl1->iif == fl2->iif; 572 } 573 574 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
··· 566 567 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) 568 { 569 + return ((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) | 570 + (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) | 571 + #ifdef CONFIG_IP_ROUTE_FWMARK 572 + (fl1->nl_u.ip4_u.fwmark ^ fl2->nl_u.ip4_u.fwmark) | 573 + #endif 574 + (*(u16 *)&fl1->nl_u.ip4_u.tos ^ 575 + *(u16 *)&fl2->nl_u.ip4_u.tos) | 576 + (fl1->oif ^ fl2->oif) | 577 + (fl1->iif ^ fl2->iif)) == 0; 578 } 579 580 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
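Both compare_keys() rewrites above exist because struct flowi contains padding, and a memcmp() over the whole union also compares stale pad bytes, so logically equal keys can fail to match. The field-wise XOR/OR form compares only the meaningful members. A small userspace illustration of the pitfall (the struct layout here is made up for the example, not struct flowi):

#include <stdio.h>
#include <string.h>

struct key {
	unsigned char tos;	/* pad bytes typically follow this member */
	unsigned int daddr;
	unsigned int saddr;
};

/* memcmp compares the pad bytes too, so equal keys can look different */
static int cmp_memcmp(const struct key *a, const struct key *b)
{
	return memcmp(a, b, sizeof(*a)) == 0;
}

/* field-wise comparison in the style of the new compare_keys() */
static int cmp_fields(const struct key *a, const struct key *b)
{
	return ((a->tos ^ b->tos) |
		(a->daddr ^ b->daddr) |
		(a->saddr ^ b->saddr)) == 0;
}

int main(void)
{
	struct key a, b;

	/* deliberately leave the padding filled with different garbage */
	memset(&a, 0xAA, sizeof(a));
	memset(&b, 0x55, sizeof(b));
	a.tos = b.tos = 4;
	a.daddr = b.daddr = 0x0a000001;
	a.saddr = b.saddr = 0x0a000002;

	printf("memcmp says equal: %d\n", cmp_memcmp(&a, &b));	/* likely 0 */
	printf("fields say equal: %d\n", cmp_fields(&a, &b));	/* 1 */
	return 0;
}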
+8 -10
net/ipv4/tcp_ipv4.c
··· 355 return; 356 } 357 if (sk->sk_state == TCP_TIME_WAIT) { 358 - inet_twsk_put((struct inet_timewait_sock *)sk); 359 return; 360 } 361 ··· 578 struct tcphdr *th = skb->h.th; 579 struct { 580 struct tcphdr th; 581 - u32 tsopt[3]; 582 } rep; 583 struct ip_reply_arg arg; 584 ··· 960 bh_lock_sock(nsk); 961 return nsk; 962 } 963 - inet_twsk_put((struct inet_timewait_sock *)nsk); 964 return NULL; 965 } 966 ··· 1154 1155 do_time_wait: 1156 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { 1157 - inet_twsk_put((struct inet_timewait_sock *) sk); 1158 goto discard_it; 1159 } 1160 1161 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) { 1162 TCP_INC_STATS_BH(TCP_MIB_INERRS); 1163 - inet_twsk_put((struct inet_timewait_sock *) sk); 1164 goto discard_it; 1165 } 1166 - switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk, 1167 - skb, th)) { 1168 case TCP_TW_SYN: { 1169 struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo, 1170 skb->nh.iph->daddr, 1171 th->dest, 1172 inet_iif(skb)); 1173 if (sk2) { 1174 - inet_twsk_deschedule((struct inet_timewait_sock *)sk, 1175 - &tcp_death_row); 1176 - inet_twsk_put((struct inet_timewait_sock *)sk); 1177 sk = sk2; 1178 goto process; 1179 }
··· 355 return; 356 } 357 if (sk->sk_state == TCP_TIME_WAIT) { 358 + inet_twsk_put(inet_twsk(sk)); 359 return; 360 } 361 ··· 578 struct tcphdr *th = skb->h.th; 579 struct { 580 struct tcphdr th; 581 + u32 tsopt[TCPOLEN_TSTAMP_ALIGNED >> 2]; 582 } rep; 583 struct ip_reply_arg arg; 584 ··· 960 bh_lock_sock(nsk); 961 return nsk; 962 } 963 + inet_twsk_put(inet_twsk(nsk)); 964 return NULL; 965 } 966 ··· 1154 1155 do_time_wait: 1156 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { 1157 + inet_twsk_put(inet_twsk(sk)); 1158 goto discard_it; 1159 } 1160 1161 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) { 1162 TCP_INC_STATS_BH(TCP_MIB_INERRS); 1163 + inet_twsk_put(inet_twsk(sk)); 1164 goto discard_it; 1165 } 1166 + switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) { 1167 case TCP_TW_SYN: { 1168 struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo, 1169 skb->nh.iph->daddr, 1170 th->dest, 1171 inet_iif(skb)); 1172 if (sk2) { 1173 + inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row); 1174 + inet_twsk_put(inet_twsk(sk)); 1175 sk = sk2; 1176 goto process; 1177 }
+20 -11
net/ipv4/tcp_output.c
··· 273 __u32 tstamp) 274 { 275 if (tp->rx_opt.tstamp_ok) { 276 - *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | 277 - (TCPOPT_NOP << 16) | 278 - (TCPOPT_TIMESTAMP << 8) | 279 - TCPOLEN_TIMESTAMP); 280 *ptr++ = htonl(tstamp); 281 *ptr++ = htonl(tp->rx_opt.ts_recent); 282 } ··· 325 *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss); 326 if (ts) { 327 if(sack) 328 - *ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) | 329 - (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); 330 else 331 - *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | 332 - (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); 333 *ptr++ = htonl(tstamp); /* TSVAL */ 334 *ptr++ = htonl(ts_recent); /* TSECR */ 335 } else if(sack) 336 - *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | 337 - (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM); 338 if (offer_wscale) 339 - *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale)); 340 } 341 342 /* This routine actually transmits TCP packets queued in by
··· 273 __u32 tstamp) 274 { 275 if (tp->rx_opt.tstamp_ok) { 276 + *ptr++ = htonl((TCPOPT_NOP << 24) | 277 + (TCPOPT_NOP << 16) | 278 + (TCPOPT_TIMESTAMP << 8) | 279 + TCPOLEN_TIMESTAMP); 280 *ptr++ = htonl(tstamp); 281 *ptr++ = htonl(tp->rx_opt.ts_recent); 282 } ··· 325 *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss); 326 if (ts) { 327 if(sack) 328 + *ptr++ = htonl((TCPOPT_SACK_PERM << 24) | 329 + (TCPOLEN_SACK_PERM << 16) | 330 + (TCPOPT_TIMESTAMP << 8) | 331 + TCPOLEN_TIMESTAMP); 332 else 333 + *ptr++ = htonl((TCPOPT_NOP << 24) | 334 + (TCPOPT_NOP << 16) | 335 + (TCPOPT_TIMESTAMP << 8) | 336 + TCPOLEN_TIMESTAMP); 337 *ptr++ = htonl(tstamp); /* TSVAL */ 338 *ptr++ = htonl(ts_recent); /* TSECR */ 339 } else if(sack) 340 + *ptr++ = htonl((TCPOPT_NOP << 24) | 341 + (TCPOPT_NOP << 16) | 342 + (TCPOPT_SACK_PERM << 8) | 343 + TCPOLEN_SACK_PERM); 344 if (offer_wscale) 345 + *ptr++ = htonl((TCPOPT_NOP << 24) | 346 + (TCPOPT_WINDOW << 16) | 347 + (TCPOLEN_WINDOW << 8) | 348 + (wscale)); 349 } 350 351 /* This routine actually transmits TCP packets queued in by
+1 -1
net/ipv4/xfrm4_policy.c
··· 52 xdst->u.rt.fl.fl4_dst == fl->fl4_dst && 53 xdst->u.rt.fl.fl4_src == fl->fl4_src && 54 xdst->u.rt.fl.fl4_tos == fl->fl4_tos && 55 - xfrm_bundle_ok(xdst, fl, AF_INET, 0)) { 56 dst_clone(dst); 57 break; 58 }
··· 52 xdst->u.rt.fl.fl4_dst == fl->fl4_dst && 53 xdst->u.rt.fl.fl4_src == fl->fl4_src && 54 xdst->u.rt.fl.fl4_tos == fl->fl4_tos && 55 + xfrm_bundle_ok(policy, xdst, fl, AF_INET, 0)) { 56 dst_clone(dst); 57 break; 58 }
+13
net/ipv6/Kconfig
··· 153 ---help--- 154 Support for MIPv6 route optimization mode. 155 156 config IPV6_TUNNEL 157 tristate "IPv6: IPv6-in-IPv6 tunnel" 158 select INET6_TUNNEL
··· 153 ---help--- 154 Support for MIPv6 route optimization mode. 155 156 + config IPV6_SIT 157 + tristate "IPv6: IPv6-in-IPv4 tunnel (SIT driver)" 158 + depends on IPV6 159 + default y 160 + ---help--- 161 + Tunneling means encapsulating data of one protocol type within 162 + another protocol and sending it over a channel that understands the 163 + encapsulating protocol. This driver implements encapsulation of IPv6 164 + into IPv4 packets. This is useful if you want to connect two IPv6 165 + networks over an IPv4-only path. 166 + 167 + Saying M here will produce a module called sit.ko. If unsure, say Y. 168 + 169 config IPV6_TUNNEL 170 tristate "IPv6: IPv6-in-IPv6 tunnel" 171 select INET6_TUNNEL
+2 -1
net/ipv6/Makefile
··· 4 5 obj-$(CONFIG_IPV6) += ipv6.o 6 7 - ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o sit.o \ 8 route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o raw.o \ 9 protocol.o icmp.o mcast.o reassembly.o tcp_ipv6.o \ 10 exthdrs.o sysctl_net_ipv6.o datagram.o proc.o \ ··· 29 obj-$(CONFIG_INET6_XFRM_MODE_BEET) += xfrm6_mode_beet.o 30 obj-$(CONFIG_NETFILTER) += netfilter/ 31 32 obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o 33 34 obj-y += exthdrs_core.o
··· 4 5 obj-$(CONFIG_IPV6) += ipv6.o 6 7 + ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \ 8 route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o raw.o \ 9 protocol.o icmp.o mcast.o reassembly.o tcp_ipv6.o \ 10 exthdrs.o sysctl_net_ipv6.o datagram.o proc.o \ ··· 29 obj-$(CONFIG_INET6_XFRM_MODE_BEET) += xfrm6_mode_beet.o 30 obj-$(CONFIG_NETFILTER) += netfilter/ 31 32 + obj-$(CONFIG_IPV6_SIT) += sit.o 33 obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o 34 35 obj-y += exthdrs_core.o
+16 -2
net/ipv6/addrconf.c
··· 396 ndev->regen_timer.data = (unsigned long) ndev; 397 if ((dev->flags&IFF_LOOPBACK) || 398 dev->type == ARPHRD_TUNNEL || 399 - dev->type == ARPHRD_NONE || 400 - dev->type == ARPHRD_SIT) { 401 printk(KERN_INFO 402 "%s: Disabled Privacy Extensions\n", 403 dev->name); ··· 1548 This thing is done here expecting that the whole 1549 class of non-broadcast devices need not cloning. 1550 */ 1551 if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT)) 1552 cfg.fc_flags |= RTF_NONEXTHOP; 1553 1554 ip6_route_add(&cfg); 1555 } ··· 1573 ip6_route_add(&cfg); 1574 } 1575 1576 static void sit_route_add(struct net_device *dev) 1577 { 1578 struct fib6_config cfg = { ··· 1587 /* prefix length - 96 bits "::d.d.d.d" */ 1588 ip6_route_add(&cfg); 1589 } 1590 1591 static void addrconf_add_lroute(struct net_device *dev) 1592 { ··· 1858 if (dev == NULL) 1859 goto err_exit; 1860 1861 if (dev->type == ARPHRD_SIT) { 1862 struct ifreq ifr; 1863 mm_segment_t oldfs; ··· 1888 err = dev_open(dev); 1889 } 1890 } 1891 1892 err_exit: 1893 rtnl_unlock(); ··· 2018 return err; 2019 } 2020 2021 static void sit_add_v4_addrs(struct inet6_dev *idev) 2022 { 2023 struct inet6_ifaddr * ifp; ··· 2087 } 2088 } 2089 } 2090 2091 static void init_loopback(struct net_device *dev) 2092 { ··· 2151 addrconf_add_linklocal(idev, &addr); 2152 } 2153 2154 static void addrconf_sit_config(struct net_device *dev) 2155 { 2156 struct inet6_dev *idev; ··· 2177 } else 2178 sit_route_add(dev); 2179 } 2180 2181 static inline int 2182 ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev) ··· 2272 } 2273 2274 switch(dev->type) { 2275 case ARPHRD_SIT: 2276 addrconf_sit_config(dev); 2277 break; 2278 case ARPHRD_TUNNEL6: 2279 addrconf_ip6_tnl_config(dev); 2280 break;
··· 396 ndev->regen_timer.data = (unsigned long) ndev; 397 if ((dev->flags&IFF_LOOPBACK) || 398 dev->type == ARPHRD_TUNNEL || 399 + #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 400 + dev->type == ARPHRD_SIT || 401 + #endif 402 + dev->type == ARPHRD_NONE) { 403 printk(KERN_INFO 404 "%s: Disabled Privacy Extensions\n", 405 dev->name); ··· 1546 This thing is done here expecting that the whole 1547 class of non-broadcast devices need not cloning. 1548 */ 1549 + #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 1550 if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT)) 1551 cfg.fc_flags |= RTF_NONEXTHOP; 1552 + #endif 1553 1554 ip6_route_add(&cfg); 1555 } ··· 1569 ip6_route_add(&cfg); 1570 } 1571 1572 + #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 1573 static void sit_route_add(struct net_device *dev) 1574 { 1575 struct fib6_config cfg = { ··· 1582 /* prefix length - 96 bits "::d.d.d.d" */ 1583 ip6_route_add(&cfg); 1584 } 1585 + #endif 1586 1587 static void addrconf_add_lroute(struct net_device *dev) 1588 { ··· 1852 if (dev == NULL) 1853 goto err_exit; 1854 1855 + #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 1856 if (dev->type == ARPHRD_SIT) { 1857 struct ifreq ifr; 1858 mm_segment_t oldfs; ··· 1881 err = dev_open(dev); 1882 } 1883 } 1884 + #endif 1885 1886 err_exit: 1887 rtnl_unlock(); ··· 2010 return err; 2011 } 2012 2013 + #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 2014 static void sit_add_v4_addrs(struct inet6_dev *idev) 2015 { 2016 struct inet6_ifaddr * ifp; ··· 2078 } 2079 } 2080 } 2081 + #endif 2082 2083 static void init_loopback(struct net_device *dev) 2084 { ··· 2141 addrconf_add_linklocal(idev, &addr); 2142 } 2143 2144 + #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 2145 static void addrconf_sit_config(struct net_device *dev) 2146 { 2147 struct inet6_dev *idev; ··· 2166 } else 2167 sit_route_add(dev); 2168 } 2169 + #endif 2170 2171 static inline int 2172 ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev) ··· 2260 } 2261 2262 switch(dev->type) { 2263 + #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 2264 case ARPHRD_SIT: 2265 addrconf_sit_config(dev); 2266 break; 2267 + #endif 2268 case ARPHRD_TUNNEL6: 2269 addrconf_ip6_tnl_config(dev); 2270 break;
-2
net/ipv6/af_inet6.c
··· 850 err = addrconf_init(); 851 if (err) 852 goto addrconf_fail; 853 - sit_init(); 854 855 /* Init v6 extension headers. */ 856 ipv6_rthdr_init(); ··· 926 mip6_fini(); 927 #endif 928 /* Cleanup code parts. */ 929 - sit_cleanup(); 930 ip6_flowlabel_cleanup(); 931 addrconf_cleanup(); 932 ip6_route_cleanup();
··· 850 err = addrconf_init(); 851 if (err) 852 goto addrconf_fail; 853 854 /* Init v6 extension headers. */ 855 ipv6_rthdr_init(); ··· 927 mip6_fini(); 928 #endif 929 /* Cleanup code parts. */ 930 ip6_flowlabel_cleanup(); 931 addrconf_cleanup(); 932 ip6_route_cleanup();
+3
net/ipv6/sit.c
··· 850 inet_del_protocol(&sit_protocol, IPPROTO_IPV6); 851 goto out; 852 }
··· 850 inet_del_protocol(&sit_protocol, IPPROTO_IPV6); 851 goto out; 852 } 853 + 854 + module_init(sit_init); 855 + module_exit(sit_cleanup);
+6 -7
net/ipv6/tcp_ipv6.c
··· 329 } 330 331 if (sk->sk_state == TCP_TIME_WAIT) { 332 - inet_twsk_put((struct inet_timewait_sock *)sk); 333 return; 334 } 335 ··· 653 int tot_len = sizeof(struct tcphdr); 654 655 if (ts) 656 - tot_len += 3*4; 657 658 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len, 659 GFP_ATOMIC); ··· 749 bh_lock_sock(nsk); 750 return nsk; 751 } 752 - inet_twsk_put((struct inet_timewait_sock *)nsk); 753 return NULL; 754 } 755 ··· 1283 1284 do_time_wait: 1285 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { 1286 - inet_twsk_put((struct inet_timewait_sock *)sk); 1287 goto discard_it; 1288 } 1289 1290 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) { 1291 TCP_INC_STATS_BH(TCP_MIB_INERRS); 1292 - inet_twsk_put((struct inet_timewait_sock *)sk); 1293 goto discard_it; 1294 } 1295 1296 - switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk, 1297 - skb, th)) { 1298 case TCP_TW_SYN: 1299 { 1300 struct sock *sk2;
··· 329 } 330 331 if (sk->sk_state == TCP_TIME_WAIT) { 332 + inet_twsk_put(inet_twsk(sk)); 333 return; 334 } 335 ··· 653 int tot_len = sizeof(struct tcphdr); 654 655 if (ts) 656 + tot_len += TCPOLEN_TSTAMP_ALIGNED; 657 658 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len, 659 GFP_ATOMIC); ··· 749 bh_lock_sock(nsk); 750 return nsk; 751 } 752 + inet_twsk_put(inet_twsk(nsk)); 753 return NULL; 754 } 755 ··· 1283 1284 do_time_wait: 1285 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { 1286 + inet_twsk_put(inet_twsk(sk)); 1287 goto discard_it; 1288 } 1289 1290 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) { 1291 TCP_INC_STATS_BH(TCP_MIB_INERRS); 1292 + inet_twsk_put(inet_twsk(sk)); 1293 goto discard_it; 1294 } 1295 1296 + switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) { 1297 case TCP_TW_SYN: 1298 { 1299 struct sock *sk2;
+1 -1
net/ipv6/xfrm6_policy.c
··· 73 xdst->u.rt6.rt6i_src.plen); 74 if (ipv6_addr_equal(&xdst->u.rt6.rt6i_dst.addr, &fl_dst_prefix) && 75 ipv6_addr_equal(&xdst->u.rt6.rt6i_src.addr, &fl_src_prefix) && 76 - xfrm_bundle_ok(xdst, fl, AF_INET6, 77 (xdst->u.rt6.rt6i_dst.plen != 128 || 78 xdst->u.rt6.rt6i_src.plen != 128))) { 79 dst_clone(dst);
··· 73 xdst->u.rt6.rt6i_src.plen); 74 if (ipv6_addr_equal(&xdst->u.rt6.rt6i_dst.addr, &fl_dst_prefix) && 75 ipv6_addr_equal(&xdst->u.rt6.rt6i_src.addr, &fl_src_prefix) && 76 + xfrm_bundle_ok(policy, xdst, fl, AF_INET6, 77 (xdst->u.rt6.rt6i_dst.plen != 128 || 78 xdst->u.rt6.rt6i_src.plen != 128))) { 79 dst_clone(dst);
-5
net/key/af_key.c
··· 2928 if (*dir) 2929 goto out; 2930 } 2931 - else { 2932 - *dir = security_xfrm_sock_policy_alloc(xp, sk); 2933 - if (*dir) 2934 - goto out; 2935 - } 2936 2937 *dir = pol->sadb_x_policy_dir-1; 2938 return xp;
··· 2928 if (*dir) 2929 goto out; 2930 } 2931 2932 *dir = pol->sadb_x_policy_dir-1; 2933 return xp;
+1 -1
net/netlabel/netlabel_kapi.c
··· 200 int netlbl_cache_add(const struct sk_buff *skb, 201 const struct netlbl_lsm_secattr *secattr) 202 { 203 - if (secattr->cache.data == NULL) 204 return -ENOMSG; 205 206 if (CIPSO_V4_OPTEXIST(skb))
··· 200 int netlbl_cache_add(const struct sk_buff *skb, 201 const struct netlbl_lsm_secattr *secattr) 202 { 203 + if (secattr->cache == NULL) 204 return -ENOMSG; 205 206 if (CIPSO_V4_OPTEXIST(skb))
+2 -3
net/sched/sch_htb.c
··· 786 for (i = 0; i < 500; i++) { 787 struct htb_class *cl; 788 long diff; 789 - struct rb_node *p = q->wait_pq[level].rb_node; 790 if (!p) 791 return 0; 792 - while (p->rb_left) 793 - p = p->rb_left; 794 795 cl = rb_entry(p, struct htb_class, pq_node); 796 if (time_after(cl->pq_key, q->jiffies)) {
··· 786 for (i = 0; i < 500; i++) { 787 struct htb_class *cl; 788 long diff; 789 + struct rb_node *p = rb_first(&q->wait_pq[level]); 790 + 791 if (!p) 792 return 0; 793 794 cl = rb_entry(p, struct htb_class, pq_node); 795 if (time_after(cl->pq_key, q->jiffies)) {
+1 -1
net/sctp/proc.c
··· 344 assoc, sk, sctp_sk(sk)->type, sk->sk_state, 345 assoc->state, hash, assoc->assoc_id, 346 assoc->sndbuf_used, 347 - (sk->sk_rcvbuf - assoc->rwnd), 348 sock_i_uid(sk), sock_i_ino(sk), 349 epb->bind_addr.port, 350 assoc->peer.port);
··· 344 assoc, sk, sctp_sk(sk)->type, sk->sk_state, 345 assoc->state, hash, assoc->assoc_id, 346 assoc->sndbuf_used, 347 + atomic_read(&assoc->rmem_alloc), 348 sock_i_uid(sk), sock_i_ino(sk), 349 epb->bind_addr.port, 350 assoc->peer.port);
+18 -4
net/sctp/socket.c
··· 5362 sctp_association_put(asoc); 5363 } 5364 5365 /* Helper function to wait for space in the sndbuf. */ 5366 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, 5367 size_t msg_len) ··· 5648 sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { 5649 event = sctp_skb2event(skb); 5650 if (event->asoc == assoc) { 5651 - sock_rfree(skb); 5652 __skb_unlink(skb, &oldsk->sk_receive_queue); 5653 __skb_queue_tail(&newsk->sk_receive_queue, skb); 5654 - skb_set_owner_r(skb, newsk); 5655 } 5656 } 5657 ··· 5679 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { 5680 event = sctp_skb2event(skb); 5681 if (event->asoc == assoc) { 5682 - sock_rfree(skb); 5683 __skb_unlink(skb, &oldsp->pd_lobby); 5684 __skb_queue_tail(queue, skb); 5685 - skb_set_owner_r(skb, newsk); 5686 } 5687 } 5688
··· 5362 sctp_association_put(asoc); 5363 } 5364 5365 + /* Do accounting for the receive space on the socket. 5366 + * Accounting for the association is done in ulpevent.c 5367 + * We set this as a destructor for the cloned data skbs so that 5368 + * accounting is done at the correct time. 5369 + */ 5370 + void sctp_sock_rfree(struct sk_buff *skb) 5371 + { 5372 + struct sock *sk = skb->sk; 5373 + struct sctp_ulpevent *event = sctp_skb2event(skb); 5374 + 5375 + atomic_sub(event->rmem_len, &sk->sk_rmem_alloc); 5376 + } 5377 + 5378 + 5379 /* Helper function to wait for space in the sndbuf. */ 5380 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, 5381 size_t msg_len) ··· 5634 sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { 5635 event = sctp_skb2event(skb); 5636 if (event->asoc == assoc) { 5637 + sctp_sock_rfree(skb); 5638 __skb_unlink(skb, &oldsk->sk_receive_queue); 5639 __skb_queue_tail(&newsk->sk_receive_queue, skb); 5640 + sctp_skb_set_owner_r(skb, newsk); 5641 } 5642 } 5643 ··· 5665 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { 5666 event = sctp_skb2event(skb); 5667 if (event->asoc == assoc) { 5668 + sctp_sock_rfree(skb); 5669 __skb_unlink(skb, &oldsp->pd_lobby); 5670 __skb_queue_tail(queue, skb); 5671 + sctp_skb_set_owner_r(skb, newsk); 5672 } 5673 } 5674
+15 -10
net/sctp/ulpevent.c
··· 55 56 57 /* Initialize an ULP event from an given skb. */ 58 - SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event, int msg_flags) 59 { 60 memset(event, 0, sizeof(struct sctp_ulpevent)); 61 event->msg_flags = msg_flags; 62 } 63 64 /* Create a new sctp_ulpevent. */ ··· 76 goto fail; 77 78 event = sctp_skb2event(skb); 79 - sctp_ulpevent_init(event, msg_flags); 80 81 return event; 82 ··· 104 sctp_association_hold((struct sctp_association *)asoc); 105 skb = sctp_event2skb(event); 106 event->asoc = (struct sctp_association *)asoc; 107 - atomic_add(skb->truesize, &event->asoc->rmem_alloc); 108 - skb_set_owner_r(skb, asoc->base.sk); 109 } 110 111 /* A simple destructor to give up the reference to the association. */ 112 static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event) 113 { 114 struct sctp_association *asoc = event->asoc; 115 - struct sk_buff *skb = sctp_event2skb(event); 116 117 - atomic_sub(skb->truesize, &asoc->rmem_alloc); 118 sctp_association_put(asoc); 119 } 120 ··· 374 375 /* Embed the event fields inside the cloned skb. */ 376 event = sctp_skb2event(skb); 377 - sctp_ulpevent_init(event, MSG_NOTIFICATION); 378 379 sre = (struct sctp_remote_error *) 380 skb_push(skb, sizeof(struct sctp_remote_error)); ··· 466 467 /* Embed the event fields inside the cloned skb. */ 468 event = sctp_skb2event(skb); 469 - sctp_ulpevent_init(event, MSG_NOTIFICATION); 470 471 ssf = (struct sctp_send_failed *) 472 skb_push(skb, sizeof(struct sctp_send_failed)); ··· 684 /* Embed the event fields inside the cloned skb. */ 685 event = sctp_skb2event(skb); 686 687 - /* Initialize event with flags 0. */ 688 - sctp_ulpevent_init(event, 0); 689 690 sctp_ulpevent_receive_data(event, asoc); 691
··· 55 56 57 /* Initialize an ULP event from an given skb. */ 58 + SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event, 59 + int msg_flags, 60 + unsigned int len) 61 { 62 memset(event, 0, sizeof(struct sctp_ulpevent)); 63 event->msg_flags = msg_flags; 64 + event->rmem_len = len; 65 } 66 67 /* Create a new sctp_ulpevent. */ ··· 73 goto fail; 74 75 event = sctp_skb2event(skb); 76 + sctp_ulpevent_init(event, msg_flags, skb->truesize); 77 78 return event; 79 ··· 101 sctp_association_hold((struct sctp_association *)asoc); 102 skb = sctp_event2skb(event); 103 event->asoc = (struct sctp_association *)asoc; 104 + atomic_add(event->rmem_len, &event->asoc->rmem_alloc); 105 + sctp_skb_set_owner_r(skb, asoc->base.sk); 106 } 107 108 /* A simple destructor to give up the reference to the association. */ 109 static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event) 110 { 111 struct sctp_association *asoc = event->asoc; 112 113 + atomic_sub(event->rmem_len, &asoc->rmem_alloc); 114 sctp_association_put(asoc); 115 } 116 ··· 372 373 /* Embed the event fields inside the cloned skb. */ 374 event = sctp_skb2event(skb); 375 + sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); 376 377 sre = (struct sctp_remote_error *) 378 skb_push(skb, sizeof(struct sctp_remote_error)); ··· 464 465 /* Embed the event fields inside the cloned skb. */ 466 event = sctp_skb2event(skb); 467 + sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); 468 469 ssf = (struct sctp_send_failed *) 470 skb_push(skb, sizeof(struct sctp_send_failed)); ··· 682 /* Embed the event fields inside the cloned skb. */ 683 event = sctp_skb2event(skb); 684 685 + /* Initialize event with flags 0 and correct length 686 + * Since this is a clone of the original skb, only account for 687 + * the data of this chunk as other chunks will be accounted separately. 688 + */ 689 + sctp_ulpevent_init(event, 0, skb->len + sizeof(struct sk_buff)); 690 691 sctp_ulpevent_receive_data(event, asoc); 692
+1 -1
net/sctp/ulpqueue.c
··· 309 if (!new) 310 return NULL; /* try again later */ 311 312 - new->sk = f_frag->sk; 313 314 skb_shinfo(new)->frag_list = pos; 315 } else
··· 309 if (!new) 310 return NULL; /* try again later */ 311 312 + sctp_skb_set_owner_r(new, f_frag->sk); 313 314 skb_shinfo(new)->frag_list = pos; 315 } else
+75 -26
net/xfrm/xfrm_policy.c
··· 883 } 884 EXPORT_SYMBOL(xfrm_policy_walk); 885 886 - /* Find policy to apply to this flow. */ 887 - 888 static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl, 889 u8 type, u16 family, int dir) 890 { 891 struct xfrm_selector *sel = &pol->selector; 892 - int match; 893 894 if (pol->family != family || 895 pol->type != type) 896 - return 0; 897 898 match = xfrm_selector_match(sel, fl, family); 899 - if (match) { 900 - if (!security_xfrm_policy_lookup(pol, fl->secid, dir)) 901 - return 1; 902 - } 903 904 - return 0; 905 } 906 907 static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl, 908 u16 family, u8 dir) 909 { 910 struct xfrm_policy *pol, *ret; 911 xfrm_address_t *daddr, *saddr; 912 struct hlist_node *entry; ··· 924 chain = policy_hash_direct(daddr, saddr, family, dir); 925 ret = NULL; 926 hlist_for_each_entry(pol, entry, chain, bydst) { 927 - if (xfrm_policy_match(pol, fl, type, family, dir)) { 928 ret = pol; 929 priority = ret->priority; 930 break; ··· 940 } 941 chain = &xfrm_policy_inexact[dir]; 942 hlist_for_each_entry(pol, entry, chain, bydst) { 943 - if (xfrm_policy_match(pol, fl, type, family, dir) && 944 - pol->priority < priority) { 945 ret = pol; 946 break; 947 } 948 } 949 if (ret) 950 xfrm_pol_hold(ret); 951 read_unlock_bh(&xfrm_policy_lock); 952 953 return ret; 954 } 955 956 - static void xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir, 957 void **objp, atomic_t **obj_refp) 958 { 959 struct xfrm_policy *pol; 960 961 #ifdef CONFIG_XFRM_SUB_POLICY 962 pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_SUB, fl, family, dir); 963 - if (pol) 964 goto end; 965 #endif 966 pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN, fl, family, dir); 967 - 968 #ifdef CONFIG_XFRM_SUB_POLICY 969 end: 970 #endif 971 if ((*objp = (void *) pol) != NULL) 972 *obj_refp = &pol->refcnt; 973 } 974 975 static inline int policy_to_flow_dir(int dir) ··· 1016 sk->sk_family); 1017 int err = 0; 1018 1019 - if (match) 1020 - err = security_xfrm_policy_lookup(pol, fl->secid, policy_to_flow_dir(dir)); 1021 - 1022 - if (match && !err) 1023 - xfrm_pol_hold(pol); 1024 - else 1025 pol = NULL; 1026 } 1027 read_unlock_bh(&xfrm_policy_lock); ··· 1317 pol_dead = 0; 1318 xfrm_nr = 0; 1319 1320 - if (sk && sk->sk_policy[1]) 1321 policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); 1322 1323 if (!policy) { 1324 /* To accelerate a bit... */ ··· 1331 1332 policy = flow_cache_lookup(fl, dst_orig->ops->family, 1333 dir, xfrm_policy_lookup); 1334 } 1335 1336 if (!policy) ··· 1379 fl, family, 1380 XFRM_POLICY_OUT); 1381 if (pols[1]) { 1382 if (pols[1]->action == XFRM_POLICY_BLOCK) { 1383 err = -EPERM; 1384 goto error; ··· 1614 } 1615 1616 pol = NULL; 1617 - if (sk && sk->sk_policy[dir]) 1618 pol = xfrm_sk_policy_lookup(sk, dir, &fl); 1619 1620 if (!pol) 1621 pol = flow_cache_lookup(&fl, family, fl_dir, 1622 xfrm_policy_lookup); 1623 1624 if (!pol) { 1625 if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) { ··· 1645 &fl, family, 1646 XFRM_POLICY_IN); 1647 if (pols[1]) { 1648 pols[1]->curlft.use_time = (unsigned long)xtime.tv_sec; 1649 npols ++; 1650 } ··· 1754 1755 static int stale_bundle(struct dst_entry *dst) 1756 { 1757 - return !xfrm_bundle_ok((struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0); 1758 } 1759 1760 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev) ··· 1876 * still valid. 
1877 */ 1878 1879 - int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family, int strict) 1880 { 1881 struct dst_entry *dst = &first->u.dst; 1882 struct xfrm_dst *last; ··· 1894 1895 if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family)) 1896 return 0; 1897 - if (fl && !security_xfrm_flow_state_match(fl, dst->xfrm)) 1898 return 0; 1899 if (dst->xfrm->km.state != XFRM_STATE_VALID) 1900 return 0;
··· 883 } 884 EXPORT_SYMBOL(xfrm_policy_walk); 885 886 + /* 887 + * Find policy to apply to this flow. 888 + * 889 + * Returns 0 if policy found, else an -errno. 890 + */ 891 static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl, 892 u8 type, u16 family, int dir) 893 { 894 struct xfrm_selector *sel = &pol->selector; 895 + int match, ret = -ESRCH; 896 897 if (pol->family != family || 898 pol->type != type) 899 + return ret; 900 901 match = xfrm_selector_match(sel, fl, family); 902 + if (match) 903 + ret = security_xfrm_policy_lookup(pol, fl->secid, dir); 904 905 + return ret; 906 } 907 908 static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl, 909 u16 family, u8 dir) 910 { 911 + int err; 912 struct xfrm_policy *pol, *ret; 913 xfrm_address_t *daddr, *saddr; 914 struct hlist_node *entry; ··· 922 chain = policy_hash_direct(daddr, saddr, family, dir); 923 ret = NULL; 924 hlist_for_each_entry(pol, entry, chain, bydst) { 925 + err = xfrm_policy_match(pol, fl, type, family, dir); 926 + if (err) { 927 + if (err == -ESRCH) 928 + continue; 929 + else { 930 + ret = ERR_PTR(err); 931 + goto fail; 932 + } 933 + } else { 934 ret = pol; 935 priority = ret->priority; 936 break; ··· 930 } 931 chain = &xfrm_policy_inexact[dir]; 932 hlist_for_each_entry(pol, entry, chain, bydst) { 933 + err = xfrm_policy_match(pol, fl, type, family, dir); 934 + if (err) { 935 + if (err == -ESRCH) 936 + continue; 937 + else { 938 + ret = ERR_PTR(err); 939 + goto fail; 940 + } 941 + } else if (pol->priority < priority) { 942 ret = pol; 943 break; 944 } 945 } 946 if (ret) 947 xfrm_pol_hold(ret); 948 + fail: 949 read_unlock_bh(&xfrm_policy_lock); 950 951 return ret; 952 } 953 954 + static int xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir, 955 void **objp, atomic_t **obj_refp) 956 { 957 struct xfrm_policy *pol; 958 + int err = 0; 959 960 #ifdef CONFIG_XFRM_SUB_POLICY 961 pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_SUB, fl, family, dir); 962 + if (IS_ERR(pol)) { 963 + err = PTR_ERR(pol); 964 + pol = NULL; 965 + } 966 + if (pol || err) 967 goto end; 968 #endif 969 pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN, fl, family, dir); 970 + if (IS_ERR(pol)) { 971 + err = PTR_ERR(pol); 972 + pol = NULL; 973 + } 974 #ifdef CONFIG_XFRM_SUB_POLICY 975 end: 976 #endif 977 if ((*objp = (void *) pol) != NULL) 978 *obj_refp = &pol->refcnt; 979 + return err; 980 } 981 982 static inline int policy_to_flow_dir(int dir) ··· 989 sk->sk_family); 990 int err = 0; 991 992 + if (match) { 993 + err = security_xfrm_policy_lookup(pol, fl->secid, 994 + policy_to_flow_dir(dir)); 995 + if (!err) 996 + xfrm_pol_hold(pol); 997 + else if (err == -ESRCH) 998 + pol = NULL; 999 + else 1000 + pol = ERR_PTR(err); 1001 + } else 1002 pol = NULL; 1003 } 1004 read_unlock_bh(&xfrm_policy_lock); ··· 1286 pol_dead = 0; 1287 xfrm_nr = 0; 1288 1289 + if (sk && sk->sk_policy[1]) { 1290 policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); 1291 + if (IS_ERR(policy)) 1292 + return PTR_ERR(policy); 1293 + } 1294 1295 if (!policy) { 1296 /* To accelerate a bit... 
*/ ··· 1297 1298 policy = flow_cache_lookup(fl, dst_orig->ops->family, 1299 dir, xfrm_policy_lookup); 1300 + if (IS_ERR(policy)) 1301 + return PTR_ERR(policy); 1302 } 1303 1304 if (!policy) ··· 1343 fl, family, 1344 XFRM_POLICY_OUT); 1345 if (pols[1]) { 1346 + if (IS_ERR(pols[1])) { 1347 + err = PTR_ERR(pols[1]); 1348 + goto error; 1349 + } 1350 if (pols[1]->action == XFRM_POLICY_BLOCK) { 1351 err = -EPERM; 1352 goto error; ··· 1574 } 1575 1576 pol = NULL; 1577 + if (sk && sk->sk_policy[dir]) { 1578 pol = xfrm_sk_policy_lookup(sk, dir, &fl); 1579 + if (IS_ERR(pol)) 1580 + return 0; 1581 + } 1582 1583 if (!pol) 1584 pol = flow_cache_lookup(&fl, family, fl_dir, 1585 xfrm_policy_lookup); 1586 + 1587 + if (IS_ERR(pol)) 1588 + return 0; 1589 1590 if (!pol) { 1591 if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) { ··· 1599 &fl, family, 1600 XFRM_POLICY_IN); 1601 if (pols[1]) { 1602 + if (IS_ERR(pols[1])) 1603 + return 0; 1604 pols[1]->curlft.use_time = (unsigned long)xtime.tv_sec; 1605 npols ++; 1606 } ··· 1706 1707 static int stale_bundle(struct dst_entry *dst) 1708 { 1709 + return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0); 1710 } 1711 1712 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev) ··· 1828 * still valid. 1829 */ 1830 1831 + int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first, 1832 + struct flowi *fl, int family, int strict) 1833 { 1834 struct dst_entry *dst = &first->u.dst; 1835 struct xfrm_dst *last; ··· 1845 1846 if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family)) 1847 return 0; 1848 + if (fl && !security_xfrm_flow_state_match(fl, dst->xfrm, pol)) 1849 return 0; 1850 if (dst->xfrm->km.state != XFRM_STATE_VALID) 1851 return 0;
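The xfrm_policy.c changes thread security-module errors back through flow_cache_lookup() by encoding a negative errno inside the returned pointer, the kernel's usual ERR_PTR()/IS_ERR()/PTR_ERR() convention. A standalone sketch of that convention (a simplified reimplementation for illustration, not the kernel headers, and with a generic error code rather than the -ESRCH used above):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* simplified versions of the kernel's ERR_PTR helpers */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)	{ return (void *)error; }
static inline long PTR_ERR(const void *ptr)	{ return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* stand-in for a resolver that may fail a security check */
static void *policy_lookup(int deny)
{
	static int dummy_policy;

	if (deny)
		return ERR_PTR(-EACCES);	/* propagate the error in the pointer */
	return &dummy_policy;
}

int main(void)
{
	void *pol = policy_lookup(1);

	if (IS_ERR(pol))
		printf("lookup failed: %ld\n", PTR_ERR(pol));	/* -13 */
	else
		printf("got policy %p\n", pol);
	return 0;
}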
-9
net/xfrm/xfrm_user.c
··· 1992 xp->type = XFRM_POLICY_TYPE_MAIN; 1993 copy_templates(xp, ut, nr); 1994 1995 - if (!xp->security) { 1996 - int err = security_xfrm_sock_policy_alloc(xp, sk); 1997 - if (err) { 1998 - kfree(xp); 1999 - *dir = err; 2000 - return NULL; 2001 - } 2002 - } 2003 - 2004 *dir = p->dir; 2005 2006 return xp;
··· 1992 xp->type = XFRM_POLICY_TYPE_MAIN; 1993 copy_templates(xp, ut, nr); 1994 1995 *dir = p->dir; 1996 1997 return xp;
+2 -1
security/dummy.c
··· 881 return 1; 882 } 883 884 - static int dummy_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm) 885 { 886 return 1; 887 }
··· 881 return 1; 882 } 883 884 + static int dummy_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm, 885 + struct xfrm_policy *xp) 886 { 887 return 1; 888 }
+2 -1
security/selinux/include/xfrm.h
··· 19 int selinux_xfrm_policy_lookup(struct xfrm_policy *xp, u32 fl_secid, u8 dir); 20 int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, 21 struct xfrm_policy *xp, struct flowi *fl); 22 - int selinux_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm); 23 24 25 /*
··· 19 int selinux_xfrm_policy_lookup(struct xfrm_policy *xp, u32 fl_secid, u8 dir); 20 int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, 21 struct xfrm_policy *xp, struct flowi *fl); 22 + int selinux_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm, 23 + struct xfrm_policy *xp); 24 25 26 /*
+2
security/selinux/ss/policydb.c
··· 618 c = c->next; 619 ocontext_destroy(ctmp,i); 620 } 621 } 622 623 g = p->genfs; ··· 634 g = g->next; 635 kfree(gtmp); 636 } 637 638 cond_policydb_destroy(p); 639
··· 618 c = c->next; 619 ocontext_destroy(ctmp,i); 620 } 621 + p->ocontexts[i] = NULL; 622 } 623 624 g = p->genfs; ··· 633 g = g->next; 634 kfree(gtmp); 635 } 636 + p->genfs = NULL; 637 638 cond_policydb_destroy(p); 639
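The two added assignments clear the just-freed list heads, so a later pass over the same policydb (for instance a second destroy on an error path) walks empty lists instead of dangling pointers. A minimal sketch of that defensive pattern, with a hypothetical node type standing in for the ocontext/genfs lists, could be:

#include <stdlib.h>

/* Hypothetical node type standing in for the ocontext/genfs lists. */
struct node {
	struct node *next;
};

struct db {
	struct node *head;
};

/* Free the list, then clear the head so destroy() can safely run again. */
static void destroy(struct db *db)
{
	struct node *n = db->head;

	while (n) {
		struct node *next = n->next;
		free(n);
		n = next;
	}
	db->head = NULL;	/* without this, a second destroy() walks freed memory */
}

int main(void)
{
	struct db db = { .head = NULL };
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return 1;
	n->next = NULL;
	db.head = n;

	destroy(&db);
	destroy(&db);		/* now a harmless no-op */
	return 0;
}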
+30 -36
security/selinux/ss/services.c
··· 2172 */ 2173 static void selinux_netlbl_cache_free(const void *data) 2174 { 2175 - struct netlbl_cache *cache = NETLBL_CACHE(data); 2176 switch (cache->type) { 2177 case NETLBL_CACHE_T_MLS: 2178 ebitmap_destroy(&cache->data.mls_label.level[0].cat); ··· 2202 struct netlbl_lsm_secattr secattr; 2203 2204 netlbl_secattr_init(&secattr); 2205 2206 cache = kzalloc(sizeof(*cache), GFP_ATOMIC); 2207 if (cache == NULL) 2208 - goto netlbl_cache_add_failure; 2209 - secattr.cache.free = selinux_netlbl_cache_free; 2210 - secattr.cache.data = (void *)cache; 2211 2212 cache->type = NETLBL_CACHE_T_MLS; 2213 if (ebitmap_cpy(&cache->data.mls_label.level[0].cat, 2214 &ctx->range.level[0].cat) != 0) 2215 - goto netlbl_cache_add_failure; 2216 cache->data.mls_label.level[1].cat.highbit = 2217 cache->data.mls_label.level[0].cat.highbit; 2218 cache->data.mls_label.level[1].cat.node = ··· 2223 cache->data.mls_label.level[0].sens = ctx->range.level[0].sens; 2224 cache->data.mls_label.level[1].sens = ctx->range.level[0].sens; 2225 2226 - if (netlbl_cache_add(skb, &secattr) != 0) 2227 - goto netlbl_cache_add_failure; 2228 2229 - return; 2230 - 2231 - netlbl_cache_add_failure: 2232 - netlbl_secattr_destroy(&secattr, 1); 2233 } 2234 2235 /** ··· 2268 2269 POLICY_RDLOCK; 2270 2271 - if (secattr->cache.data) { 2272 - cache = NETLBL_CACHE(secattr->cache.data); 2273 switch (cache->type) { 2274 case NETLBL_CACHE_T_SID: 2275 *sid = cache->data.sid; ··· 2336 selinux_netlbl_cache_add(skb, &ctx_new); 2337 ebitmap_destroy(&ctx_new.range.level[0].cat); 2338 } else { 2339 - *sid = SECINITSID_UNLABELED; 2340 rc = 0; 2341 } 2342 ··· 2374 &secattr, 2375 base_sid, 2376 sid); 2377 - netlbl_secattr_destroy(&secattr, 0); 2378 2379 return rc; 2380 } ··· 2420 if (rc == 0) 2421 sksec->nlbl_state = NLBL_LABELED; 2422 2423 - netlbl_secattr_destroy(&secattr, 0); 2424 2425 netlbl_socket_setsid_return: 2426 POLICY_RDUNLOCK; ··· 2519 if (netlbl_sock_getattr(sk, &secattr) == 0 && 2520 selinux_netlbl_secattr_to_sid(NULL, 2521 &secattr, 2522 - sksec->sid, 2523 &nlbl_peer_sid) == 0) 2524 sksec->peer_sid = nlbl_peer_sid; 2525 - netlbl_secattr_destroy(&secattr, 0); 2526 2527 sksec->nlbl_state = NLBL_REQUIRE; 2528 ··· 2550 2551 rc = selinux_netlbl_skbuff_getsid(skb, sock_sid, &peer_sid); 2552 if (rc != 0) 2553 - return SECSID_NULL; 2554 - 2555 - if (peer_sid == SECINITSID_UNLABELED) 2556 return SECSID_NULL; 2557 2558 return peer_sid; ··· 2613 u32 netlbl_sid; 2614 u32 recv_perm; 2615 2616 - rc = selinux_netlbl_skbuff_getsid(skb, SECINITSID_NETMSG, &netlbl_sid); 2617 if (rc != 0) 2618 return rc; 2619 2620 - if (netlbl_sid == SECINITSID_UNLABELED) 2621 return 0; 2622 2623 switch (sksec->sclass) { ··· 2657 u32 selinux_netlbl_socket_getpeersec_stream(struct socket *sock) 2658 { 2659 struct sk_security_struct *sksec = sock->sk->sk_security; 2660 - 2661 - if (sksec->peer_sid == SECINITSID_UNLABELED) 2662 - return SECSID_NULL; 2663 - 2664 return sksec->peer_sid; 2665 } 2666 ··· 2672 u32 selinux_netlbl_socket_getpeersec_dgram(struct sk_buff *skb) 2673 { 2674 int peer_sid; 2675 - struct sock *sk = skb->sk; 2676 - struct inode_security_struct *isec; 2677 2678 - if (sk == NULL || sk->sk_socket == NULL) 2679 - return SECSID_NULL; 2680 - 2681 - isec = SOCK_INODE(sk->sk_socket)->i_security; 2682 - if (selinux_netlbl_skbuff_getsid(skb, isec->sid, &peer_sid) != 0) 2683 - return SECSID_NULL; 2684 - if (peer_sid == SECINITSID_UNLABELED) 2685 return SECSID_NULL; 2686 2687 return peer_sid;
··· 2172 */ 2173 static void selinux_netlbl_cache_free(const void *data) 2174 { 2175 + struct netlbl_cache *cache; 2176 + 2177 + if (data == NULL) 2178 + return; 2179 + 2180 + cache = NETLBL_CACHE(data); 2181 switch (cache->type) { 2182 case NETLBL_CACHE_T_MLS: 2183 ebitmap_destroy(&cache->data.mls_label.level[0].cat); ··· 2197 struct netlbl_lsm_secattr secattr; 2198 2199 netlbl_secattr_init(&secattr); 2200 + secattr.cache = netlbl_secattr_cache_alloc(GFP_ATOMIC); 2201 + if (secattr.cache == NULL) 2202 + goto netlbl_cache_add_return; 2203 2204 cache = kzalloc(sizeof(*cache), GFP_ATOMIC); 2205 if (cache == NULL) 2206 + goto netlbl_cache_add_return; 2207 + secattr.cache->free = selinux_netlbl_cache_free; 2208 + secattr.cache->data = (void *)cache; 2209 2210 cache->type = NETLBL_CACHE_T_MLS; 2211 if (ebitmap_cpy(&cache->data.mls_label.level[0].cat, 2212 &ctx->range.level[0].cat) != 0) 2213 + goto netlbl_cache_add_return; 2214 cache->data.mls_label.level[1].cat.highbit = 2215 cache->data.mls_label.level[0].cat.highbit; 2216 cache->data.mls_label.level[1].cat.node = ··· 2215 cache->data.mls_label.level[0].sens = ctx->range.level[0].sens; 2216 cache->data.mls_label.level[1].sens = ctx->range.level[0].sens; 2217 2218 + netlbl_cache_add(skb, &secattr); 2219 2220 + netlbl_cache_add_return: 2221 + netlbl_secattr_destroy(&secattr); 2222 } 2223 2224 /** ··· 2263 2264 POLICY_RDLOCK; 2265 2266 + if (secattr->cache) { 2267 + cache = NETLBL_CACHE(secattr->cache->data); 2268 switch (cache->type) { 2269 case NETLBL_CACHE_T_SID: 2270 *sid = cache->data.sid; ··· 2331 selinux_netlbl_cache_add(skb, &ctx_new); 2332 ebitmap_destroy(&ctx_new.range.level[0].cat); 2333 } else { 2334 + *sid = SECSID_NULL; 2335 rc = 0; 2336 } 2337 ··· 2369 &secattr, 2370 base_sid, 2371 sid); 2372 + netlbl_secattr_destroy(&secattr); 2373 2374 return rc; 2375 } ··· 2415 if (rc == 0) 2416 sksec->nlbl_state = NLBL_LABELED; 2417 2418 + netlbl_secattr_destroy(&secattr); 2419 2420 netlbl_socket_setsid_return: 2421 POLICY_RDUNLOCK; ··· 2514 if (netlbl_sock_getattr(sk, &secattr) == 0 && 2515 selinux_netlbl_secattr_to_sid(NULL, 2516 &secattr, 2517 + SECINITSID_UNLABELED, 2518 &nlbl_peer_sid) == 0) 2519 sksec->peer_sid = nlbl_peer_sid; 2520 + netlbl_secattr_destroy(&secattr); 2521 2522 sksec->nlbl_state = NLBL_REQUIRE; 2523 ··· 2545 2546 rc = selinux_netlbl_skbuff_getsid(skb, sock_sid, &peer_sid); 2547 if (rc != 0) 2548 return SECSID_NULL; 2549 2550 return peer_sid; ··· 2611 u32 netlbl_sid; 2612 u32 recv_perm; 2613 2614 + rc = selinux_netlbl_skbuff_getsid(skb, 2615 + SECINITSID_UNLABELED, 2616 + &netlbl_sid); 2617 if (rc != 0) 2618 return rc; 2619 2620 + if (netlbl_sid == SECSID_NULL) 2621 return 0; 2622 2623 switch (sksec->sclass) { ··· 2653 u32 selinux_netlbl_socket_getpeersec_stream(struct socket *sock) 2654 { 2655 struct sk_security_struct *sksec = sock->sk->sk_security; 2656 return sksec->peer_sid; 2657 } 2658 ··· 2672 u32 selinux_netlbl_socket_getpeersec_dgram(struct sk_buff *skb) 2673 { 2674 int peer_sid; 2675 2676 + if (selinux_netlbl_skbuff_getsid(skb, 2677 + SECINITSID_UNLABELED, 2678 + &peer_sid) != 0) 2679 return SECSID_NULL; 2680 2681 return peer_sid;
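In the rewritten selinux_netlbl_cache_add() above, every path, success or failure, now ends at a single label and one netlbl_secattr_destroy() call, with the cache carried as a separately allocated object rather than freed piecemeal under a flag. A small userspace sketch of that goto-based single-cleanup idiom — hypothetical types in place of the NetLabel structures, and ownership transfer standing in for the kernel's refcounted cache — might be:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for the NetLabel secattr and its cache entry. */
struct cache_entry { char *data; };
struct secattr { struct cache_entry *cache; };

static struct cache_entry *lsm_cache;	/* plays the role of the NetLabel cache */

/* One place owns teardown for whatever the secattr still holds. */
static void secattr_destroy(struct secattr *sa)
{
	if (sa->cache) {
		free(sa->cache->data);
		free(sa->cache);
		sa->cache = NULL;
	}
}

static int cache_add(const char *payload, int fail)
{
	struct secattr sa = { .cache = NULL };
	int rc = -1;

	sa.cache = calloc(1, sizeof(*sa.cache));
	if (!sa.cache || fail)
		goto out;		/* every error path ends at the same label */

	sa.cache->data = strdup(payload);
	if (!sa.cache->data)
		goto out;

	/* Hand the entry over; the secattr no longer owns it.  (The kernel
	 * instead refcounts the cache object and always calls
	 * netlbl_secattr_destroy(), which drops only the local reference.) */
	lsm_cache = sa.cache;
	sa.cache = NULL;
	rc = 0;
out:
	secattr_destroy(&sa);
	return rc;
}

int main(void)
{
	printf("ok=%d failed=%d\n", cache_add("payload", 0), cache_add("payload", 1));
	if (lsm_cache) {
		free(lsm_cache->data);
		free(lsm_cache);
	}
	return 0;
}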
+41 -12
security/selinux/xfrm.c
··· 77 */ 78 int selinux_xfrm_policy_lookup(struct xfrm_policy *xp, u32 fl_secid, u8 dir) 79 { 80 - int rc = 0; 81 - u32 sel_sid = SECINITSID_UNLABELED; 82 struct xfrm_sec_ctx *ctx; 83 84 /* Context sid is either set to label or ANY_ASSOC */ ··· 88 89 sel_sid = ctx->ctx_sid; 90 } 91 92 rc = avc_has_perm(fl_secid, sel_sid, SECCLASS_ASSOCIATION, 93 ASSOCIATION__POLMATCH, 94 NULL); 95 96 return rc; 97 } ··· 118 u32 pol_sid; 119 int err; 120 121 - if (x->security) 122 - state_sid = x->security->ctx_sid; 123 - else 124 - state_sid = SECINITSID_UNLABELED; 125 - 126 - if (xp->security) 127 pol_sid = xp->security->ctx_sid; 128 - else 129 - pol_sid = SECINITSID_UNLABELED; 130 131 err = avc_has_perm(state_sid, pol_sid, SECCLASS_ASSOCIATION, 132 ASSOCIATION__POLMATCH, ··· 140 if (err) 141 return 0; 142 143 - return selinux_xfrm_flow_state_match(fl, x); 144 } 145 146 /* ··· 152 * can use a given security association. 153 */ 154 155 - int selinux_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm) 156 { 157 int rc = 0; 158 u32 sel_sid = SECINITSID_UNLABELED; 159 struct xfrm_sec_ctx *ctx; 160 161 /* Context sid is either set to label or ANY_ASSOC */ 162 if ((ctx = xfrm->security)) {
··· 77 */ 78 int selinux_xfrm_policy_lookup(struct xfrm_policy *xp, u32 fl_secid, u8 dir) 79 { 80 + int rc; 81 + u32 sel_sid; 82 struct xfrm_sec_ctx *ctx; 83 84 /* Context sid is either set to label or ANY_ASSOC */ ··· 88 89 sel_sid = ctx->ctx_sid; 90 } 91 + else 92 + /* 93 + * All flows should be treated as polmatch'ing an 94 + * otherwise applicable "non-labeled" policy. This 95 + * would prevent inadvertent "leaks". 96 + */ 97 + return 0; 98 99 rc = avc_has_perm(fl_secid, sel_sid, SECCLASS_ASSOCIATION, 100 ASSOCIATION__POLMATCH, 101 NULL); 102 + 103 + if (rc == -EACCES) 104 + rc = -ESRCH; 105 106 return rc; 107 } ··· 108 u32 pol_sid; 109 int err; 110 111 + if (xp->security) { 112 + if (!x->security) 113 + /* unlabeled SA and labeled policy can't match */ 114 + return 0; 115 + else 116 + state_sid = x->security->ctx_sid; 117 pol_sid = xp->security->ctx_sid; 118 + } else 119 + if (x->security) 120 + /* unlabeled policy and labeled SA can't match */ 121 + return 0; 122 + else 123 + /* unlabeled policy and unlabeled SA match all flows */ 124 + return 1; 125 126 err = avc_has_perm(state_sid, pol_sid, SECCLASS_ASSOCIATION, 127 ASSOCIATION__POLMATCH, ··· 125 if (err) 126 return 0; 127 128 + err = avc_has_perm(fl->secid, state_sid, SECCLASS_ASSOCIATION, 129 + ASSOCIATION__SENDTO, 130 + NULL)? 0:1; 131 + 132 + return err; 133 } 134 135 /* ··· 133 * can use a given security association. 134 */ 135 136 + int selinux_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm, 137 + struct xfrm_policy *xp) 138 { 139 int rc = 0; 140 u32 sel_sid = SECINITSID_UNLABELED; 141 struct xfrm_sec_ctx *ctx; 142 + 143 + if (!xp->security) 144 + if (!xfrm->security) 145 + return 1; 146 + else 147 + return 0; 148 + else 149 + if (!xfrm->security) 150 + return 0; 151 152 /* Context sid is either set to label or ANY_ASSOC */ 153 if ((ctx = xfrm->security)) {
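The matching logic above boils down to a small truth table over whether the SA and the policy carry a security context: both unlabeled match, a labeled/unlabeled mix never matches, and two labeled objects defer to the AVC. A compact sketch of that decision, with a hypothetical has_label flag and a plain SID comparison in place of the real xfrm_sec_ctx and avc_has_perm(), might be:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical flattening of "does this SA/policy carry an xfrm_sec_ctx?". */
struct labeled { bool has_label; int sid; };

/* Stand-in for the AVC checks; here, matching SIDs are simply allowed. */
static bool avc_allowed(int flow_sid, int state_sid)
{
	return flow_sid == state_sid;
}

static int flow_state_match(const struct labeled *x, const struct labeled *xp, int flow_sid)
{
	if (!xp->has_label)
		return !x->has_label;	/* unlabeled policy matches only unlabeled SAs */
	if (!x->has_label)
		return 0;		/* labeled policy never matches an unlabeled SA */
	return avc_allowed(flow_sid, x->sid);
}

int main(void)
{
	struct labeled plain = { .has_label = false, .sid = 0 };
	struct labeled secret = { .has_label = true, .sid = 42 };

	printf("unlabeled SA / unlabeled policy: %d\n", flow_state_match(&plain, &plain, 42));
	printf("labeled SA   / unlabeled policy: %d\n", flow_state_match(&secret, &plain, 42));
	printf("labeled SA   / labeled policy:   %d\n", flow_state_match(&secret, &secret, 42));
	return 0;
}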