Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
[PKT_SCHED] sch_htb: use rb_first() cleanup
[RTNETLINK]: Fix use of wrong skb in do_getlink()
[DECNET]: Fix sfuzz hanging on 2.6.18
[NET]: Do not memcmp() over pad bytes of struct flowi.
[NET]: Introduce protocol-specific destructor for time-wait sockets.
[NET]: Use typesafe inet_twsk() inline function instead of cast.
[NET]: Use hton{l,s}() for non-initializers.
[TCP]: Use TCPOLEN_TSTAMP_ALIGNED macro instead of magic number.
[IPV6]: Separate sit driver to extra module (addrconf.c changes)
[IPV6]: Separate sit driver to extra module
[NET]: File descriptor loss while receiving SCM_RIGHTS
[SCTP]: Fix the RX queue size shown in /proc/net/sctp/assocs output.
[SCTP]: Fix receive buffer accounting.
SELinux: Bug fix in policydb_destroy
IPsec: fix handling of errors for socket policies
IPsec: correct semantics for SELinux policy matching
IPsec: propagate security module errors up from flow_cache_lookup
NetLabel: use SECINITSID_UNLABELED for a base SID
NetLabel: fix a cache race condition

+409 -217
+9 -15
include/linux/security.h
··· 882 882 * Check permission when a flow selects a xfrm_policy for processing 883 883 * XFRMs on a packet. The hook is called when selecting either a 884 884 * per-socket policy or a generic xfrm policy. 885 - * Return 0 if permission is granted. 885 + * Return 0 if permission is granted, -ESRCH otherwise, or -errno 886 + * on other errors. 886 887 * @xfrm_state_pol_flow_match: 887 888 * @x contains the state to match. 888 889 * @xp contains the policy to check for a match. ··· 892 891 * @xfrm_flow_state_match: 893 892 * @fl contains the flow key to match. 894 893 * @xfrm points to the xfrm_state to match. 894 + * @xp points to the xfrm_policy to match. 895 895 * Return 1 if there is a match. 896 896 * @xfrm_decode_session: 897 897 * @skb points to skb to decode. ··· 1390 1388 int (*xfrm_policy_lookup)(struct xfrm_policy *xp, u32 fl_secid, u8 dir); 1391 1389 int (*xfrm_state_pol_flow_match)(struct xfrm_state *x, 1392 1390 struct xfrm_policy *xp, struct flowi *fl); 1393 - int (*xfrm_flow_state_match)(struct flowi *fl, struct xfrm_state *xfrm); 1391 + int (*xfrm_flow_state_match)(struct flowi *fl, struct xfrm_state *xfrm, 1392 + struct xfrm_policy *xp); 1394 1393 int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall); 1395 1394 #endif /* CONFIG_SECURITY_NETWORK_XFRM */ 1396 1395 ··· 3123 3120 return security_ops->xfrm_policy_alloc_security(xp, sec_ctx, NULL); 3124 3121 } 3125 3122 3126 - static inline int security_xfrm_sock_policy_alloc(struct xfrm_policy *xp, struct sock *sk) 3127 - { 3128 - return security_ops->xfrm_policy_alloc_security(xp, NULL, sk); 3129 - } 3130 - 3131 3123 static inline int security_xfrm_policy_clone(struct xfrm_policy *old, struct xfrm_policy *new) 3132 3124 { 3133 3125 return security_ops->xfrm_policy_clone_security(old, new); ··· 3173 3175 return security_ops->xfrm_state_pol_flow_match(x, xp, fl); 3174 3176 } 3175 3177 3176 - static inline int security_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm) 3178 + static inline int security_xfrm_flow_state_match(struct flowi *fl, 3179 + struct xfrm_state *xfrm, struct xfrm_policy *xp) 3177 3180 { 3178 - return security_ops->xfrm_flow_state_match(fl, xfrm); 3181 + return security_ops->xfrm_flow_state_match(fl, xfrm, xp); 3179 3182 } 3180 3183 3181 3184 static inline int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid) ··· 3192 3193 } 3193 3194 #else /* CONFIG_SECURITY_NETWORK_XFRM */ 3194 3195 static inline int security_xfrm_policy_alloc(struct xfrm_policy *xp, struct xfrm_user_sec_ctx *sec_ctx) 3195 - { 3196 - return 0; 3197 - } 3198 - 3199 - static inline int security_xfrm_sock_policy_alloc(struct xfrm_policy *xp, struct sock *sk) 3200 3196 { 3201 3197 return 0; 3202 3198 } ··· 3243 3249 } 3244 3250 3245 3251 static inline int security_xfrm_flow_state_match(struct flowi *fl, 3246 - struct xfrm_state *xfrm) 3252 + struct xfrm_state *xfrm, struct xfrm_policy *xp) 3247 3253 { 3248 3254 return 1; 3249 3255 }
+1 -1
include/net/flow.h
··· 97 97 #define FLOW_DIR_FWD 2 98 98 99 99 struct sock; 100 - typedef void (*flow_resolve_t)(struct flowi *key, u16 family, u8 dir, 100 + typedef int (*flow_resolve_t)(struct flowi *key, u16 family, u8 dir, 101 101 void **objp, atomic_t **obj_refp); 102 102 103 103 extern void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
+1
include/net/inet_timewait_sock.h
··· 196 196 { 197 197 if (atomic_dec_and_test(&tw->tw_refcnt)) { 198 198 struct module *owner = tw->tw_prot->owner; 199 + twsk_destructor((struct sock *)tw); 199 200 #ifdef SOCK_REFCNT_DEBUG 200 201 printk(KERN_DEBUG "%s timewait_sock %p released\n", 201 202 tw->tw_prot->name, tw);
+47 -15
include/net/netlabel.h
··· 34 34 #include <linux/net.h> 35 35 #include <linux/skbuff.h> 36 36 #include <net/netlink.h> 37 + #include <asm/atomic.h> 37 38 38 39 /* 39 40 * NetLabel - A management interface for maintaining network packet label ··· 107 106 108 107 /* LSM security attributes */ 109 108 struct netlbl_lsm_cache { 109 + atomic_t refcount; 110 110 void (*free) (const void *data); 111 111 void *data; 112 112 }; ··· 119 117 unsigned char *mls_cat; 120 118 size_t mls_cat_len; 121 119 122 - struct netlbl_lsm_cache cache; 120 + struct netlbl_lsm_cache *cache; 123 121 }; 124 122 125 123 /* 126 124 * LSM security attribute operations 127 125 */ 128 126 127 + 128 + /** 129 + * netlbl_secattr_cache_alloc - Allocate and initialize a secattr cache 130 + * @flags: the memory allocation flags 131 + * 132 + * Description: 133 + * Allocate and initialize a netlbl_lsm_cache structure. Returns a pointer 134 + * on success, NULL on failure. 135 + * 136 + */ 137 + static inline struct netlbl_lsm_cache *netlbl_secattr_cache_alloc(int flags) 138 + { 139 + struct netlbl_lsm_cache *cache; 140 + 141 + cache = kzalloc(sizeof(*cache), flags); 142 + if (cache) 143 + atomic_set(&cache->refcount, 1); 144 + return cache; 145 + } 146 + 147 + /** 148 + * netlbl_secattr_cache_free - Frees a netlbl_lsm_cache struct 149 + * @cache: the struct to free 150 + * 151 + * Description: 152 + * Frees @secattr including all of the internal buffers. 153 + * 154 + */ 155 + static inline void netlbl_secattr_cache_free(struct netlbl_lsm_cache *cache) 156 + { 157 + if (!atomic_dec_and_test(&cache->refcount)) 158 + return; 159 + 160 + if (cache->free) 161 + cache->free(cache->data); 162 + kfree(cache); 163 + } 129 164 130 165 /** 131 166 * netlbl_secattr_init - Initialize a netlbl_lsm_secattr struct ··· 182 143 /** 183 144 * netlbl_secattr_destroy - Clears a netlbl_lsm_secattr struct 184 145 * @secattr: the struct to clear 185 - * @clear_cache: cache clear flag 186 146 * 187 147 * Description: 188 148 * Destroys the @secattr struct, including freeing all of the internal buffers. 189 - * If @clear_cache is true then free the cache fields, otherwise leave them 190 - * intact. The struct must be reset with a call to netlbl_secattr_init() 191 - * before reuse. 149 + * The struct must be reset with a call to netlbl_secattr_init() before reuse. 192 150 * 193 151 */ 194 - static inline void netlbl_secattr_destroy(struct netlbl_lsm_secattr *secattr, 195 - u32 clear_cache) 152 + static inline void netlbl_secattr_destroy(struct netlbl_lsm_secattr *secattr) 196 153 { 197 - if (clear_cache && secattr->cache.data != NULL && secattr->cache.free) 198 - secattr->cache.free(secattr->cache.data); 154 + if (secattr->cache) 155 + netlbl_secattr_cache_free(secattr->cache); 199 156 kfree(secattr->domain); 200 157 kfree(secattr->mls_cat); 201 158 } ··· 213 178 /** 214 179 * netlbl_secattr_free - Frees a netlbl_lsm_secattr struct 215 180 * @secattr: the struct to free 216 - * @clear_cache: cache clear flag 217 181 * 218 182 * Description: 219 - * Frees @secattr including all of the internal buffers. If @clear_cache is 220 - * true then free the cache fields, otherwise leave them intact. 183 + * Frees @secattr including all of the internal buffers. 221 184 * 222 185 */ 223 - static inline void netlbl_secattr_free(struct netlbl_lsm_secattr *secattr, 224 - u32 clear_cache) 186 + static inline void netlbl_secattr_free(struct netlbl_lsm_secattr *secattr) 225 187 { 226 - netlbl_secattr_destroy(secattr, clear_cache); 188 + netlbl_secattr_destroy(secattr); 227 189 kfree(secattr); 228 190 } 229 191
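For illustration, the refcounted cache introduced above is meant to be shared between the LSM and the protocol-level cache roughly as follows; this is only a sketch (my_lsm_blob_free() and my_lsm_cache_add() are hypothetical names), the real users are the cipso_ipv4.c and selinux/ss/services.c hunks further down.

    static void my_lsm_blob_free(const void *data)
    {
            kfree(data);
    }

    static int my_lsm_cache_add(const struct sk_buff *skb, void *blob)
    {
            int rc;
            struct netlbl_lsm_secattr secattr;

            netlbl_secattr_init(&secattr);
            secattr.cache = netlbl_secattr_cache_alloc(GFP_ATOMIC);
            if (secattr.cache == NULL)
                    return -ENOMEM;
            secattr.cache->free = my_lsm_blob_free;
            secattr.cache->data = blob;

            /* cipso_v4_cache_add() takes its own reference on the cache, so
             * destroying the secattr below only drops ours; the blob is freed
             * exactly once, by the last netlbl_secattr_cache_free() caller. */
            rc = netlbl_cache_add(skb, &secattr);
            netlbl_secattr_destroy(&secattr);
            return rc;
    }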
+14
include/net/sctp/sctp.h
··· 139 139 void sctp_write_space(struct sock *sk); 140 140 unsigned int sctp_poll(struct file *file, struct socket *sock, 141 141 poll_table *wait); 142 + void sctp_sock_rfree(struct sk_buff *skb); 142 143 143 144 /* 144 145 * sctp/primitive.c ··· 443 442 INIT_LIST_HEAD(result); 444 443 } 445 444 return result; 445 + } 446 + 447 + /* SCTP version of skb_set_owner_r. We need this one because 448 + * of the way we have to do receive buffer accounting on bundled 449 + * chunks. 450 + */ 451 + static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) 452 + { 453 + struct sctp_ulpevent *event = sctp_skb2event(skb); 454 + 455 + skb->sk = sk; 456 + skb->destructor = sctp_sock_rfree; 457 + atomic_add(event->rmem_len, &sk->sk_rmem_alloc); 446 458 } 447 459 448 460 /* Tests if the list has one and only one entry. */
+1
include/net/sctp/ulpevent.h
··· 63 63 __u32 cumtsn; 64 64 int msg_flags; 65 65 int iif; 66 + unsigned int rmem_len; 66 67 }; 67 68 68 69 /* Retrieve the skb this event sits inside of. */
+7
include/net/timewait_sock.h
··· 19 19 unsigned int twsk_obj_size; 20 20 int (*twsk_unique)(struct sock *sk, 21 21 struct sock *sktw, void *twp); 22 + void (*twsk_destructor)(struct sock *sk); 22 23 }; 23 24 24 25 static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp) ··· 27 26 if (sk->sk_prot->twsk_prot->twsk_unique != NULL) 28 27 return sk->sk_prot->twsk_prot->twsk_unique(sk, sktw, twp); 29 28 return 0; 29 + } 30 + 31 + static inline void twsk_destructor(struct sock *sk) 32 + { 33 + if (sk->sk_prot->twsk_prot->twsk_destructor != NULL) 34 + sk->sk_prot->twsk_prot->twsk_destructor(sk); 30 35 } 31 36 32 37 #endif /* _TIMEWAIT_SOCK_H */
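A protocol that needs to release private state when a time-wait socket finally dies can now hook the new callback; a minimal sketch with hypothetical my_proto_* names (only the ops field and the twsk_destructor() wrapper are what this merge adds):

    static void my_proto_twsk_destructor(struct sock *sk)
    {
            /* tear down protocol-private data attached to the
             * inet_timewait_sock before it is freed */
    }

    static struct timewait_sock_ops my_proto_timewait_sock_ops = {
            .twsk_obj_size   = sizeof(struct inet_timewait_sock),
            .twsk_destructor = my_proto_twsk_destructor,
    };

With sk->sk_prot->twsk_prot pointing at such an ops table, inet_twsk_put() (see the inet_timewait_sock.h hunk above) invokes the destructor exactly once, after the last reference is dropped and just before the socket is freed.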
+2 -1
include/net/xfrm.h
··· 995 995 int create, unsigned short family); 996 996 extern void xfrm_policy_flush(u8 type); 997 997 extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol); 998 - extern int xfrm_bundle_ok(struct xfrm_dst *xdst, struct flowi *fl, int family, int strict); 998 + extern int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *xdst, 999 + struct flowi *fl, int family, int strict); 999 1000 extern void xfrm_init_pmtu(struct dst_entry *dst); 1000 1001 1001 1002 extern wait_queue_head_t km_waitq;
+1 -2
net/compat.c
··· 285 285 286 286 if (i > 0) { 287 287 int cmlen = CMSG_COMPAT_LEN(i * sizeof(int)); 288 - if (!err) 289 - err = put_user(SOL_SOCKET, &cm->cmsg_level); 288 + err = put_user(SOL_SOCKET, &cm->cmsg_level); 290 289 if (!err) 291 290 err = put_user(SCM_RIGHTS, &cm->cmsg_type); 292 291 if (!err)
+26 -12
net/core/flow.c
··· 85 85 add_timer(&flow_hash_rnd_timer); 86 86 } 87 87 88 + static void flow_entry_kill(int cpu, struct flow_cache_entry *fle) 89 + { 90 + if (fle->object) 91 + atomic_dec(fle->object_ref); 92 + kmem_cache_free(flow_cachep, fle); 93 + flow_count(cpu)--; 94 + } 95 + 88 96 static void __flow_cache_shrink(int cpu, int shrink_to) 89 97 { 90 98 struct flow_cache_entry *fle, **flp; ··· 108 100 } 109 101 while ((fle = *flp) != NULL) { 110 102 *flp = fle->next; 111 - if (fle->object) 112 - atomic_dec(fle->object_ref); 113 - kmem_cache_free(flow_cachep, fle); 114 - flow_count(cpu)--; 103 + flow_entry_kill(cpu, fle); 115 104 } 116 105 } 117 106 } ··· 225 220 226 221 nocache: 227 222 { 223 + int err; 228 224 void *obj; 229 225 atomic_t *obj_ref; 230 226 231 - resolver(key, family, dir, &obj, &obj_ref); 227 + err = resolver(key, family, dir, &obj, &obj_ref); 232 228 233 229 if (fle) { 234 - fle->genid = atomic_read(&flow_cache_genid); 230 + if (err) { 231 + /* Force security policy check on next lookup */ 232 + *head = fle->next; 233 + flow_entry_kill(cpu, fle); 234 + } else { 235 + fle->genid = atomic_read(&flow_cache_genid); 235 236 236 - if (fle->object) 237 - atomic_dec(fle->object_ref); 237 + if (fle->object) 238 + atomic_dec(fle->object_ref); 238 239 239 - fle->object = obj; 240 - fle->object_ref = obj_ref; 241 - if (obj) 242 - atomic_inc(fle->object_ref); 240 + fle->object = obj; 241 + fle->object_ref = obj_ref; 242 + if (obj) 243 + atomic_inc(fle->object_ref); 244 + } 243 245 } 244 246 local_bh_enable(); 245 247 248 + if (err) 249 + obj = ERR_PTR(err); 246 250 return obj; 247 251 } 248 252 }
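Since resolvers may now fail, flow_cache_lookup() can hand back ERR_PTR(-errno) rather than an object or NULL, and callers are expected to test for that; a hypothetical caller (my_resolver stands in for a real flow_resolve_t such as xfrm_policy_lookup() in the xfrm_policy.c hunk below):

    static void *my_lookup(struct flowi *fl, u16 family, u8 dir)
    {
            void *obj = flow_cache_lookup(fl, family, dir, my_resolver);

            /* a failed resolution kills the cache entry and is reported via
             * ERR_PTR(); handle it separately from an ordinary NULL miss */
            if (IS_ERR(obj))
                    return NULL;
            return obj;
    }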
+1 -1
net/core/rtnetlink.c
··· 602 602 goto errout; 603 603 } 604 604 605 - err = rtnl_unicast(skb, NETLINK_CB(skb).pid); 605 + err = rtnl_unicast(nskb, NETLINK_CB(skb).pid); 606 606 errout: 607 607 kfree(iw_buf); 608 608 dev_put(dev);
+1 -2
net/core/scm.c
··· 245 245 if (i > 0) 246 246 { 247 247 int cmlen = CMSG_LEN(i*sizeof(int)); 248 - if (!err) 249 - err = put_user(SOL_SOCKET, &cm->cmsg_level); 248 + err = put_user(SOL_SOCKET, &cm->cmsg_level); 250 249 if (!err) 251 250 err = put_user(SCM_RIGHTS, &cm->cmsg_type); 252 251 if (!err)
+3 -3
net/dccp/ipv4.c
··· 311 311 } 312 312 313 313 if (sk->sk_state == DCCP_TIME_WAIT) { 314 - inet_twsk_put((struct inet_timewait_sock *)sk); 314 + inet_twsk_put(inet_twsk(sk)); 315 315 return; 316 316 } 317 317 ··· 614 614 bh_lock_sock(nsk); 615 615 return nsk; 616 616 } 617 - inet_twsk_put((struct inet_timewait_sock *)nsk); 617 + inet_twsk_put(inet_twsk(nsk)); 618 618 return NULL; 619 619 } 620 620 ··· 980 980 goto discard_it; 981 981 982 982 do_time_wait: 983 - inet_twsk_put((struct inet_timewait_sock *)sk); 983 + inet_twsk_put(inet_twsk(sk)); 984 984 goto no_dccp_socket; 985 985 } 986 986
+3 -3
net/dccp/ipv6.c
··· 285 285 } 286 286 287 287 if (sk->sk_state == DCCP_TIME_WAIT) { 288 - inet_twsk_put((struct inet_timewait_sock *)sk); 288 + inet_twsk_put(inet_twsk(sk)); 289 289 return; 290 290 } 291 291 ··· 663 663 bh_lock_sock(nsk); 664 664 return nsk; 665 665 } 666 - inet_twsk_put((struct inet_timewait_sock *)nsk); 666 + inet_twsk_put(inet_twsk(nsk)); 667 667 return NULL; 668 668 } 669 669 ··· 1109 1109 goto discard_it; 1110 1110 1111 1111 do_time_wait: 1112 - inet_twsk_put((struct inet_timewait_sock *)sk); 1112 + inet_twsk_put(inet_twsk(sk)); 1113 1113 goto no_dccp_socket; 1114 1114 } 1115 1115
+3 -1
net/decnet/af_decnet.c
··· 1178 1178 if (peer) { 1179 1179 if ((sock->state != SS_CONNECTED && 1180 1180 sock->state != SS_CONNECTING) && 1181 - scp->accept_mode == ACC_IMMED) 1181 + scp->accept_mode == ACC_IMMED) { 1182 + release_sock(sk); 1182 1183 return -ENOTCONN; 1184 + } 1183 1185 1184 1186 memcpy(sa, &scp->peer, sizeof(struct sockaddr_dn)); 1185 1187 } else {
+8 -3
net/decnet/dn_route.c
··· 267 267 268 268 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) 269 269 { 270 - return memcmp(&fl1->nl_u.dn_u, &fl2->nl_u.dn_u, sizeof(fl1->nl_u.dn_u)) == 0 && 271 - fl1->oif == fl2->oif && 272 - fl1->iif == fl2->iif; 270 + return ((fl1->nl_u.dn_u.daddr ^ fl2->nl_u.dn_u.daddr) | 271 + (fl1->nl_u.dn_u.saddr ^ fl2->nl_u.dn_u.saddr) | 272 + #ifdef CONFIG_IP_ROUTE_FWMARK 273 + (fl1->nl_u.dn_u.fwmark ^ fl2->nl_u.dn_u.fwmark) | 274 + #endif 275 + (fl1->nl_u.dn_u.scope ^ fl2->nl_u.dn_u.scope) | 276 + (fl1->oif ^ fl2->oif) | 277 + (fl1->iif ^ fl2->iif)) == 0; 273 278 } 274 279 275 280 static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
+10 -8
net/ipv4/cipso_ipv4.c
··· 43 43 #include <net/tcp.h> 44 44 #include <net/netlabel.h> 45 45 #include <net/cipso_ipv4.h> 46 + #include <asm/atomic.h> 46 47 #include <asm/bug.h> 47 48 48 49 struct cipso_v4_domhsh_entry { ··· 80 79 unsigned char *key; 81 80 size_t key_len; 82 81 83 - struct netlbl_lsm_cache lsm_data; 82 + struct netlbl_lsm_cache *lsm_data; 84 83 85 84 u32 activity; 86 85 struct list_head list; ··· 189 188 * @entry: the entry to free 190 189 * 191 190 * Description: 192 - * This function frees the memory associated with a cache entry. 191 + * This function frees the memory associated with a cache entry including the 192 + * LSM cache data if there are no longer any users, i.e. reference count == 0. 193 193 * 194 194 */ 195 195 static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry) 196 196 { 197 - if (entry->lsm_data.free) 198 - entry->lsm_data.free(entry->lsm_data.data); 197 + if (entry->lsm_data) 198 + netlbl_secattr_cache_free(entry->lsm_data); 199 199 kfree(entry->key); 200 200 kfree(entry); 201 201 } ··· 317 315 entry->key_len == key_len && 318 316 memcmp(entry->key, key, key_len) == 0) { 319 317 entry->activity += 1; 320 - secattr->cache.free = entry->lsm_data.free; 321 - secattr->cache.data = entry->lsm_data.data; 318 + atomic_inc(&entry->lsm_data->refcount); 319 + secattr->cache = entry->lsm_data; 322 320 if (prev_entry == NULL) { 323 321 spin_unlock_bh(&cipso_v4_cache[bkt].lock); 324 322 return 0; ··· 385 383 memcpy(entry->key, cipso_ptr, cipso_ptr_len); 386 384 entry->key_len = cipso_ptr_len; 387 385 entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len); 388 - entry->lsm_data.free = secattr->cache.free; 389 - entry->lsm_data.data = secattr->cache.data; 386 + atomic_inc(&secattr->cache->refcount); 387 + entry->lsm_data = secattr->cache; 390 388 391 389 bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETBITS - 1); 392 390 spin_lock_bh(&cipso_v4_cache[bkt].lock);
+2 -2
net/ipv4/ip_gre.c
··· 611 611 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header 612 612 */ 613 613 if (flags == 0 && 614 - skb->protocol == __constant_htons(ETH_P_WCCP)) { 615 - skb->protocol = __constant_htons(ETH_P_IP); 614 + skb->protocol == htons(ETH_P_WCCP)) { 615 + skb->protocol = htons(ETH_P_IP); 616 616 if ((*(h + offset) & 0xF0) != 0x40) 617 617 offset += 4; 618 618 }
+9 -3
net/ipv4/route.c
··· 566 566 567 567 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) 568 568 { 569 - return memcmp(&fl1->nl_u.ip4_u, &fl2->nl_u.ip4_u, sizeof(fl1->nl_u.ip4_u)) == 0 && 570 - fl1->oif == fl2->oif && 571 - fl1->iif == fl2->iif; 569 + return ((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) | 570 + (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) | 571 + #ifdef CONFIG_IP_ROUTE_FWMARK 572 + (fl1->nl_u.ip4_u.fwmark ^ fl2->nl_u.ip4_u.fwmark) | 573 + #endif 574 + (*(u16 *)&fl1->nl_u.ip4_u.tos ^ 575 + *(u16 *)&fl2->nl_u.ip4_u.tos) | 576 + (fl1->oif ^ fl2->oif) | 577 + (fl1->iif ^ fl2->iif)) == 0; 572 578 } 573 579 574 580 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
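The two compare_keys() rewrites above exist because memcmp() also reads compiler-inserted padding, which is never initialised when a flow key is built field by field, so identical keys could compare unequal. A stand-alone illustration of the replacement idiom (struct key_example is made up for this note):

    struct key_example {
            __u32 daddr;
            __u32 saddr;
            __u8  tos;      /* the compiler may insert padding after this */
    };

    /* Field-wise XOR/OR compare: touches only named fields, never padding;
     * the OR of the XORs is zero exactly when every field matches. */
    static inline int key_example_equal(const struct key_example *a,
                                        const struct key_example *b)
    {
            return ((a->daddr ^ b->daddr) |
                    (a->saddr ^ b->saddr) |
                    (a->tos   ^ b->tos)) == 0;
    }

The ip4_u variant above additionally reads the tos byte and the byte after it as a single u16, folding two adjacent one-byte fields into one compare.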
+8 -10
net/ipv4/tcp_ipv4.c
··· 355 355 return; 356 356 } 357 357 if (sk->sk_state == TCP_TIME_WAIT) { 358 - inet_twsk_put((struct inet_timewait_sock *)sk); 358 + inet_twsk_put(inet_twsk(sk)); 359 359 return; 360 360 } 361 361 ··· 578 578 struct tcphdr *th = skb->h.th; 579 579 struct { 580 580 struct tcphdr th; 581 - u32 tsopt[3]; 581 + u32 tsopt[TCPOLEN_TSTAMP_ALIGNED >> 2]; 582 582 } rep; 583 583 struct ip_reply_arg arg; 584 584 ··· 960 960 bh_lock_sock(nsk); 961 961 return nsk; 962 962 } 963 - inet_twsk_put((struct inet_timewait_sock *)nsk); 963 + inet_twsk_put(inet_twsk(nsk)); 964 964 return NULL; 965 965 } 966 966 ··· 1154 1154 1155 1155 do_time_wait: 1156 1156 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { 1157 - inet_twsk_put((struct inet_timewait_sock *) sk); 1157 + inet_twsk_put(inet_twsk(sk)); 1158 1158 goto discard_it; 1159 1159 } 1160 1160 1161 1161 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) { 1162 1162 TCP_INC_STATS_BH(TCP_MIB_INERRS); 1163 - inet_twsk_put((struct inet_timewait_sock *) sk); 1163 + inet_twsk_put(inet_twsk(sk)); 1164 1164 goto discard_it; 1165 1165 } 1166 - switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk, 1167 - skb, th)) { 1166 + switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) { 1168 1167 case TCP_TW_SYN: { 1169 1168 struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo, 1170 1169 skb->nh.iph->daddr, 1171 1170 th->dest, 1172 1171 inet_iif(skb)); 1173 1172 if (sk2) { 1174 - inet_twsk_deschedule((struct inet_timewait_sock *)sk, 1175 - &tcp_death_row); 1176 - inet_twsk_put((struct inet_timewait_sock *)sk); 1173 + inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row); 1174 + inet_twsk_put(inet_twsk(sk)); 1177 1175 sk = sk2; 1178 1176 goto process; 1179 1177 }
+20 -11
net/ipv4/tcp_output.c
··· 273 273 __u32 tstamp) 274 274 { 275 275 if (tp->rx_opt.tstamp_ok) { 276 - *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | 277 - (TCPOPT_NOP << 16) | 278 - (TCPOPT_TIMESTAMP << 8) | 279 - TCPOLEN_TIMESTAMP); 276 + *ptr++ = htonl((TCPOPT_NOP << 24) | 277 + (TCPOPT_NOP << 16) | 278 + (TCPOPT_TIMESTAMP << 8) | 279 + TCPOLEN_TIMESTAMP); 280 280 *ptr++ = htonl(tstamp); 281 281 *ptr++ = htonl(tp->rx_opt.ts_recent); 282 282 } ··· 325 325 *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss); 326 326 if (ts) { 327 327 if(sack) 328 - *ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) | 329 - (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); 328 + *ptr++ = htonl((TCPOPT_SACK_PERM << 24) | 329 + (TCPOLEN_SACK_PERM << 16) | 330 + (TCPOPT_TIMESTAMP << 8) | 331 + TCPOLEN_TIMESTAMP); 330 332 else 331 - *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | 332 - (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); 333 + *ptr++ = htonl((TCPOPT_NOP << 24) | 334 + (TCPOPT_NOP << 16) | 335 + (TCPOPT_TIMESTAMP << 8) | 336 + TCPOLEN_TIMESTAMP); 333 337 *ptr++ = htonl(tstamp); /* TSVAL */ 334 338 *ptr++ = htonl(ts_recent); /* TSECR */ 335 339 } else if(sack) 336 - *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | 337 - (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM); 340 + *ptr++ = htonl((TCPOPT_NOP << 24) | 341 + (TCPOPT_NOP << 16) | 342 + (TCPOPT_SACK_PERM << 8) | 343 + TCPOLEN_SACK_PERM); 338 344 if (offer_wscale) 339 - *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale)); 345 + *ptr++ = htonl((TCPOPT_NOP << 24) | 346 + (TCPOPT_WINDOW << 16) | 347 + (TCPOLEN_WINDOW << 8) | 348 + (wscale)); 340 349 } 341 350 342 351 /* This routine actually transmits TCP packets queued in by
+1 -1
net/ipv4/xfrm4_policy.c
··· 52 52 xdst->u.rt.fl.fl4_dst == fl->fl4_dst && 53 53 xdst->u.rt.fl.fl4_src == fl->fl4_src && 54 54 xdst->u.rt.fl.fl4_tos == fl->fl4_tos && 55 - xfrm_bundle_ok(xdst, fl, AF_INET, 0)) { 55 + xfrm_bundle_ok(policy, xdst, fl, AF_INET, 0)) { 56 56 dst_clone(dst); 57 57 break; 58 58 }
+13
net/ipv6/Kconfig
··· 153 153 ---help--- 154 154 Support for MIPv6 route optimization mode. 155 155 156 + config IPV6_SIT 157 + tristate "IPv6: IPv6-in-IPv4 tunnel (SIT driver)" 158 + depends on IPV6 159 + default y 160 + ---help--- 161 + Tunneling means encapsulating data of one protocol type within 162 + another protocol and sending it over a channel that understands the 163 + encapsulating protocol. This driver implements encapsulation of IPv6 164 + into IPv4 packets. This is useful if you want to connect two IPv6 165 + networks over an IPv4-only path. 166 + 167 + Saying M here will produce a module called sit.ko. If unsure, say Y. 168 + 156 169 config IPV6_TUNNEL 157 170 tristate "IPv6: IPv6-in-IPv6 tunnel" 158 171 select INET6_TUNNEL
+2 -1
net/ipv6/Makefile
··· 4 4 5 5 obj-$(CONFIG_IPV6) += ipv6.o 6 6 7 - ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o sit.o \ 7 + ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \ 8 8 route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o raw.o \ 9 9 protocol.o icmp.o mcast.o reassembly.o tcp_ipv6.o \ 10 10 exthdrs.o sysctl_net_ipv6.o datagram.o proc.o \ ··· 29 29 obj-$(CONFIG_INET6_XFRM_MODE_BEET) += xfrm6_mode_beet.o 30 30 obj-$(CONFIG_NETFILTER) += netfilter/ 31 31 32 + obj-$(CONFIG_IPV6_SIT) += sit.o 32 33 obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o 33 34 34 35 obj-y += exthdrs_core.o
+16 -2
net/ipv6/addrconf.c
··· 396 396 ndev->regen_timer.data = (unsigned long) ndev; 397 397 if ((dev->flags&IFF_LOOPBACK) || 398 398 dev->type == ARPHRD_TUNNEL || 399 - dev->type == ARPHRD_NONE || 400 - dev->type == ARPHRD_SIT) { 399 + #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 400 + dev->type == ARPHRD_SIT || 401 + #endif 402 + dev->type == ARPHRD_NONE) { 401 403 printk(KERN_INFO 402 404 "%s: Disabled Privacy Extensions\n", 403 405 dev->name); ··· 1548 1546 This thing is done here expecting that the whole 1549 1547 class of non-broadcast devices need not cloning. 1550 1548 */ 1549 + #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 1551 1550 if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT)) 1552 1551 cfg.fc_flags |= RTF_NONEXTHOP; 1552 + #endif 1553 1553 1554 1554 ip6_route_add(&cfg); 1555 1555 } ··· 1573 1569 ip6_route_add(&cfg); 1574 1570 } 1575 1571 1572 + #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 1576 1573 static void sit_route_add(struct net_device *dev) 1577 1574 { 1578 1575 struct fib6_config cfg = { ··· 1587 1582 /* prefix length - 96 bits "::d.d.d.d" */ 1588 1583 ip6_route_add(&cfg); 1589 1584 } 1585 + #endif 1590 1586 1591 1587 static void addrconf_add_lroute(struct net_device *dev) 1592 1588 { ··· 1858 1852 if (dev == NULL) 1859 1853 goto err_exit; 1860 1854 1855 + #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 1861 1856 if (dev->type == ARPHRD_SIT) { 1862 1857 struct ifreq ifr; 1863 1858 mm_segment_t oldfs; ··· 1888 1881 err = dev_open(dev); 1889 1882 } 1890 1883 } 1884 + #endif 1891 1885 1892 1886 err_exit: 1893 1887 rtnl_unlock(); ··· 2018 2010 return err; 2019 2011 } 2020 2012 2013 + #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 2021 2014 static void sit_add_v4_addrs(struct inet6_dev *idev) 2022 2015 { 2023 2016 struct inet6_ifaddr * ifp; ··· 2087 2078 } 2088 2079 } 2089 2080 } 2081 + #endif 2090 2082 2091 2083 static void init_loopback(struct net_device *dev) 2092 2084 { ··· 2151 2141 addrconf_add_linklocal(idev, &addr); 2152 2142 } 2153 2143 2144 + #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 2154 2145 static void addrconf_sit_config(struct net_device *dev) 2155 2146 { 2156 2147 struct inet6_dev *idev; ··· 2177 2166 } else 2178 2167 sit_route_add(dev); 2179 2168 } 2169 + #endif 2180 2170 2181 2171 static inline int 2182 2172 ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev) ··· 2272 2260 } 2273 2261 2274 2262 switch(dev->type) { 2263 + #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 2275 2264 case ARPHRD_SIT: 2276 2265 addrconf_sit_config(dev); 2277 2266 break; 2267 + #endif 2278 2268 case ARPHRD_TUNNEL6: 2279 2269 addrconf_ip6_tnl_config(dev); 2280 2270 break;
-2
net/ipv6/af_inet6.c
··· 850 850 err = addrconf_init(); 851 851 if (err) 852 852 goto addrconf_fail; 853 - sit_init(); 854 853 855 854 /* Init v6 extension headers. */ 856 855 ipv6_rthdr_init(); ··· 926 927 mip6_fini(); 927 928 #endif 928 929 /* Cleanup code parts. */ 929 - sit_cleanup(); 930 930 ip6_flowlabel_cleanup(); 931 931 addrconf_cleanup(); 932 932 ip6_route_cleanup();
+3
net/ipv6/sit.c
··· 850 850 inet_del_protocol(&sit_protocol, IPPROTO_IPV6); 851 851 goto out; 852 852 } 853 + 854 + module_init(sit_init); 855 + module_exit(sit_cleanup);
+6 -7
net/ipv6/tcp_ipv6.c
··· 329 329 } 330 330 331 331 if (sk->sk_state == TCP_TIME_WAIT) { 332 - inet_twsk_put((struct inet_timewait_sock *)sk); 332 + inet_twsk_put(inet_twsk(sk)); 333 333 return; 334 334 } 335 335 ··· 653 653 int tot_len = sizeof(struct tcphdr); 654 654 655 655 if (ts) 656 - tot_len += 3*4; 656 + tot_len += TCPOLEN_TSTAMP_ALIGNED; 657 657 658 658 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len, 659 659 GFP_ATOMIC); ··· 749 749 bh_lock_sock(nsk); 750 750 return nsk; 751 751 } 752 - inet_twsk_put((struct inet_timewait_sock *)nsk); 752 + inet_twsk_put(inet_twsk(nsk)); 753 753 return NULL; 754 754 } 755 755 ··· 1283 1283 1284 1284 do_time_wait: 1285 1285 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { 1286 - inet_twsk_put((struct inet_timewait_sock *)sk); 1286 + inet_twsk_put(inet_twsk(sk)); 1287 1287 goto discard_it; 1288 1288 } 1289 1289 1290 1290 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) { 1291 1291 TCP_INC_STATS_BH(TCP_MIB_INERRS); 1292 - inet_twsk_put((struct inet_timewait_sock *)sk); 1292 + inet_twsk_put(inet_twsk(sk)); 1293 1293 goto discard_it; 1294 1294 } 1295 1295 1296 - switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk, 1297 - skb, th)) { 1296 + switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) { 1298 1297 case TCP_TW_SYN: 1299 1298 { 1300 1299 struct sock *sk2;
+1 -1
net/ipv6/xfrm6_policy.c
··· 73 73 xdst->u.rt6.rt6i_src.plen); 74 74 if (ipv6_addr_equal(&xdst->u.rt6.rt6i_dst.addr, &fl_dst_prefix) && 75 75 ipv6_addr_equal(&xdst->u.rt6.rt6i_src.addr, &fl_src_prefix) && 76 - xfrm_bundle_ok(xdst, fl, AF_INET6, 76 + xfrm_bundle_ok(policy, xdst, fl, AF_INET6, 77 77 (xdst->u.rt6.rt6i_dst.plen != 128 || 78 78 xdst->u.rt6.rt6i_src.plen != 128))) { 79 79 dst_clone(dst);
-5
net/key/af_key.c
··· 2928 2928 if (*dir) 2929 2929 goto out; 2930 2930 } 2931 - else { 2932 - *dir = security_xfrm_sock_policy_alloc(xp, sk); 2933 - if (*dir) 2934 - goto out; 2935 - } 2936 2931 2937 2932 *dir = pol->sadb_x_policy_dir-1; 2938 2933 return xp;
+1 -1
net/netlabel/netlabel_kapi.c
··· 200 200 int netlbl_cache_add(const struct sk_buff *skb, 201 201 const struct netlbl_lsm_secattr *secattr) 202 202 { 203 - if (secattr->cache.data == NULL) 203 + if (secattr->cache == NULL) 204 204 return -ENOMSG; 205 205 206 206 if (CIPSO_V4_OPTEXIST(skb))
+2 -3
net/sched/sch_htb.c
··· 786 786 for (i = 0; i < 500; i++) { 787 787 struct htb_class *cl; 788 788 long diff; 789 - struct rb_node *p = q->wait_pq[level].rb_node; 789 + struct rb_node *p = rb_first(&q->wait_pq[level]); 790 + 790 791 if (!p) 791 792 return 0; 792 - while (p->rb_left) 793 - p = p->rb_left; 794 793 795 794 cl = rb_entry(p, struct htb_class, pq_node); 796 795 if (time_after(cl->pq_key, q->jiffies)) {
+1 -1
net/sctp/proc.c
··· 344 344 assoc, sk, sctp_sk(sk)->type, sk->sk_state, 345 345 assoc->state, hash, assoc->assoc_id, 346 346 assoc->sndbuf_used, 347 - (sk->sk_rcvbuf - assoc->rwnd), 347 + atomic_read(&assoc->rmem_alloc), 348 348 sock_i_uid(sk), sock_i_ino(sk), 349 349 epb->bind_addr.port, 350 350 assoc->peer.port);
+18 -4
net/sctp/socket.c
··· 5362 5362 sctp_association_put(asoc); 5363 5363 } 5364 5364 5365 + /* Do accounting for the receive space on the socket. 5366 + * Accounting for the association is done in ulpevent.c 5367 + * We set this as a destructor for the cloned data skbs so that 5368 + * accounting is done at the correct time. 5369 + */ 5370 + void sctp_sock_rfree(struct sk_buff *skb) 5371 + { 5372 + struct sock *sk = skb->sk; 5373 + struct sctp_ulpevent *event = sctp_skb2event(skb); 5374 + 5375 + atomic_sub(event->rmem_len, &sk->sk_rmem_alloc); 5376 + } 5377 + 5378 + 5365 5379 /* Helper function to wait for space in the sndbuf. */ 5366 5380 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, 5367 5381 size_t msg_len) ··· 5648 5634 sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { 5649 5635 event = sctp_skb2event(skb); 5650 5636 if (event->asoc == assoc) { 5651 - sock_rfree(skb); 5637 + sctp_sock_rfree(skb); 5652 5638 __skb_unlink(skb, &oldsk->sk_receive_queue); 5653 5639 __skb_queue_tail(&newsk->sk_receive_queue, skb); 5654 - skb_set_owner_r(skb, newsk); 5640 + sctp_skb_set_owner_r(skb, newsk); 5655 5641 } 5656 5642 } 5657 5643 ··· 5679 5665 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { 5680 5666 event = sctp_skb2event(skb); 5681 5667 if (event->asoc == assoc) { 5682 - sock_rfree(skb); 5668 + sctp_sock_rfree(skb); 5683 5669 __skb_unlink(skb, &oldsp->pd_lobby); 5684 5670 __skb_queue_tail(queue, skb); 5685 - skb_set_owner_r(skb, newsk); 5671 + sctp_skb_set_owner_r(skb, newsk); 5686 5672 } 5687 5673 } 5688 5674
+15 -10
net/sctp/ulpevent.c
··· 55 55 56 56 57 57 /* Initialize an ULP event from an given skb. */ 58 - SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event, int msg_flags) 58 + SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event, 59 + int msg_flags, 60 + unsigned int len) 59 61 { 60 62 memset(event, 0, sizeof(struct sctp_ulpevent)); 61 63 event->msg_flags = msg_flags; 64 + event->rmem_len = len; 62 65 } 63 66 64 67 /* Create a new sctp_ulpevent. */ ··· 76 73 goto fail; 77 74 78 75 event = sctp_skb2event(skb); 79 - sctp_ulpevent_init(event, msg_flags); 76 + sctp_ulpevent_init(event, msg_flags, skb->truesize); 80 77 81 78 return event; 82 79 ··· 104 101 sctp_association_hold((struct sctp_association *)asoc); 105 102 skb = sctp_event2skb(event); 106 103 event->asoc = (struct sctp_association *)asoc; 107 - atomic_add(skb->truesize, &event->asoc->rmem_alloc); 108 - skb_set_owner_r(skb, asoc->base.sk); 104 + atomic_add(event->rmem_len, &event->asoc->rmem_alloc); 105 + sctp_skb_set_owner_r(skb, asoc->base.sk); 109 106 } 110 107 111 108 /* A simple destructor to give up the reference to the association. */ 112 109 static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event) 113 110 { 114 111 struct sctp_association *asoc = event->asoc; 115 - struct sk_buff *skb = sctp_event2skb(event); 116 112 117 - atomic_sub(skb->truesize, &asoc->rmem_alloc); 113 + atomic_sub(event->rmem_len, &asoc->rmem_alloc); 118 114 sctp_association_put(asoc); 119 115 } 120 116 ··· 374 372 375 373 /* Embed the event fields inside the cloned skb. */ 376 374 event = sctp_skb2event(skb); 377 - sctp_ulpevent_init(event, MSG_NOTIFICATION); 375 + sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); 378 376 379 377 sre = (struct sctp_remote_error *) 380 378 skb_push(skb, sizeof(struct sctp_remote_error)); ··· 466 464 467 465 /* Embed the event fields inside the cloned skb. */ 468 466 event = sctp_skb2event(skb); 469 - sctp_ulpevent_init(event, MSG_NOTIFICATION); 467 + sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); 470 468 471 469 ssf = (struct sctp_send_failed *) 472 470 skb_push(skb, sizeof(struct sctp_send_failed)); ··· 684 682 /* Embed the event fields inside the cloned skb. */ 685 683 event = sctp_skb2event(skb); 686 684 687 - /* Initialize event with flags 0. */ 688 - sctp_ulpevent_init(event, 0); 685 + /* Initialize event with flags 0 and correct length 686 + * Since this is a clone of the original skb, only account for 687 + * the data of this chunk as other chunks will be accounted separately. 688 + */ 689 + sctp_ulpevent_init(event, 0, skb->len + sizeof(struct sk_buff)); 689 690 690 691 sctp_ulpevent_receive_data(event, asoc); 691 692
+1 -1
net/sctp/ulpqueue.c
··· 309 309 if (!new) 310 310 return NULL; /* try again later */ 311 311 312 - new->sk = f_frag->sk; 312 + sctp_skb_set_owner_r(new, f_frag->sk); 313 313 314 314 skb_shinfo(new)->frag_list = pos; 315 315 } else
+75 -26
net/xfrm/xfrm_policy.c
··· 883 883 } 884 884 EXPORT_SYMBOL(xfrm_policy_walk); 885 885 886 - /* Find policy to apply to this flow. */ 887 - 886 + /* 887 + * Find policy to apply to this flow. 888 + * 889 + * Returns 0 if policy found, else an -errno. 890 + */ 888 891 static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl, 889 892 u8 type, u16 family, int dir) 890 893 { 891 894 struct xfrm_selector *sel = &pol->selector; 892 - int match; 895 + int match, ret = -ESRCH; 893 896 894 897 if (pol->family != family || 895 898 pol->type != type) 896 - return 0; 899 + return ret; 897 900 898 901 match = xfrm_selector_match(sel, fl, family); 899 - if (match) { 900 - if (!security_xfrm_policy_lookup(pol, fl->secid, dir)) 901 - return 1; 902 - } 902 + if (match) 903 + ret = security_xfrm_policy_lookup(pol, fl->secid, dir); 903 904 904 - return 0; 905 + return ret; 905 906 } 906 907 907 908 static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl, 908 909 u16 family, u8 dir) 909 910 { 911 + int err; 910 912 struct xfrm_policy *pol, *ret; 911 913 xfrm_address_t *daddr, *saddr; 912 914 struct hlist_node *entry; ··· 924 922 chain = policy_hash_direct(daddr, saddr, family, dir); 925 923 ret = NULL; 926 924 hlist_for_each_entry(pol, entry, chain, bydst) { 927 - if (xfrm_policy_match(pol, fl, type, family, dir)) { 925 + err = xfrm_policy_match(pol, fl, type, family, dir); 926 + if (err) { 927 + if (err == -ESRCH) 928 + continue; 929 + else { 930 + ret = ERR_PTR(err); 931 + goto fail; 932 + } 933 + } else { 928 934 ret = pol; 929 935 priority = ret->priority; 930 936 break; ··· 940 930 } 941 931 chain = &xfrm_policy_inexact[dir]; 942 932 hlist_for_each_entry(pol, entry, chain, bydst) { 943 - if (xfrm_policy_match(pol, fl, type, family, dir) && 944 - pol->priority < priority) { 933 + err = xfrm_policy_match(pol, fl, type, family, dir); 934 + if (err) { 935 + if (err == -ESRCH) 936 + continue; 937 + else { 938 + ret = ERR_PTR(err); 939 + goto fail; 940 + } 941 + } else if (pol->priority < priority) { 945 942 ret = pol; 946 943 break; 947 944 } 948 945 } 949 946 if (ret) 950 947 xfrm_pol_hold(ret); 948 + fail: 951 949 read_unlock_bh(&xfrm_policy_lock); 952 950 953 951 return ret; 954 952 } 955 953 956 - static void xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir, 954 + static int xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir, 957 955 void **objp, atomic_t **obj_refp) 958 956 { 959 957 struct xfrm_policy *pol; 958 + int err = 0; 960 959 961 960 #ifdef CONFIG_XFRM_SUB_POLICY 962 961 pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_SUB, fl, family, dir); 963 - if (pol) 962 + if (IS_ERR(pol)) { 963 + err = PTR_ERR(pol); 964 + pol = NULL; 965 + } 966 + if (pol || err) 964 967 goto end; 965 968 #endif 966 969 pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN, fl, family, dir); 967 - 970 + if (IS_ERR(pol)) { 971 + err = PTR_ERR(pol); 972 + pol = NULL; 973 + } 968 974 #ifdef CONFIG_XFRM_SUB_POLICY 969 975 end: 970 976 #endif 971 977 if ((*objp = (void *) pol) != NULL) 972 978 *obj_refp = &pol->refcnt; 979 + return err; 973 980 } 974 981 975 982 static inline int policy_to_flow_dir(int dir) ··· 1016 989 sk->sk_family); 1017 990 int err = 0; 1018 991 1019 - if (match) 1020 - err = security_xfrm_policy_lookup(pol, fl->secid, policy_to_flow_dir(dir)); 1021 - 1022 - if (match && !err) 1023 - xfrm_pol_hold(pol); 1024 - else 992 + if (match) { 993 + err = security_xfrm_policy_lookup(pol, fl->secid, 994 + policy_to_flow_dir(dir)); 995 + if (!err) 996 + xfrm_pol_hold(pol); 997 + else if (err == -ESRCH) 998 + pol = NULL; 999 + else 1000 + pol = ERR_PTR(err); 1001 + } else 1025 1002 pol = NULL; 1026 1003 } 1027 1004 read_unlock_bh(&xfrm_policy_lock); ··· 1317 1286 pol_dead = 0; 1318 1287 xfrm_nr = 0; 1319 1288 1320 - if (sk && sk->sk_policy[1]) 1289 + if (sk && sk->sk_policy[1]) { 1321 1290 policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); 1291 + if (IS_ERR(policy)) 1292 + return PTR_ERR(policy); 1293 + } 1322 1294 1323 1295 if (!policy) { 1324 1296 /* To accelerate a bit... */ ··· 1331 1297 1332 1298 policy = flow_cache_lookup(fl, dst_orig->ops->family, 1333 1299 dir, xfrm_policy_lookup); 1300 + if (IS_ERR(policy)) 1301 + return PTR_ERR(policy); 1334 1302 } 1335 1303 1336 1304 if (!policy) ··· 1379 1343 fl, family, 1380 1344 XFRM_POLICY_OUT); 1381 1345 if (pols[1]) { 1346 + if (IS_ERR(pols[1])) { 1347 + err = PTR_ERR(pols[1]); 1348 + goto error; 1349 + } 1382 1350 if (pols[1]->action == XFRM_POLICY_BLOCK) { 1383 1351 err = -EPERM; 1384 1352 goto error; ··· 1614 1574 } 1615 1575 1616 1576 pol = NULL; 1617 - if (sk && sk->sk_policy[dir]) 1577 + if (sk && sk->sk_policy[dir]) { 1618 1578 pol = xfrm_sk_policy_lookup(sk, dir, &fl); 1579 + if (IS_ERR(pol)) 1580 + return 0; 1581 + } 1619 1582 1620 1583 if (!pol) 1621 1584 pol = flow_cache_lookup(&fl, family, fl_dir, 1622 1585 xfrm_policy_lookup); 1586 + 1587 + if (IS_ERR(pol)) 1588 + return 0; 1623 1589 1624 1590 if (!pol) { 1625 1591 if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) { ··· 1645 1599 &fl, family, 1646 1600 XFRM_POLICY_IN); 1647 1601 if (pols[1]) { 1602 + if (IS_ERR(pols[1])) 1603 + return 0; 1648 1604 pols[1]->curlft.use_time = (unsigned long)xtime.tv_sec; 1649 1605 npols ++; 1650 1606 } ··· 1754 1706 1755 1707 static int stale_bundle(struct dst_entry *dst) 1756 1708 { 1757 - return !xfrm_bundle_ok((struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0); 1709 + return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0); 1758 1710 } 1759 1711 1760 1712 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev) ··· 1876 1828 * still valid. 1877 1829 */ 1878 1830 1879 - int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family, int strict) 1831 + int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first, 1832 + struct flowi *fl, int family, int strict) 1880 1833 { 1881 1834 struct dst_entry *dst = &first->u.dst; 1882 1835 struct xfrm_dst *last; ··· 1894 1845 1895 1846 if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family)) 1896 1847 return 0; 1897 - if (fl && !security_xfrm_flow_state_match(fl, dst->xfrm)) 1848 + if (fl && !security_xfrm_flow_state_match(fl, dst->xfrm, pol)) 1898 1849 return 0; 1899 1850 if (dst->xfrm->km.state != XFRM_STATE_VALID) 1900 1851 return 0;
-9
net/xfrm/xfrm_user.c
··· 1992 1992 xp->type = XFRM_POLICY_TYPE_MAIN; 1993 1993 copy_templates(xp, ut, nr); 1994 1994 1995 - if (!xp->security) { 1996 - int err = security_xfrm_sock_policy_alloc(xp, sk); 1997 - if (err) { 1998 - kfree(xp); 1999 - *dir = err; 2000 - return NULL; 2001 - } 2002 - } 2003 - 2004 1995 *dir = p->dir; 2005 1996 2006 1997 return xp;
+2 -1
security/dummy.c
··· 881 881 return 1; 882 882 } 883 883 884 - static int dummy_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm) 884 + static int dummy_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm, 885 + struct xfrm_policy *xp) 885 886 { 886 887 return 1; 887 888 }
+2 -1
security/selinux/include/xfrm.h
··· 19 19 int selinux_xfrm_policy_lookup(struct xfrm_policy *xp, u32 fl_secid, u8 dir); 20 20 int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, 21 21 struct xfrm_policy *xp, struct flowi *fl); 22 - int selinux_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm); 22 + int selinux_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm, 23 + struct xfrm_policy *xp); 23 24 24 25 25 26 /*
+2
security/selinux/ss/policydb.c
··· 618 618 c = c->next; 619 619 ocontext_destroy(ctmp,i); 620 620 } 621 + p->ocontexts[i] = NULL; 621 622 } 622 623 623 624 g = p->genfs; ··· 634 633 g = g->next; 635 634 kfree(gtmp); 636 635 } 636 + p->genfs = NULL; 637 637 638 638 cond_policydb_destroy(p); 639 639
+30 -36
security/selinux/ss/services.c
··· 2172 2172 */ 2173 2173 static void selinux_netlbl_cache_free(const void *data) 2174 2174 { 2175 - struct netlbl_cache *cache = NETLBL_CACHE(data); 2175 + struct netlbl_cache *cache; 2176 + 2177 + if (data == NULL) 2178 + return; 2179 + 2180 + cache = NETLBL_CACHE(data); 2176 2181 switch (cache->type) { 2177 2182 case NETLBL_CACHE_T_MLS: 2178 2183 ebitmap_destroy(&cache->data.mls_label.level[0].cat); ··· 2202 2197 struct netlbl_lsm_secattr secattr; 2203 2198 2204 2199 netlbl_secattr_init(&secattr); 2200 + secattr.cache = netlbl_secattr_cache_alloc(GFP_ATOMIC); 2201 + if (secattr.cache == NULL) 2202 + goto netlbl_cache_add_return; 2205 2203 2206 2204 cache = kzalloc(sizeof(*cache), GFP_ATOMIC); 2207 2205 if (cache == NULL) 2208 - goto netlbl_cache_add_failure; 2209 - secattr.cache.free = selinux_netlbl_cache_free; 2210 - secattr.cache.data = (void *)cache; 2206 + goto netlbl_cache_add_return; 2207 + secattr.cache->free = selinux_netlbl_cache_free; 2208 + secattr.cache->data = (void *)cache; 2211 2209 2212 2210 cache->type = NETLBL_CACHE_T_MLS; 2213 2211 if (ebitmap_cpy(&cache->data.mls_label.level[0].cat, 2214 2212 &ctx->range.level[0].cat) != 0) 2215 - goto netlbl_cache_add_failure; 2213 + goto netlbl_cache_add_return; 2216 2214 cache->data.mls_label.level[1].cat.highbit = 2217 2215 cache->data.mls_label.level[0].cat.highbit; 2218 2216 cache->data.mls_label.level[1].cat.node = ··· 2223 2215 cache->data.mls_label.level[0].sens = ctx->range.level[0].sens; 2224 2216 cache->data.mls_label.level[1].sens = ctx->range.level[0].sens; 2225 2217 2226 - if (netlbl_cache_add(skb, &secattr) != 0) 2227 - goto netlbl_cache_add_failure; 2218 + netlbl_cache_add(skb, &secattr); 2228 2219 2229 - return; 2230 - 2231 - netlbl_cache_add_failure: 2232 - netlbl_secattr_destroy(&secattr, 1); 2220 + netlbl_cache_add_return: 2221 + netlbl_secattr_destroy(&secattr); 2233 2222 } 2234 2223 2235 2224 /** ··· 2268 2263 2269 2264 POLICY_RDLOCK; 2270 2265 2271 - if (secattr->cache.data) { 2272 - cache = NETLBL_CACHE(secattr->cache.data); 2266 + if (secattr->cache) { 2267 + cache = NETLBL_CACHE(secattr->cache->data); 2273 2268 switch (cache->type) { 2274 2269 case NETLBL_CACHE_T_SID: 2275 2270 *sid = cache->data.sid; ··· 2336 2331 selinux_netlbl_cache_add(skb, &ctx_new); 2337 2332 ebitmap_destroy(&ctx_new.range.level[0].cat); 2338 2333 } else { 2339 - *sid = SECINITSID_UNLABELED; 2334 + *sid = SECSID_NULL; 2340 2335 rc = 0; 2341 2336 } ··· 2374 2369 &secattr, 2375 2370 base_sid, 2376 2371 sid); 2377 - netlbl_secattr_destroy(&secattr, 0); 2372 + netlbl_secattr_destroy(&secattr); 2378 2373 2379 2374 return rc; 2380 2375 } ··· 2420 2415 if (rc == 0) 2421 2416 sksec->nlbl_state = NLBL_LABELED; 2422 2417 2423 - netlbl_secattr_destroy(&secattr, 0); 2418 + netlbl_secattr_destroy(&secattr); 2424 2419 2425 2420 netlbl_socket_setsid_return: 2426 2421 POLICY_RDUNLOCK; ··· 2519 2514 if (netlbl_sock_getattr(sk, &secattr) == 0 && 2520 2515 selinux_netlbl_secattr_to_sid(NULL, 2521 2516 &secattr, 2522 - sksec->sid, 2517 + SECINITSID_UNLABELED, 2523 2518 &nlbl_peer_sid) == 0) 2524 2519 sksec->peer_sid = nlbl_peer_sid; 2525 - netlbl_secattr_destroy(&secattr, 0); 2520 + netlbl_secattr_destroy(&secattr); 2526 2521 2527 2522 sksec->nlbl_state = NLBL_REQUIRE; 2528 2523 ··· 2550 2545 2551 2546 rc = selinux_netlbl_skbuff_getsid(skb, sock_sid, &peer_sid); 2552 2547 if (rc != 0) 2553 - return SECSID_NULL; 2554 - 2555 - if (peer_sid == SECINITSID_UNLABELED) 2556 2548 return SECSID_NULL; 2557 2549 2558 2550 return peer_sid; ··· 2613 2611 u32 netlbl_sid; 2614 2612 u32 recv_perm; 2615 2613 2616 - rc = selinux_netlbl_skbuff_getsid(skb, SECINITSID_NETMSG, &netlbl_sid); 2614 + rc = selinux_netlbl_skbuff_getsid(skb, 2615 + SECINITSID_UNLABELED, 2616 + &netlbl_sid); 2617 2617 if (rc != 0) 2618 2618 return rc; 2619 2619 2620 - if (netlbl_sid == SECINITSID_UNLABELED) 2620 + if (netlbl_sid == SECSID_NULL) 2621 2621 return 0; 2622 2622 2623 2623 switch (sksec->sclass) { ··· 2657 2653 u32 selinux_netlbl_socket_getpeersec_stream(struct socket *sock) 2658 2654 { 2659 2655 struct sk_security_struct *sksec = sock->sk->sk_security; 2660 - 2661 - if (sksec->peer_sid == SECINITSID_UNLABELED) 2662 - return SECSID_NULL; 2663 - 2664 2656 return sksec->peer_sid; 2665 2657 } 2666 2658 ··· 2672 2672 u32 selinux_netlbl_socket_getpeersec_dgram(struct sk_buff *skb) 2673 2673 { 2674 2674 int peer_sid; 2675 - struct sock *sk = skb->sk; 2676 - struct inode_security_struct *isec; 2677 2675 2678 - if (sk == NULL || sk->sk_socket == NULL) 2679 - return SECSID_NULL; 2680 - 2681 - isec = SOCK_INODE(sk->sk_socket)->i_security; 2682 - if (selinux_netlbl_skbuff_getsid(skb, isec->sid, &peer_sid) != 0) 2683 - return SECSID_NULL; 2684 - if (peer_sid == SECINITSID_UNLABELED) 2676 + if (selinux_netlbl_skbuff_getsid(skb, 2677 + SECINITSID_UNLABELED, 2678 + &peer_sid) != 0) 2685 2679 return SECSID_NULL; 2686 2680 2687 2681 return peer_sid;
+41 -12
security/selinux/xfrm.c
··· 77 77 */ 78 78 int selinux_xfrm_policy_lookup(struct xfrm_policy *xp, u32 fl_secid, u8 dir) 79 79 { 80 - int rc = 0; 81 - u32 sel_sid = SECINITSID_UNLABELED; 80 + int rc; 81 + u32 sel_sid; 82 82 struct xfrm_sec_ctx *ctx; 83 83 84 84 /* Context sid is either set to label or ANY_ASSOC */ ··· 88 88 89 89 sel_sid = ctx->ctx_sid; 90 90 } 91 + else 92 + /* 93 + * All flows should be treated as polmatch'ing an 94 + * otherwise applicable "non-labeled" policy. This 95 + * would prevent inadvertent "leaks". 96 + */ 97 + return 0; 91 98 92 99 rc = avc_has_perm(fl_secid, sel_sid, SECCLASS_ASSOCIATION, 93 100 ASSOCIATION__POLMATCH, 94 101 NULL); 102 + 103 + if (rc == -EACCES) 104 + rc = -ESRCH; 95 105 96 106 return rc; 97 107 } ··· 118 108 u32 pol_sid; 119 109 int err; 120 110 121 - if (x->security) 122 - state_sid = x->security->ctx_sid; 123 - else 124 - state_sid = SECINITSID_UNLABELED; 125 - 126 - if (xp->security) 111 + if (xp->security) { 112 + if (!x->security) 113 + /* unlabeled SA and labeled policy can't match */ 114 + return 0; 115 + else 116 + state_sid = x->security->ctx_sid; 127 117 pol_sid = xp->security->ctx_sid; 128 - else 129 - pol_sid = SECINITSID_UNLABELED; 118 + } else 119 + if (x->security) 120 + /* unlabeled policy and labeled SA can't match */ 121 + return 0; 122 + else 123 + /* unlabeled policy and unlabeled SA match all flows */ 124 + return 1; 130 125 131 126 err = avc_has_perm(state_sid, pol_sid, SECCLASS_ASSOCIATION, 132 127 ASSOCIATION__POLMATCH, ··· 140 125 if (err) 141 126 return 0; 142 127 143 - return selinux_xfrm_flow_state_match(fl, x); 128 + err = avc_has_perm(fl->secid, state_sid, SECCLASS_ASSOCIATION, 129 + ASSOCIATION__SENDTO, 130 + NULL)? 0:1; 131 + 132 + return err; 144 133 } 145 134 146 135 /* ··· 152 133 * can use a given security association. 153 134 */ 154 135 155 - int selinux_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm) 136 + int selinux_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm, 137 + struct xfrm_policy *xp) 156 138 { 157 139 int rc = 0; 158 140 u32 sel_sid = SECINITSID_UNLABELED; 159 141 struct xfrm_sec_ctx *ctx; 142 + 143 + if (!xp->security) 144 + if (!xfrm->security) 145 + return 1; 146 + else 147 + return 0; 148 + else 149 + if (!xfrm->security) 150 + return 0; 160 151 161 152 /* Context sid is either set to label or ANY_ASSOC */ 162 153 if ((ctx = xfrm->security)) {