Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'selinux-pr-20201214' of git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/selinux

Pull selinux updates from Paul Moore:
"While we have a small number of SELinux patches for v5.11, there are a
few changes worth highlighting:

- Change the LSM network hooks to pass flowi_common structs instead
of the parent flowi struct as the LSMs do not currently need the
full flowi struct and they do not have enough information to use it
safely (missing information on the address family).

This patch was discussed both with Herbert Xu (representing team
netdev) and James Morris (representing team
LSMs-other-than-SELinux).

- Fix how we handle errors in inode_doinit_with_dentry() so that we
attempt to properly label the inode on following lookups instead of
continuing to treat it as unlabeled.

- Tweak the kernel logic around allowx, auditallowx, and dontauditx
SELinux policy statements such that the auditallowx/dontauditx
statements are effective even without the allowx statement.

Everything passes our test suite"

* tag 'selinux-pr-20201214' of git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/selinux:
lsm,selinux: pass flowi_common instead of flowi to the LSM hooks
selinux: Fix fall-through warnings for Clang
selinux: drop super_block backpointer from superblock_security_struct
selinux: fix inode_doinit_with_dentry() LABEL_INVALID error handling
selinux: allow dontauditx and auditallowx rules to take effect without allowx
selinux: fix error initialization in inode_doinit_with_dentry()

+101 -77
+1 -1
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
··· 1145 1145 fl6.daddr = ip6h->saddr; 1146 1146 fl6.fl6_dport = inet_rsk(oreq)->ir_rmt_port; 1147 1147 fl6.fl6_sport = htons(inet_rsk(oreq)->ir_num); 1148 - security_req_classify_flow(oreq, flowi6_to_flowi(&fl6)); 1148 + security_req_classify_flow(oreq, flowi6_to_flowi_common(&fl6)); 1149 1149 dst = ip6_dst_lookup_flow(sock_net(lsk), lsk, &fl6, NULL); 1150 1150 if (IS_ERR(dst)) 1151 1151 goto free_sk;
+2 -2
drivers/net/wireguard/socket.c
··· 49 49 rt = dst_cache_get_ip4(cache, &fl.saddr); 50 50 51 51 if (!rt) { 52 - security_sk_classify_flow(sock, flowi4_to_flowi(&fl)); 52 + security_sk_classify_flow(sock, flowi4_to_flowi_common(&fl)); 53 53 if (unlikely(!inet_confirm_addr(sock_net(sock), NULL, 0, 54 54 fl.saddr, RT_SCOPE_HOST))) { 55 55 endpoint->src4.s_addr = 0; ··· 129 129 dst = dst_cache_get_ip6(cache, &fl.saddr); 130 130 131 131 if (!dst) { 132 - security_sk_classify_flow(sock, flowi6_to_flowi(&fl)); 132 + security_sk_classify_flow(sock, flowi6_to_flowi_common(&fl)); 133 133 if (unlikely(!ipv6_addr_any(&fl.saddr) && 134 134 !ipv6_chk_addr(sock_net(sock), &fl.saddr, NULL, 0))) { 135 135 endpoint->src6 = fl.saddr = in6addr_any;
+2 -2
include/linux/lsm_hook_defs.h
··· 311 311 LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_inc, void) 312 312 LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_dec, void) 313 313 LSM_HOOK(void, LSM_RET_VOID, req_classify_flow, const struct request_sock *req, 314 - struct flowi *fl) 314 + struct flowi_common *flic) 315 315 LSM_HOOK(int, 0, tun_dev_alloc_security, void **security) 316 316 LSM_HOOK(void, LSM_RET_VOID, tun_dev_free_security, void *security) 317 317 LSM_HOOK(int, 0, tun_dev_create, void) ··· 351 351 LSM_HOOK(int, 0, xfrm_policy_lookup, struct xfrm_sec_ctx *ctx, u32 fl_secid, 352 352 u8 dir) 353 353 LSM_HOOK(int, 1, xfrm_state_pol_flow_match, struct xfrm_state *x, 354 - struct xfrm_policy *xp, const struct flowi *fl) 354 + struct xfrm_policy *xp, const struct flowi_common *flic) 355 355 LSM_HOOK(int, 0, xfrm_decode_session, struct sk_buff *skb, u32 *secid, 356 356 int ckall) 357 357 #endif /* CONFIG_SECURITY_NETWORK_XFRM */
+1 -1
include/linux/lsm_hooks.h
··· 1105 1105 * @xfrm_state_pol_flow_match: 1106 1106 * @x contains the state to match. 1107 1107 * @xp contains the policy to check for a match. 1108 - * @fl contains the flow to check for a match. 1108 + * @flic contains the flowi_common struct to check for a match. 1109 1109 * Return 1 if there is a match. 1110 1110 * @xfrm_decode_session: 1111 1111 * @skb points to skb to decode.
+14 -9
include/linux/security.h
··· 168 168 struct sock; 169 169 struct sockaddr; 170 170 struct socket; 171 - struct flowi; 171 + struct flowi_common; 172 172 struct dst_entry; 173 173 struct xfrm_selector; 174 174 struct xfrm_policy; ··· 1356 1356 int security_sk_alloc(struct sock *sk, int family, gfp_t priority); 1357 1357 void security_sk_free(struct sock *sk); 1358 1358 void security_sk_clone(const struct sock *sk, struct sock *newsk); 1359 - void security_sk_classify_flow(struct sock *sk, struct flowi *fl); 1360 - void security_req_classify_flow(const struct request_sock *req, struct flowi *fl); 1359 + void security_sk_classify_flow(struct sock *sk, struct flowi_common *flic); 1360 + void security_req_classify_flow(const struct request_sock *req, 1361 + struct flowi_common *flic); 1361 1362 void security_sock_graft(struct sock*sk, struct socket *parent); 1362 1363 int security_inet_conn_request(const struct sock *sk, 1363 1364 struct sk_buff *skb, struct request_sock *req); ··· 1509 1508 { 1510 1509 } 1511 1510 1512 - static inline void security_sk_classify_flow(struct sock *sk, struct flowi *fl) 1511 + static inline void security_sk_classify_flow(struct sock *sk, 1512 + struct flowi_common *flic) 1513 1513 { 1514 1514 } 1515 1515 1516 - static inline void security_req_classify_flow(const struct request_sock *req, struct flowi *fl) 1516 + static inline void security_req_classify_flow(const struct request_sock *req, 1517 + struct flowi_common *flic) 1517 1518 { 1518 1519 } 1519 1520 ··· 1642 1639 int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); 1643 1640 int security_xfrm_state_pol_flow_match(struct xfrm_state *x, 1644 1641 struct xfrm_policy *xp, 1645 - const struct flowi *fl); 1642 + const struct flowi_common *flic); 1646 1643 int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid); 1647 - void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl); 1644 + void security_skb_classify_flow(struct sk_buff *skb, struct flowi_common *flic); 
1648 1645 1649 1646 #else /* CONFIG_SECURITY_NETWORK_XFRM */ 1650 1647 ··· 1696 1693 } 1697 1694 1698 1695 static inline int security_xfrm_state_pol_flow_match(struct xfrm_state *x, 1699 - struct xfrm_policy *xp, const struct flowi *fl) 1696 + struct xfrm_policy *xp, 1697 + const struct flowi_common *flic) 1700 1698 { 1701 1699 return 1; 1702 1700 } ··· 1707 1703 return 0; 1708 1704 } 1709 1705 1710 - static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl) 1706 + static inline void security_skb_classify_flow(struct sk_buff *skb, 1707 + struct flowi_common *flic) 1711 1708 { 1712 1709 } 1713 1710
+10
include/net/flow.h
··· 195 195 return container_of(fl4, struct flowi, u.ip4); 196 196 } 197 197 198 + static inline struct flowi_common *flowi4_to_flowi_common(struct flowi4 *fl4) 199 + { 200 + return &(flowi4_to_flowi(fl4)->u.__fl_common); 201 + } 202 + 198 203 static inline struct flowi *flowi6_to_flowi(struct flowi6 *fl6) 199 204 { 200 205 return container_of(fl6, struct flowi, u.ip6); 206 + } 207 + 208 + static inline struct flowi_common *flowi6_to_flowi_common(struct flowi6 *fl6) 209 + { 210 + return &(flowi6_to_flowi(fl6)->u.__fl_common); 201 211 } 202 212 203 213 static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn)
+3 -3
include/net/route.h
··· 165 165 sk ? inet_sk_flowi_flags(sk) : 0, 166 166 daddr, saddr, dport, sport, sock_net_uid(net, sk)); 167 167 if (sk) 168 - security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); 168 + security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4)); 169 169 return ip_route_output_flow(net, fl4, sk); 170 170 } 171 171 ··· 322 322 ip_rt_put(rt); 323 323 flowi4_update_output(fl4, oif, tos, fl4->daddr, fl4->saddr); 324 324 } 325 - security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); 325 + security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4)); 326 326 return ip_route_output_flow(net, fl4, sk); 327 327 } 328 328 ··· 338 338 flowi4_update_output(fl4, sk->sk_bound_dev_if, 339 339 RT_CONN_FLAGS(sk), fl4->daddr, 340 340 fl4->saddr); 341 - security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); 341 + security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4)); 342 342 return ip_route_output_flow(sock_net(sk), fl4, sk); 343 343 } 344 344 return rt;
+1 -1
net/dccp/ipv4.c
··· 464 464 .fl4_dport = dccp_hdr(skb)->dccph_sport, 465 465 }; 466 466 467 - security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); 467 + security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4)); 468 468 rt = ip_route_output_flow(net, &fl4, sk); 469 469 if (IS_ERR(rt)) { 470 470 IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
+3 -3
net/dccp/ipv6.c
··· 203 203 fl6.flowi6_oif = ireq->ir_iif; 204 204 fl6.fl6_dport = ireq->ir_rmt_port; 205 205 fl6.fl6_sport = htons(ireq->ir_num); 206 - security_req_classify_flow(req, flowi6_to_flowi(&fl6)); 206 + security_req_classify_flow(req, flowi6_to_flowi_common(&fl6)); 207 207 208 208 209 209 rcu_read_lock(); ··· 279 279 fl6.flowi6_oif = inet6_iif(rxskb); 280 280 fl6.fl6_dport = dccp_hdr(skb)->dccph_dport; 281 281 fl6.fl6_sport = dccp_hdr(skb)->dccph_sport; 282 - security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6)); 282 + security_skb_classify_flow(rxskb, flowi6_to_flowi_common(&fl6)); 283 283 284 284 /* sk = NULL, but it is safe for now. RST socket required. */ 285 285 dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL); ··· 907 907 fl6.flowi6_oif = sk->sk_bound_dev_if; 908 908 fl6.fl6_dport = usin->sin6_port; 909 909 fl6.fl6_sport = inet->inet_sport; 910 - security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 910 + security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); 911 911 912 912 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk)); 913 913 final_p = fl6_update_dst(&fl6, opt, &final);
+2 -2
net/ipv4/icmp.c
··· 447 447 fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos); 448 448 fl4.flowi4_proto = IPPROTO_ICMP; 449 449 fl4.flowi4_oif = l3mdev_master_ifindex(skb->dev); 450 - security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); 450 + security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4)); 451 451 rt = ip_route_output_key(net, &fl4); 452 452 if (IS_ERR(rt)) 453 453 goto out_unlock; ··· 503 503 route_lookup_dev = icmp_get_route_lookup_dev(skb_in); 504 504 fl4->flowi4_oif = l3mdev_master_ifindex(route_lookup_dev); 505 505 506 - security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); 506 + security_skb_classify_flow(skb_in, flowi4_to_flowi_common(fl4)); 507 507 rt = ip_route_output_key_hash(net, fl4, skb_in); 508 508 if (IS_ERR(rt)) 509 509 return rt;
+2 -2
net/ipv4/inet_connection_sock.c
··· 602 602 (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr, 603 603 ireq->ir_loc_addr, ireq->ir_rmt_port, 604 604 htons(ireq->ir_num), sk->sk_uid); 605 - security_req_classify_flow(req, flowi4_to_flowi(fl4)); 605 + security_req_classify_flow(req, flowi4_to_flowi_common(fl4)); 606 606 rt = ip_route_output_flow(net, fl4, sk); 607 607 if (IS_ERR(rt)) 608 608 goto no_route; ··· 640 640 (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr, 641 641 ireq->ir_loc_addr, ireq->ir_rmt_port, 642 642 htons(ireq->ir_num), sk->sk_uid); 643 - security_req_classify_flow(req, flowi4_to_flowi(fl4)); 643 + security_req_classify_flow(req, flowi4_to_flowi_common(fl4)); 644 644 rt = ip_route_output_flow(net, fl4, sk); 645 645 if (IS_ERR(rt)) 646 646 goto no_route;
+1 -1
net/ipv4/ip_output.c
··· 1700 1700 daddr, saddr, 1701 1701 tcp_hdr(skb)->source, tcp_hdr(skb)->dest, 1702 1702 arg->uid); 1703 - security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); 1703 + security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4)); 1704 1704 rt = ip_route_output_key(net, &fl4); 1705 1705 if (IS_ERR(rt)) 1706 1706 return;
+1 -1
net/ipv4/ping.c
··· 778 778 fl4.fl4_icmp_type = user_icmph.type; 779 779 fl4.fl4_icmp_code = user_icmph.code; 780 780 781 - security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); 781 + security_sk_classify_flow(sk, flowi4_to_flowi_common(&fl4)); 782 782 rt = ip_route_output_flow(net, &fl4, sk); 783 783 if (IS_ERR(rt)) { 784 784 err = PTR_ERR(rt);
+1 -1
net/ipv4/raw.c
··· 640 640 goto done; 641 641 } 642 642 643 - security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); 643 + security_sk_classify_flow(sk, flowi4_to_flowi_common(&fl4)); 644 644 rt = ip_route_output_flow(net, &fl4, sk); 645 645 if (IS_ERR(rt)) { 646 646 err = PTR_ERR(rt);
+1 -1
net/ipv4/syncookies.c
··· 418 418 inet_sk_flowi_flags(sk), 419 419 opt->srr ? opt->faddr : ireq->ir_rmt_addr, 420 420 ireq->ir_loc_addr, th->source, th->dest, sk->sk_uid); 421 - security_req_classify_flow(req, flowi4_to_flowi(&fl4)); 421 + security_req_classify_flow(req, flowi4_to_flowi_common(&fl4)); 422 422 rt = ip_route_output_key(sock_net(sk), &fl4); 423 423 if (IS_ERR(rt)) { 424 424 reqsk_free(req);
+1 -1
net/ipv4/udp.c
··· 1196 1196 faddr, saddr, dport, inet->inet_sport, 1197 1197 sk->sk_uid); 1198 1198 1199 - security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); 1199 + security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4)); 1200 1200 rt = ip_route_output_flow(net, fl4, sk); 1201 1201 if (IS_ERR(rt)) { 1202 1202 err = PTR_ERR(rt);
+1 -1
net/ipv6/af_inet6.c
··· 819 819 fl6.fl6_dport = inet->inet_dport; 820 820 fl6.fl6_sport = inet->inet_sport; 821 821 fl6.flowi6_uid = sk->sk_uid; 822 - security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 822 + security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); 823 823 824 824 rcu_read_lock(); 825 825 final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt),
+1 -1
net/ipv6/datagram.c
··· 60 60 if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) 61 61 fl6->flowi6_oif = np->mcast_oif; 62 62 63 - security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); 63 + security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6)); 64 64 } 65 65 66 66 int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr)
+3 -3
net/ipv6/icmp.c
··· 573 573 fl6.fl6_icmp_code = code; 574 574 fl6.flowi6_uid = sock_net_uid(net, NULL); 575 575 fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, NULL); 576 - security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); 576 + security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6)); 577 577 578 578 np = inet6_sk(sk); 579 579 ··· 755 755 fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY; 756 756 fl6.flowi6_mark = mark; 757 757 fl6.flowi6_uid = sock_net_uid(net, NULL); 758 - security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); 758 + security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6)); 759 759 760 760 local_bh_disable(); 761 761 sk = icmpv6_xmit_lock(net); ··· 1008 1008 fl6->fl6_icmp_type = type; 1009 1009 fl6->fl6_icmp_code = 0; 1010 1010 fl6->flowi6_oif = oif; 1011 - security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); 1011 + security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6)); 1012 1012 } 1013 1013 1014 1014 static void __net_exit icmpv6_sk_exit(struct net *net)
+2 -2
net/ipv6/inet6_connection_sock.c
··· 46 46 fl6->fl6_dport = ireq->ir_rmt_port; 47 47 fl6->fl6_sport = htons(ireq->ir_num); 48 48 fl6->flowi6_uid = sk->sk_uid; 49 - security_req_classify_flow(req, flowi6_to_flowi(fl6)); 49 + security_req_classify_flow(req, flowi6_to_flowi_common(fl6)); 50 50 51 51 dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); 52 52 if (IS_ERR(dst)) ··· 95 95 fl6->fl6_sport = inet->inet_sport; 96 96 fl6->fl6_dport = inet->inet_dport; 97 97 fl6->flowi6_uid = sk->sk_uid; 98 - security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); 98 + security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6)); 99 99 100 100 rcu_read_lock(); 101 101 final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+1 -1
net/ipv6/netfilter/nf_reject_ipv6.c
··· 314 314 315 315 fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev); 316 316 fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark); 317 - security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); 317 + security_skb_classify_flow(oldskb, flowi6_to_flowi_common(&fl6)); 318 318 dst = ip6_route_output(net, NULL, &fl6); 319 319 if (dst->error) { 320 320 dst_release(dst);
+1 -1
net/ipv6/ping.c
··· 111 111 fl6.flowi6_uid = sk->sk_uid; 112 112 fl6.fl6_icmp_type = user_icmph.icmp6_type; 113 113 fl6.fl6_icmp_code = user_icmph.icmp6_code; 114 - security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 114 + security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); 115 115 116 116 ipcm6_init_sk(&ipc6, np); 117 117 ipc6.sockc.mark = sk->sk_mark;
+1 -1
net/ipv6/raw.c
··· 915 915 fl6.flowi6_oif = np->mcast_oif; 916 916 else if (!fl6.flowi6_oif) 917 917 fl6.flowi6_oif = np->ucast_oif; 918 - security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 918 + security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); 919 919 920 920 if (hdrincl) 921 921 fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH;
+1 -1
net/ipv6/syncookies.c
··· 233 233 fl6.fl6_dport = ireq->ir_rmt_port; 234 234 fl6.fl6_sport = inet_sk(sk)->inet_sport; 235 235 fl6.flowi6_uid = sk->sk_uid; 236 - security_req_classify_flow(req, flowi6_to_flowi(&fl6)); 236 + security_req_classify_flow(req, flowi6_to_flowi_common(&fl6)); 237 237 238 238 dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); 239 239 if (IS_ERR(dst))
+2 -2
net/ipv6/tcp_ipv6.c
··· 278 278 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk)); 279 279 final_p = fl6_update_dst(&fl6, opt, &final); 280 280 281 - security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 281 + security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); 282 282 283 283 dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); 284 284 if (IS_ERR(dst)) { ··· 965 965 fl6.fl6_dport = t1->dest; 966 966 fl6.fl6_sport = t1->source; 967 967 fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL); 968 - security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); 968 + security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6)); 969 969 970 970 /* Pass a socket to ip6_dst_lookup either it is for RST 971 971 * Underlying function will use this to retrieve the network
+1 -1
net/ipv6/udp.c
··· 1498 1498 } else if (!fl6.flowi6_oif) 1499 1499 fl6.flowi6_oif = np->ucast_oif; 1500 1500 1501 - security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 1501 + security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); 1502 1502 1503 1503 if (ipc6.tclass < 0) 1504 1504 ipc6.tclass = np->tclass;
+1 -1
net/l2tp/l2tp_ip6.c
··· 606 606 else if (!fl6.flowi6_oif) 607 607 fl6.flowi6_oif = np->ucast_oif; 608 608 609 - security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 609 + security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); 610 610 611 611 if (ipc6.tclass < 0) 612 612 ipc6.tclass = np->tclass;
+1 -1
net/netfilter/nf_synproxy_core.c
··· 849 849 fl6.fl6_sport = nth->source; 850 850 fl6.fl6_dport = nth->dest; 851 851 security_skb_classify_flow((struct sk_buff *)skb, 852 - flowi6_to_flowi(&fl6)); 852 + flowi6_to_flowi_common(&fl6)); 853 853 err = nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false); 854 854 if (err) { 855 855 goto free_nskb;
+4 -2
net/xfrm/xfrm_state.c
··· 1021 1021 if ((x->sel.family && 1022 1022 (x->sel.family != family || 1023 1023 !xfrm_selector_match(&x->sel, fl, family))) || 1024 - !security_xfrm_state_pol_flow_match(x, pol, fl)) 1024 + !security_xfrm_state_pol_flow_match(x, pol, 1025 + &fl->u.__fl_common)) 1025 1026 return; 1026 1027 1027 1028 if (!*best || ··· 1037 1036 if ((!x->sel.family || 1038 1037 (x->sel.family == family && 1039 1038 xfrm_selector_match(&x->sel, fl, family))) && 1040 - security_xfrm_state_pol_flow_match(x, pol, fl)) 1039 + security_xfrm_state_pol_flow_match(x, pol, 1040 + &fl->u.__fl_common)) 1041 1041 *error = -ESRCH; 1042 1042 } 1043 1043 }
+9 -8
security/security.c
··· 2208 2208 } 2209 2209 EXPORT_SYMBOL(security_sk_clone); 2210 2210 2211 - void security_sk_classify_flow(struct sock *sk, struct flowi *fl) 2211 + void security_sk_classify_flow(struct sock *sk, struct flowi_common *flic) 2212 2212 { 2213 - call_void_hook(sk_getsecid, sk, &fl->flowi_secid); 2213 + call_void_hook(sk_getsecid, sk, &flic->flowic_secid); 2214 2214 } 2215 2215 EXPORT_SYMBOL(security_sk_classify_flow); 2216 2216 2217 - void security_req_classify_flow(const struct request_sock *req, struct flowi *fl) 2217 + void security_req_classify_flow(const struct request_sock *req, 2218 + struct flowi_common *flic) 2218 2219 { 2219 - call_void_hook(req_classify_flow, req, fl); 2220 + call_void_hook(req_classify_flow, req, flic); 2220 2221 } 2221 2222 EXPORT_SYMBOL(security_req_classify_flow); 2222 2223 ··· 2409 2408 2410 2409 int security_xfrm_state_pol_flow_match(struct xfrm_state *x, 2411 2410 struct xfrm_policy *xp, 2412 - const struct flowi *fl) 2411 + const struct flowi_common *flic) 2413 2412 { 2414 2413 struct security_hook_list *hp; 2415 2414 int rc = LSM_RET_DEFAULT(xfrm_state_pol_flow_match); ··· 2425 2424 */ 2426 2425 hlist_for_each_entry(hp, &security_hook_heads.xfrm_state_pol_flow_match, 2427 2426 list) { 2428 - rc = hp->hook.xfrm_state_pol_flow_match(x, xp, fl); 2427 + rc = hp->hook.xfrm_state_pol_flow_match(x, xp, flic); 2429 2428 break; 2430 2429 } 2431 2430 return rc; ··· 2436 2435 return call_int_hook(xfrm_decode_session, 0, skb, secid, 1); 2437 2436 } 2438 2437 2439 - void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl) 2438 + void security_skb_classify_flow(struct sk_buff *skb, struct flowi_common *flic) 2440 2439 { 2441 - int rc = call_int_hook(xfrm_decode_session, 0, skb, &fl->flowi_secid, 2440 + int rc = call_int_hook(xfrm_decode_session, 0, skb, &flic->flowic_secid, 2442 2441 0); 2443 2442 2444 2443 BUG_ON(rc);
+17 -9
security/selinux/hooks.c
··· 600 600 { 601 601 const struct cred *cred = current_cred(); 602 602 struct superblock_security_struct *sbsec = sb->s_security; 603 - struct dentry *root = sbsec->sb->s_root; 603 + struct dentry *root = sb->s_root; 604 604 struct selinux_mnt_opts *opts = mnt_opts; 605 605 struct inode_security_struct *root_isec; 606 606 u32 fscontext_sid = 0, context_sid = 0, rootcontext_sid = 0; ··· 1080 1080 return rc; 1081 1081 } 1082 1082 if (sbsec->flags & ROOTCONTEXT_MNT) { 1083 - struct dentry *root = sbsec->sb->s_root; 1083 + struct dentry *root = sb->s_root; 1084 1084 struct inode_security_struct *isec = backing_inode_security(root); 1085 1085 seq_putc(m, ','); 1086 1086 seq_puts(m, ROOTCONTEXT_STR); ··· 1451 1451 * inode_doinit with a dentry, before these inodes could 1452 1452 * be used again by userspace. 1453 1453 */ 1454 - goto out; 1454 + goto out_invalid; 1455 1455 } 1456 1456 1457 1457 rc = inode_doinit_use_xattr(inode, dentry, sbsec->def_sid, ··· 1508 1508 * could be used again by userspace. 1509 1509 */ 1510 1510 if (!dentry) 1511 - goto out; 1511 + goto out_invalid; 1512 1512 rc = selinux_genfs_get_sid(dentry, sclass, 1513 1513 sbsec->flags, &sid); 1514 1514 if (rc) { ··· 1533 1533 out: 1534 1534 spin_lock(&isec->lock); 1535 1535 if (isec->initialized == LABEL_PENDING) { 1536 - if (!sid || rc) { 1536 + if (rc) { 1537 1537 isec->initialized = LABEL_INVALID; 1538 1538 goto out_unlock; 1539 1539 } 1540 - 1541 1540 isec->initialized = LABEL_INITIALIZED; 1542 1541 isec->sid = sid; 1543 1542 } ··· 1544 1545 out_unlock: 1545 1546 spin_unlock(&isec->lock); 1546 1547 return rc; 1548 + 1549 + out_invalid: 1550 + spin_lock(&isec->lock); 1551 + if (isec->initialized == LABEL_PENDING) { 1552 + isec->initialized = LABEL_INVALID; 1553 + isec->sid = sid; 1554 + } 1555 + spin_unlock(&isec->lock); 1556 + return 0; 1547 1557 } 1548 1558 1549 1559 /* Convert a Linux signal to an access vector. 
*/ ··· 2568 2560 mutex_init(&sbsec->lock); 2569 2561 INIT_LIST_HEAD(&sbsec->isec_head); 2570 2562 spin_lock_init(&sbsec->isec_lock); 2571 - sbsec->sb = sb; 2572 2563 sbsec->sid = SECINITSID_UNLABELED; 2573 2564 sbsec->def_sid = SECINITSID_FILE; 2574 2565 sbsec->mntpoint_sid = SECINITSID_UNLABELED; ··· 4036 4029 switch (id) { 4037 4030 case LOADING_MODULE: 4038 4031 rc = selinux_kernel_module_from_file(NULL); 4032 + break; 4039 4033 default: 4040 4034 break; 4041 4035 } ··· 5437 5429 } 5438 5430 5439 5431 static void selinux_req_classify_flow(const struct request_sock *req, 5440 - struct flowi *fl) 5432 + struct flowi_common *flic) 5441 5433 { 5442 - fl->flowi_secid = req->secid; 5434 + flic->flowic_secid = req->secid; 5443 5435 } 5444 5436 5445 5437 static int selinux_tun_dev_alloc_security(void **security)
-1
security/selinux/include/objsec.h
··· 61 61 }; 62 62 63 63 struct superblock_security_struct { 64 - struct super_block *sb; /* back pointer to sb object */ 65 64 u32 sid; /* SID of file system superblock */ 66 65 u32 def_sid; /* default SID for labeling */ 67 66 u32 mntpoint_sid; /* SECURITY_FS_USE_MNTPOINT context for files */
+1 -1
security/selinux/include/xfrm.h
··· 26 26 int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); 27 27 int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, 28 28 struct xfrm_policy *xp, 29 - const struct flowi *fl); 29 + const struct flowi_common *flic); 30 30 31 31 #ifdef CONFIG_SECURITY_NETWORK_XFRM 32 32 extern atomic_t selinux_xfrm_refcount;
+1 -3
security/selinux/ss/services.c
··· 596 596 node->datum.u.xperms->driver); 597 597 } 598 598 599 - /* If no ioctl commands are allowed, ignore auditallow and auditdeny */ 600 - if (node->key.specified & AVTAB_XPERMS_ALLOWED) 601 - xperms->len = 1; 599 + xperms->len = 1; 602 600 } 603 601 604 602 /*
+7 -6
security/selinux/xfrm.c
··· 175 175 */ 176 176 int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, 177 177 struct xfrm_policy *xp, 178 - const struct flowi *fl) 178 + const struct flowi_common *flic) 179 179 { 180 180 u32 state_sid; 181 + u32 flic_sid; 181 182 182 183 if (!xp->security) 183 184 if (x->security) ··· 197 196 return 0; 198 197 199 198 state_sid = x->security->ctx_sid; 199 + flic_sid = flic->flowic_secid; 200 200 201 - if (fl->flowi_secid != state_sid) 201 + if (flic_sid != state_sid) 202 202 return 0; 203 203 204 204 /* We don't need a separate SA Vs. policy polmatch check since the SA 205 205 * is now of the same label as the flow and a flow Vs. policy polmatch 206 206 * check had already happened in selinux_xfrm_policy_lookup() above. */ 207 - return (avc_has_perm(&selinux_state, 208 - fl->flowi_secid, state_sid, 209 - SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO, 210 - NULL) ? 0 : 1); 207 + return (avc_has_perm(&selinux_state, flic_sid, state_sid, 208 + SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO, 209 + NULL) ? 0 : 1); 211 210 } 212 211 213 212 static u32 selinux_xfrm_skb_sid_egress(struct sk_buff *skb)