Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

lsm,selinux: pass flowi_common instead of flowi to the LSM hooks

As pointed out by Herbert in a recent related patch, the LSM hooks do
not have the necessary address family information to use the flowi
struct safely. As none of the LSMs currently use any of the
protocol-specific flowi information, replace the flowi pointers with
pointers to the address-family-independent flowi_common struct.

Reported-by: Herbert Xu <herbert@gondor.apana.org.au>
Acked-by: James Morris <jamorris@linux.microsoft.com>
Signed-off-by: Paul Moore <paul@paul-moore.com>

+85 -66
+1 -1
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
··· 1148 1148 fl6.daddr = ip6h->saddr; 1149 1149 fl6.fl6_dport = inet_rsk(oreq)->ir_rmt_port; 1150 1150 fl6.fl6_sport = htons(inet_rsk(oreq)->ir_num); 1151 - security_req_classify_flow(oreq, flowi6_to_flowi(&fl6)); 1151 + security_req_classify_flow(oreq, flowi6_to_flowi_common(&fl6)); 1152 1152 dst = ip6_dst_lookup_flow(sock_net(lsk), lsk, &fl6, NULL); 1153 1153 if (IS_ERR(dst)) 1154 1154 goto free_sk;
+2 -2
drivers/net/wireguard/socket.c
··· 49 49 rt = dst_cache_get_ip4(cache, &fl.saddr); 50 50 51 51 if (!rt) { 52 - security_sk_classify_flow(sock, flowi4_to_flowi(&fl)); 52 + security_sk_classify_flow(sock, flowi4_to_flowi_common(&fl)); 53 53 if (unlikely(!inet_confirm_addr(sock_net(sock), NULL, 0, 54 54 fl.saddr, RT_SCOPE_HOST))) { 55 55 endpoint->src4.s_addr = 0; ··· 129 129 dst = dst_cache_get_ip6(cache, &fl.saddr); 130 130 131 131 if (!dst) { 132 - security_sk_classify_flow(sock, flowi6_to_flowi(&fl)); 132 + security_sk_classify_flow(sock, flowi6_to_flowi_common(&fl)); 133 133 if (unlikely(!ipv6_addr_any(&fl.saddr) && 134 134 !ipv6_chk_addr(sock_net(sock), &fl.saddr, NULL, 0))) { 135 135 endpoint->src6 = fl.saddr = in6addr_any;
+2 -2
include/linux/lsm_hook_defs.h
··· 311 311 LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_inc, void) 312 312 LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_dec, void) 313 313 LSM_HOOK(void, LSM_RET_VOID, req_classify_flow, const struct request_sock *req, 314 - struct flowi *fl) 314 + struct flowi_common *flic) 315 315 LSM_HOOK(int, 0, tun_dev_alloc_security, void **security) 316 316 LSM_HOOK(void, LSM_RET_VOID, tun_dev_free_security, void *security) 317 317 LSM_HOOK(int, 0, tun_dev_create, void) ··· 351 351 LSM_HOOK(int, 0, xfrm_policy_lookup, struct xfrm_sec_ctx *ctx, u32 fl_secid, 352 352 u8 dir) 353 353 LSM_HOOK(int, 1, xfrm_state_pol_flow_match, struct xfrm_state *x, 354 - struct xfrm_policy *xp, const struct flowi *fl) 354 + struct xfrm_policy *xp, const struct flowi_common *flic) 355 355 LSM_HOOK(int, 0, xfrm_decode_session, struct sk_buff *skb, u32 *secid, 356 356 int ckall) 357 357 #endif /* CONFIG_SECURITY_NETWORK_XFRM */
+1 -1
include/linux/lsm_hooks.h
··· 1105 1105 * @xfrm_state_pol_flow_match: 1106 1106 * @x contains the state to match. 1107 1107 * @xp contains the policy to check for a match. 1108 - * @fl contains the flow to check for a match. 1108 + * @flic contains the flowi_common struct to check for a match. 1109 1109 * Return 1 if there is a match. 1110 1110 * @xfrm_decode_session: 1111 1111 * @skb points to skb to decode.
+14 -9
include/linux/security.h
··· 167 167 struct sock; 168 168 struct sockaddr; 169 169 struct socket; 170 - struct flowi; 170 + struct flowi_common; 171 171 struct dst_entry; 172 172 struct xfrm_selector; 173 173 struct xfrm_policy; ··· 1355 1355 int security_sk_alloc(struct sock *sk, int family, gfp_t priority); 1356 1356 void security_sk_free(struct sock *sk); 1357 1357 void security_sk_clone(const struct sock *sk, struct sock *newsk); 1358 - void security_sk_classify_flow(struct sock *sk, struct flowi *fl); 1359 - void security_req_classify_flow(const struct request_sock *req, struct flowi *fl); 1358 + void security_sk_classify_flow(struct sock *sk, struct flowi_common *flic); 1359 + void security_req_classify_flow(const struct request_sock *req, 1360 + struct flowi_common *flic); 1360 1361 void security_sock_graft(struct sock*sk, struct socket *parent); 1361 1362 int security_inet_conn_request(struct sock *sk, 1362 1363 struct sk_buff *skb, struct request_sock *req); ··· 1508 1507 { 1509 1508 } 1510 1509 1511 - static inline void security_sk_classify_flow(struct sock *sk, struct flowi *fl) 1510 + static inline void security_sk_classify_flow(struct sock *sk, 1511 + struct flowi_common *flic) 1512 1512 { 1513 1513 } 1514 1514 1515 - static inline void security_req_classify_flow(const struct request_sock *req, struct flowi *fl) 1515 + static inline void security_req_classify_flow(const struct request_sock *req, 1516 + struct flowi_common *flic) 1516 1517 { 1517 1518 } 1518 1519 ··· 1641 1638 int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); 1642 1639 int security_xfrm_state_pol_flow_match(struct xfrm_state *x, 1643 1640 struct xfrm_policy *xp, 1644 - const struct flowi *fl); 1641 + const struct flowi_common *flic); 1645 1642 int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid); 1646 - void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl); 1643 + void security_skb_classify_flow(struct sk_buff *skb, struct flowi_common *flic); 1647 
1644 1648 1645 #else /* CONFIG_SECURITY_NETWORK_XFRM */ 1649 1646 ··· 1695 1692 } 1696 1693 1697 1694 static inline int security_xfrm_state_pol_flow_match(struct xfrm_state *x, 1698 - struct xfrm_policy *xp, const struct flowi *fl) 1695 + struct xfrm_policy *xp, 1696 + const struct flowi_common *flic) 1699 1697 { 1700 1698 return 1; 1701 1699 } ··· 1706 1702 return 0; 1707 1703 } 1708 1704 1709 - static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl) 1705 + static inline void security_skb_classify_flow(struct sk_buff *skb, 1706 + struct flowi_common *flic) 1710 1707 { 1711 1708 } 1712 1709
+10
include/net/flow.h
··· 195 195 return container_of(fl4, struct flowi, u.ip4); 196 196 } 197 197 198 + static inline struct flowi_common *flowi4_to_flowi_common(struct flowi4 *fl4) 199 + { 200 + return &(flowi4_to_flowi(fl4)->u.__fl_common); 201 + } 202 + 198 203 static inline struct flowi *flowi6_to_flowi(struct flowi6 *fl6) 199 204 { 200 205 return container_of(fl6, struct flowi, u.ip6); 206 + } 207 + 208 + static inline struct flowi_common *flowi6_to_flowi_common(struct flowi6 *fl6) 209 + { 210 + return &(flowi6_to_flowi(fl6)->u.__fl_common); 201 211 } 202 212 203 213 static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn)
+3 -3
include/net/route.h
··· 165 165 sk ? inet_sk_flowi_flags(sk) : 0, 166 166 daddr, saddr, dport, sport, sock_net_uid(net, sk)); 167 167 if (sk) 168 - security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); 168 + security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4)); 169 169 return ip_route_output_flow(net, fl4, sk); 170 170 } 171 171 ··· 322 322 ip_rt_put(rt); 323 323 flowi4_update_output(fl4, oif, tos, fl4->daddr, fl4->saddr); 324 324 } 325 - security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); 325 + security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4)); 326 326 return ip_route_output_flow(net, fl4, sk); 327 327 } 328 328 ··· 338 338 flowi4_update_output(fl4, sk->sk_bound_dev_if, 339 339 RT_CONN_FLAGS(sk), fl4->daddr, 340 340 fl4->saddr); 341 - security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); 341 + security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4)); 342 342 return ip_route_output_flow(sock_net(sk), fl4, sk); 343 343 } 344 344 return rt;
+1 -1
net/dccp/ipv4.c
··· 464 464 .fl4_dport = dccp_hdr(skb)->dccph_sport, 465 465 }; 466 466 467 - security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); 467 + security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4)); 468 468 rt = ip_route_output_flow(net, &fl4, sk); 469 469 if (IS_ERR(rt)) { 470 470 IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
+3 -3
net/dccp/ipv6.c
··· 203 203 fl6.flowi6_oif = ireq->ir_iif; 204 204 fl6.fl6_dport = ireq->ir_rmt_port; 205 205 fl6.fl6_sport = htons(ireq->ir_num); 206 - security_req_classify_flow(req, flowi6_to_flowi(&fl6)); 206 + security_req_classify_flow(req, flowi6_to_flowi_common(&fl6)); 207 207 208 208 209 209 rcu_read_lock(); ··· 279 279 fl6.flowi6_oif = inet6_iif(rxskb); 280 280 fl6.fl6_dport = dccp_hdr(skb)->dccph_dport; 281 281 fl6.fl6_sport = dccp_hdr(skb)->dccph_sport; 282 - security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6)); 282 + security_skb_classify_flow(rxskb, flowi6_to_flowi_common(&fl6)); 283 283 284 284 /* sk = NULL, but it is safe for now. RST socket required. */ 285 285 dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL); ··· 907 907 fl6.flowi6_oif = sk->sk_bound_dev_if; 908 908 fl6.fl6_dport = usin->sin6_port; 909 909 fl6.fl6_sport = inet->inet_sport; 910 - security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 910 + security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); 911 911 912 912 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk)); 913 913 final_p = fl6_update_dst(&fl6, opt, &final);
+2 -2
net/ipv4/icmp.c
··· 447 447 fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos); 448 448 fl4.flowi4_proto = IPPROTO_ICMP; 449 449 fl4.flowi4_oif = l3mdev_master_ifindex(skb->dev); 450 - security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); 450 + security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4)); 451 451 rt = ip_route_output_key(net, &fl4); 452 452 if (IS_ERR(rt)) 453 453 goto out_unlock; ··· 503 503 route_lookup_dev = icmp_get_route_lookup_dev(skb_in); 504 504 fl4->flowi4_oif = l3mdev_master_ifindex(route_lookup_dev); 505 505 506 - security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); 506 + security_skb_classify_flow(skb_in, flowi4_to_flowi_common(fl4)); 507 507 rt = ip_route_output_key_hash(net, fl4, skb_in); 508 508 if (IS_ERR(rt)) 509 509 return rt;
+2 -2
net/ipv4/inet_connection_sock.c
··· 602 602 (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr, 603 603 ireq->ir_loc_addr, ireq->ir_rmt_port, 604 604 htons(ireq->ir_num), sk->sk_uid); 605 - security_req_classify_flow(req, flowi4_to_flowi(fl4)); 605 + security_req_classify_flow(req, flowi4_to_flowi_common(fl4)); 606 606 rt = ip_route_output_flow(net, fl4, sk); 607 607 if (IS_ERR(rt)) 608 608 goto no_route; ··· 640 640 (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr, 641 641 ireq->ir_loc_addr, ireq->ir_rmt_port, 642 642 htons(ireq->ir_num), sk->sk_uid); 643 - security_req_classify_flow(req, flowi4_to_flowi(fl4)); 643 + security_req_classify_flow(req, flowi4_to_flowi_common(fl4)); 644 644 rt = ip_route_output_flow(net, fl4, sk); 645 645 if (IS_ERR(rt)) 646 646 goto no_route;
+1 -1
net/ipv4/ip_output.c
··· 1700 1700 daddr, saddr, 1701 1701 tcp_hdr(skb)->source, tcp_hdr(skb)->dest, 1702 1702 arg->uid); 1703 - security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); 1703 + security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4)); 1704 1704 rt = ip_route_output_key(net, &fl4); 1705 1705 if (IS_ERR(rt)) 1706 1706 return;
+1 -1
net/ipv4/ping.c
··· 778 778 fl4.fl4_icmp_type = user_icmph.type; 779 779 fl4.fl4_icmp_code = user_icmph.code; 780 780 781 - security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); 781 + security_sk_classify_flow(sk, flowi4_to_flowi_common(&fl4)); 782 782 rt = ip_route_output_flow(net, &fl4, sk); 783 783 if (IS_ERR(rt)) { 784 784 err = PTR_ERR(rt);
+1 -1
net/ipv4/raw.c
··· 640 640 goto done; 641 641 } 642 642 643 - security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); 643 + security_sk_classify_flow(sk, flowi4_to_flowi_common(&fl4)); 644 644 rt = ip_route_output_flow(net, &fl4, sk); 645 645 if (IS_ERR(rt)) { 646 646 err = PTR_ERR(rt);
+1 -1
net/ipv4/syncookies.c
··· 418 418 inet_sk_flowi_flags(sk), 419 419 opt->srr ? opt->faddr : ireq->ir_rmt_addr, 420 420 ireq->ir_loc_addr, th->source, th->dest, sk->sk_uid); 421 - security_req_classify_flow(req, flowi4_to_flowi(&fl4)); 421 + security_req_classify_flow(req, flowi4_to_flowi_common(&fl4)); 422 422 rt = ip_route_output_key(sock_net(sk), &fl4); 423 423 if (IS_ERR(rt)) { 424 424 reqsk_free(req);
+1 -1
net/ipv4/udp.c
··· 1197 1197 faddr, saddr, dport, inet->inet_sport, 1198 1198 sk->sk_uid); 1199 1199 1200 - security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); 1200 + security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4)); 1201 1201 rt = ip_route_output_flow(net, fl4, sk); 1202 1202 if (IS_ERR(rt)) { 1203 1203 err = PTR_ERR(rt);
+1 -1
net/ipv6/af_inet6.c
··· 819 819 fl6.fl6_dport = inet->inet_dport; 820 820 fl6.fl6_sport = inet->inet_sport; 821 821 fl6.flowi6_uid = sk->sk_uid; 822 - security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 822 + security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); 823 823 824 824 rcu_read_lock(); 825 825 final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt),
+1 -1
net/ipv6/datagram.c
··· 60 60 if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) 61 61 fl6->flowi6_oif = np->mcast_oif; 62 62 63 - security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); 63 + security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6)); 64 64 } 65 65 66 66 int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr)
+3 -3
net/ipv6/icmp.c
··· 567 567 fl6.fl6_icmp_code = code; 568 568 fl6.flowi6_uid = sock_net_uid(net, NULL); 569 569 fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, NULL); 570 - security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); 570 + security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6)); 571 571 572 572 np = inet6_sk(sk); 573 573 ··· 749 749 fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY; 750 750 fl6.flowi6_mark = mark; 751 751 fl6.flowi6_uid = sock_net_uid(net, NULL); 752 - security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); 752 + security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6)); 753 753 754 754 local_bh_disable(); 755 755 sk = icmpv6_xmit_lock(net); ··· 1002 1002 fl6->fl6_icmp_type = type; 1003 1003 fl6->fl6_icmp_code = 0; 1004 1004 fl6->flowi6_oif = oif; 1005 - security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); 1005 + security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6)); 1006 1006 } 1007 1007 1008 1008 static void __net_exit icmpv6_sk_exit(struct net *net)
+2 -2
net/ipv6/inet6_connection_sock.c
··· 46 46 fl6->fl6_dport = ireq->ir_rmt_port; 47 47 fl6->fl6_sport = htons(ireq->ir_num); 48 48 fl6->flowi6_uid = sk->sk_uid; 49 - security_req_classify_flow(req, flowi6_to_flowi(fl6)); 49 + security_req_classify_flow(req, flowi6_to_flowi_common(fl6)); 50 50 51 51 dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); 52 52 if (IS_ERR(dst)) ··· 95 95 fl6->fl6_sport = inet->inet_sport; 96 96 fl6->fl6_dport = inet->inet_dport; 97 97 fl6->flowi6_uid = sk->sk_uid; 98 - security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); 98 + security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6)); 99 99 100 100 rcu_read_lock(); 101 101 final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+1 -1
net/ipv6/netfilter/nf_reject_ipv6.c
··· 179 179 180 180 fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev); 181 181 fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark); 182 - security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); 182 + security_skb_classify_flow(oldskb, flowi6_to_flowi_common(&fl6)); 183 183 dst = ip6_route_output(net, NULL, &fl6); 184 184 if (dst->error) { 185 185 dst_release(dst);
+1 -1
net/ipv6/ping.c
··· 111 111 fl6.flowi6_uid = sk->sk_uid; 112 112 fl6.fl6_icmp_type = user_icmph.icmp6_type; 113 113 fl6.fl6_icmp_code = user_icmph.icmp6_code; 114 - security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 114 + security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); 115 115 116 116 ipcm6_init_sk(&ipc6, np); 117 117 ipc6.sockc.mark = sk->sk_mark;
+1 -1
net/ipv6/raw.c
··· 915 915 fl6.flowi6_oif = np->mcast_oif; 916 916 else if (!fl6.flowi6_oif) 917 917 fl6.flowi6_oif = np->ucast_oif; 918 - security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 918 + security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); 919 919 920 920 if (hdrincl) 921 921 fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH;
+1 -1
net/ipv6/syncookies.c
··· 233 233 fl6.fl6_dport = ireq->ir_rmt_port; 234 234 fl6.fl6_sport = inet_sk(sk)->inet_sport; 235 235 fl6.flowi6_uid = sk->sk_uid; 236 - security_req_classify_flow(req, flowi6_to_flowi(&fl6)); 236 + security_req_classify_flow(req, flowi6_to_flowi_common(&fl6)); 237 237 238 238 dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); 239 239 if (IS_ERR(dst))
+2 -2
net/ipv6/tcp_ipv6.c
··· 278 278 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk)); 279 279 final_p = fl6_update_dst(&fl6, opt, &final); 280 280 281 - security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 281 + security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); 282 282 283 283 dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); 284 284 if (IS_ERR(dst)) { ··· 954 954 fl6.fl6_dport = t1->dest; 955 955 fl6.fl6_sport = t1->source; 956 956 fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL); 957 - security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); 957 + security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6)); 958 958 959 959 /* Pass a socket to ip6_dst_lookup either it is for RST 960 960 * Underlying function will use this to retrieve the network
+1 -1
net/ipv6/udp.c
··· 1496 1496 } else if (!fl6.flowi6_oif) 1497 1497 fl6.flowi6_oif = np->ucast_oif; 1498 1498 1499 - security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 1499 + security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); 1500 1500 1501 1501 if (ipc6.tclass < 0) 1502 1502 ipc6.tclass = np->tclass;
+1 -1
net/l2tp/l2tp_ip6.c
··· 606 606 else if (!fl6.flowi6_oif) 607 607 fl6.flowi6_oif = np->ucast_oif; 608 608 609 - security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 609 + security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); 610 610 611 611 if (ipc6.tclass < 0) 612 612 ipc6.tclass = np->tclass;
+1 -1
net/netfilter/nf_synproxy_core.c
··· 849 849 fl6.fl6_sport = nth->source; 850 850 fl6.fl6_dport = nth->dest; 851 851 security_skb_classify_flow((struct sk_buff *)skb, 852 - flowi6_to_flowi(&fl6)); 852 + flowi6_to_flowi_common(&fl6)); 853 853 err = nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false); 854 854 if (err) { 855 855 goto free_nskb;
+4 -2
net/xfrm/xfrm_state.c
··· 1021 1021 if ((x->sel.family && 1022 1022 (x->sel.family != family || 1023 1023 !xfrm_selector_match(&x->sel, fl, family))) || 1024 - !security_xfrm_state_pol_flow_match(x, pol, fl)) 1024 + !security_xfrm_state_pol_flow_match(x, pol, 1025 + &fl->u.__fl_common)) 1025 1026 return; 1026 1027 1027 1028 if (!*best || ··· 1037 1036 if ((!x->sel.family || 1038 1037 (x->sel.family == family && 1039 1038 xfrm_selector_match(&x->sel, fl, family))) && 1040 - security_xfrm_state_pol_flow_match(x, pol, fl)) 1039 + security_xfrm_state_pol_flow_match(x, pol, 1040 + &fl->u.__fl_common)) 1041 1041 *error = -ESRCH; 1042 1042 } 1043 1043 }
+9 -8
security/security.c
··· 2207 2207 } 2208 2208 EXPORT_SYMBOL(security_sk_clone); 2209 2209 2210 - void security_sk_classify_flow(struct sock *sk, struct flowi *fl) 2210 + void security_sk_classify_flow(struct sock *sk, struct flowi_common *flic) 2211 2211 { 2212 - call_void_hook(sk_getsecid, sk, &fl->flowi_secid); 2212 + call_void_hook(sk_getsecid, sk, &flic->flowic_secid); 2213 2213 } 2214 2214 EXPORT_SYMBOL(security_sk_classify_flow); 2215 2215 2216 - void security_req_classify_flow(const struct request_sock *req, struct flowi *fl) 2216 + void security_req_classify_flow(const struct request_sock *req, 2217 + struct flowi_common *flic) 2217 2218 { 2218 - call_void_hook(req_classify_flow, req, fl); 2219 + call_void_hook(req_classify_flow, req, flic); 2219 2220 } 2220 2221 EXPORT_SYMBOL(security_req_classify_flow); 2221 2222 ··· 2408 2407 2409 2408 int security_xfrm_state_pol_flow_match(struct xfrm_state *x, 2410 2409 struct xfrm_policy *xp, 2411 - const struct flowi *fl) 2410 + const struct flowi_common *flic) 2412 2411 { 2413 2412 struct security_hook_list *hp; 2414 2413 int rc = LSM_RET_DEFAULT(xfrm_state_pol_flow_match); ··· 2424 2423 */ 2425 2424 hlist_for_each_entry(hp, &security_hook_heads.xfrm_state_pol_flow_match, 2426 2425 list) { 2427 - rc = hp->hook.xfrm_state_pol_flow_match(x, xp, fl); 2426 + rc = hp->hook.xfrm_state_pol_flow_match(x, xp, flic); 2428 2427 break; 2429 2428 } 2430 2429 return rc; ··· 2435 2434 return call_int_hook(xfrm_decode_session, 0, skb, secid, 1); 2436 2435 } 2437 2436 2438 - void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl) 2437 + void security_skb_classify_flow(struct sk_buff *skb, struct flowi_common *flic) 2439 2438 { 2440 - int rc = call_int_hook(xfrm_decode_session, 0, skb, &fl->flowi_secid, 2439 + int rc = call_int_hook(xfrm_decode_session, 0, skb, &flic->flowic_secid, 2441 2440 0); 2442 2441 2443 2442 BUG_ON(rc);
+2 -2
security/selinux/hooks.c
··· 5437 5437 } 5438 5438 5439 5439 static void selinux_req_classify_flow(const struct request_sock *req, 5440 - struct flowi *fl) 5440 + struct flowi_common *flic) 5441 5441 { 5442 - fl->flowi_secid = req->secid; 5442 + flic->flowic_secid = req->secid; 5443 5443 } 5444 5444 5445 5445 static int selinux_tun_dev_alloc_security(void **security)
+1 -1
security/selinux/include/xfrm.h
··· 26 26 int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); 27 27 int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, 28 28 struct xfrm_policy *xp, 29 - const struct flowi *fl); 29 + const struct flowi_common *flic); 30 30 31 31 #ifdef CONFIG_SECURITY_NETWORK_XFRM 32 32 extern atomic_t selinux_xfrm_refcount;
+7 -6
security/selinux/xfrm.c
··· 175 175 */ 176 176 int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, 177 177 struct xfrm_policy *xp, 178 - const struct flowi *fl) 178 + const struct flowi_common *flic) 179 179 { 180 180 u32 state_sid; 181 + u32 flic_sid; 181 182 182 183 if (!xp->security) 183 184 if (x->security) ··· 197 196 return 0; 198 197 199 198 state_sid = x->security->ctx_sid; 199 + flic_sid = flic->flowic_secid; 200 200 201 - if (fl->flowi_secid != state_sid) 201 + if (flic_sid != state_sid) 202 202 return 0; 203 203 204 204 /* We don't need a separate SA Vs. policy polmatch check since the SA 205 205 * is now of the same label as the flow and a flow Vs. policy polmatch 206 206 * check had already happened in selinux_xfrm_policy_lookup() above. */ 207 - return (avc_has_perm(&selinux_state, 208 - fl->flowi_secid, state_sid, 209 - SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO, 210 - NULL) ? 0 : 1); 207 + return (avc_has_perm(&selinux_state, flic_sid, state_sid, 208 + SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO, 209 + NULL) ? 0 : 1); 211 210 } 212 211 213 212 static u32 selinux_xfrm_skb_sid_egress(struct sk_buff *skb)