Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: core: add UID to flows, rules, and routes

- Define a new FIB rule attribute, FRA_UID_RANGE, to describe a
range of UIDs.
- Define a RTA_UID attribute for per-UID route lookups and dumps.
- Support passing these attributes to and from userspace via
rtnetlink. The value INVALID_UID indicates no UID was
specified.
- Add a UID field to the flow structures.

Signed-off-by: Lorenzo Colitti <lorenzo@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Lorenzo Colitti; committed by David S. Miller.
622ec2c9 86741ec2

+111 -3
+8 -1
include/net/fib_rules.h
··· 8 8 #include <net/flow.h> 9 9 #include <net/rtnetlink.h> 10 10 11 + struct fib_kuid_range { 12 + kuid_t start; 13 + kuid_t end; 14 + }; 15 + 11 16 struct fib_rule { 12 17 struct list_head list; 13 18 int iifindex; ··· 35 30 int suppress_prefixlen; 36 31 char iifname[IFNAMSIZ]; 37 32 char oifname[IFNAMSIZ]; 33 + struct fib_kuid_range uid_range; 38 34 struct rcu_head rcu; 39 35 }; 40 36 ··· 98 92 [FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \ 99 93 [FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 }, \ 100 94 [FRA_GOTO] = { .type = NLA_U32 }, \ 101 - [FRA_L3MDEV] = { .type = NLA_U8 } 95 + [FRA_L3MDEV] = { .type = NLA_U8 }, \ 96 + [FRA_UID_RANGE] = { .len = sizeof(struct fib_rule_uid_range) } 102 97 103 98 static inline void fib_rule_get(struct fib_rule *rule) 104 99 {
+5
include/net/flow.h
··· 11 11 #include <linux/in6.h> 12 12 #include <linux/atomic.h> 13 13 #include <net/flow_dissector.h> 14 + #include <linux/uidgid.h> 14 15 15 16 /* 16 17 * ifindex generation is per-net namespace, and loopback is ··· 38 37 #define FLOWI_FLAG_SKIP_NH_OIF 0x04 39 38 __u32 flowic_secid; 40 39 struct flowi_tunnel flowic_tun_key; 40 + kuid_t flowic_uid; 41 41 }; 42 42 43 43 union flowi_uli { ··· 76 74 #define flowi4_flags __fl_common.flowic_flags 77 75 #define flowi4_secid __fl_common.flowic_secid 78 76 #define flowi4_tun_key __fl_common.flowic_tun_key 77 + #define flowi4_uid __fl_common.flowic_uid 79 78 80 79 /* (saddr,daddr) must be grouped, same order as in IP header */ 81 80 __be32 saddr; ··· 134 131 #define flowi6_flags __fl_common.flowic_flags 135 132 #define flowi6_secid __fl_common.flowic_secid 136 133 #define flowi6_tun_key __fl_common.flowic_tun_key 134 + #define flowi6_uid __fl_common.flowic_uid 137 135 struct in6_addr daddr; 138 136 struct in6_addr saddr; 139 137 /* Note: flowi6_tos is encoded in flowlabel, too. */ ··· 180 176 #define flowi_flags u.__fl_common.flowic_flags 181 177 #define flowi_secid u.__fl_common.flowic_secid 182 178 #define flowi_tun_key u.__fl_common.flowic_tun_key 179 + #define flowi_uid u.__fl_common.flowic_uid 183 180 } __attribute__((__aligned__(BITS_PER_LONG/8))); 184 181 185 182 static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
+6
include/uapi/linux/fib_rules.h
··· 29 29 __u32 flags; 30 30 }; 31 31 32 + struct fib_rule_uid_range { 33 + __u32 start; 34 + __u32 end; 35 + }; 36 + 32 37 enum { 33 38 FRA_UNSPEC, 34 39 FRA_DST, /* destination address */ ··· 56 51 FRA_OIFNAME, 57 52 FRA_PAD, 58 53 FRA_L3MDEV, /* iif or oif is l3mdev goto its table */ 54 + FRA_UID_RANGE, /* UID range */ 59 55 __FRA_MAX 60 56 }; 61 57
+1
include/uapi/linux/rtnetlink.h
··· 318 318 RTA_ENCAP, 319 319 RTA_EXPIRES, 320 320 RTA_PAD, 321 + RTA_UID, 321 322 __RTA_MAX 322 323 }; 323 324
+72 -2
net/core/fib_rules.c
··· 18 18 #include <net/fib_rules.h> 19 19 #include <net/ip_tunnels.h> 20 20 21 + static const struct fib_kuid_range fib_kuid_range_unset = { 22 + KUIDT_INIT(0), 23 + KUIDT_INIT(~0), 24 + }; 25 + 21 26 int fib_default_rule_add(struct fib_rules_ops *ops, 22 27 u32 pref, u32 table, u32 flags) 23 28 { ··· 38 33 r->table = table; 39 34 r->flags = flags; 40 35 r->fr_net = ops->fro_net; 36 + r->uid_range = fib_kuid_range_unset; 41 37 42 38 r->suppress_prefixlen = -1; 43 39 r->suppress_ifgroup = -1; ··· 178 172 } 179 173 EXPORT_SYMBOL_GPL(fib_rules_unregister); 180 174 175 + static int uid_range_set(struct fib_kuid_range *range) 176 + { 177 + return uid_valid(range->start) && uid_valid(range->end); 178 + } 179 + 180 + static struct fib_kuid_range nla_get_kuid_range(struct nlattr **tb) 181 + { 182 + struct fib_rule_uid_range *in; 183 + struct fib_kuid_range out; 184 + 185 + in = (struct fib_rule_uid_range *)nla_data(tb[FRA_UID_RANGE]); 186 + 187 + out.start = make_kuid(current_user_ns(), in->start); 188 + out.end = make_kuid(current_user_ns(), in->end); 189 + 190 + return out; 191 + } 192 + 193 + static int nla_put_uid_range(struct sk_buff *skb, struct fib_kuid_range *range) 194 + { 195 + struct fib_rule_uid_range out = { 196 + from_kuid_munged(current_user_ns(), range->start), 197 + from_kuid_munged(current_user_ns(), range->end) 198 + }; 199 + 200 + return nla_put(skb, FRA_UID_RANGE, sizeof(out), &out); 201 + } 202 + 181 203 static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops, 182 204 struct flowi *fl, int flags, 183 205 struct fib_lookup_arg *arg) ··· 225 191 goto out; 226 192 227 193 if (rule->l3mdev && !l3mdev_fib_rule_match(rule->fr_net, fl, arg)) 194 + goto out; 195 + 196 + if (uid_lt(fl->flowi_uid, rule->uid_range.start) || 197 + uid_gt(fl->flowi_uid, rule->uid_range.end)) 228 198 goto out; 229 199 230 200 ret = ops->match(rule, fl, flags); ··· 467 429 if (rule->l3mdev && rule->table) 468 430 goto errout_free; 469 431 432 + if 
(tb[FRA_UID_RANGE]) { 433 + if (current_user_ns() != net->user_ns) { 434 + err = -EPERM; 435 + goto errout_free; 436 + } 437 + 438 + rule->uid_range = nla_get_kuid_range(tb); 439 + 440 + if (!uid_range_set(&rule->uid_range) || 441 + !uid_lte(rule->uid_range.start, rule->uid_range.end)) 442 + goto errout_free; 443 + } else { 444 + rule->uid_range = fib_kuid_range_unset; 445 + } 446 + 470 447 if ((nlh->nlmsg_flags & NLM_F_EXCL) && 471 448 rule_exists(ops, frh, tb, rule)) { 472 449 err = -EEXIST; ··· 550 497 struct fib_rules_ops *ops = NULL; 551 498 struct fib_rule *rule, *tmp; 552 499 struct nlattr *tb[FRA_MAX+1]; 500 + struct fib_kuid_range range; 553 501 int err = -EINVAL; 554 502 555 503 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) ··· 569 515 err = validate_rulemsg(frh, tb, ops); 570 516 if (err < 0) 571 517 goto errout; 518 + 519 + if (tb[FRA_UID_RANGE]) { 520 + range = nla_get_kuid_range(tb); 521 + if (!uid_range_set(&range)) 522 + goto errout; 523 + } else { 524 + range = fib_kuid_range_unset; 525 + } 572 526 573 527 list_for_each_entry(rule, &ops->rules_list, list) { 574 528 if (frh->action && (frh->action != rule->action)) ··· 612 550 613 551 if (tb[FRA_L3MDEV] && 614 552 (rule->l3mdev != nla_get_u8(tb[FRA_L3MDEV]))) 553 + continue; 554 + 555 + if (uid_range_set(&range) && 556 + (!uid_eq(rule->uid_range.start, range.start) || 557 + !uid_eq(rule->uid_range.end, range.end))) 615 558 continue; 616 559 617 560 if (!ops->compare(rule, frh, tb)) ··· 686 619 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */ 687 620 + nla_total_size(4) /* FRA_FWMARK */ 688 621 + nla_total_size(4) /* FRA_FWMASK */ 689 - + nla_total_size_64bit(8); /* FRA_TUN_ID */ 622 + + nla_total_size_64bit(8) /* FRA_TUN_ID */ 623 + + nla_total_size(sizeof(struct fib_kuid_range)); 690 624 691 625 if (ops->nlmsg_payload) 692 626 payload += ops->nlmsg_payload(rule); ··· 747 679 (rule->tun_id && 748 680 nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD)) || 749 681 (rule->l3mdev && 750 - 
nla_put_u8(skb, FRA_L3MDEV, rule->l3mdev))) 682 + nla_put_u8(skb, FRA_L3MDEV, rule->l3mdev)) || 683 + (uid_range_set(&rule->uid_range) && 684 + nla_put_uid_range(skb, &rule->uid_range))) 751 685 goto nla_put_failure; 752 686 753 687 if (rule->suppress_ifgroup != -1) {
+1
net/ipv4/fib_frontend.c
··· 610 610 [RTA_FLOW] = { .type = NLA_U32 }, 611 611 [RTA_ENCAP_TYPE] = { .type = NLA_U16 }, 612 612 [RTA_ENCAP] = { .type = NLA_NESTED }, 613 + [RTA_UID] = { .type = NLA_U32 }, 613 614 }; 614 615 615 616 static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
+11
net/ipv4/route.c
··· 2504 2504 nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark)) 2505 2505 goto nla_put_failure; 2506 2506 2507 + if (!uid_eq(fl4->flowi4_uid, INVALID_UID) && 2508 + nla_put_u32(skb, RTA_UID, 2509 + from_kuid_munged(current_user_ns(), fl4->flowi4_uid))) 2510 + goto nla_put_failure; 2511 + 2507 2512 error = rt->dst.error; 2508 2513 2509 2514 if (rt_is_input_route(rt)) { ··· 2561 2556 int mark; 2562 2557 struct sk_buff *skb; 2563 2558 u32 table_id = RT_TABLE_MAIN; 2559 + kuid_t uid; 2564 2560 2565 2561 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy); 2566 2562 if (err < 0) ··· 2589 2583 dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0; 2590 2584 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0; 2591 2585 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0; 2586 + if (tb[RTA_UID]) 2587 + uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID])); 2588 + else 2589 + uid = (iif ? INVALID_UID : current_uid()); 2592 2590 2593 2591 memset(&fl4, 0, sizeof(fl4)); 2594 2592 fl4.daddr = dst; ··· 2600 2590 fl4.flowi4_tos = rtm->rtm_tos; 2601 2591 fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0; 2602 2592 fl4.flowi4_mark = mark; 2593 + fl4.flowi4_uid = uid; 2603 2594 2604 2595 if (iif) { 2605 2596 struct net_device *dev;
+7
net/ipv6/route.c
··· 2797 2797 [RTA_ENCAP_TYPE] = { .type = NLA_U16 }, 2798 2798 [RTA_ENCAP] = { .type = NLA_NESTED }, 2799 2799 [RTA_EXPIRES] = { .type = NLA_U32 }, 2800 + [RTA_UID] = { .type = NLA_U32 }, 2800 2801 }; 2801 2802 2802 2803 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, ··· 3371 3370 3372 3371 if (tb[RTA_MARK]) 3373 3372 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]); 3373 + 3374 + if (tb[RTA_UID]) 3375 + fl6.flowi6_uid = make_kuid(current_user_ns(), 3376 + nla_get_u32(tb[RTA_UID])); 3377 + else 3378 + fl6.flowi6_uid = iif ? INVALID_UID : current_uid(); 3374 3379 3375 3380 if (iif) { 3376 3381 struct net_device *dev;