Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'net-bridge-add-per-vlan-state-option'

Nikolay Aleksandrov says:

====================
net: bridge: add per-vlan state option

This set adds the first per-vlan option - state, which uses the new vlan
infrastructure that was recently added. It gives us forwarding control on a
per-vlan basis. The first 3 patches prepare the vlan code to support option
dumping and modification. We still compress vlan ranges which have equal
options, each new option will have to add its own equality check to
br_vlan_opts_eq(). The vlans are created in forwarding state by default to
be backwards compatible and vlan state is considered only when the port
state is forwarding (more info in patch 4).
I'll send the selftest for the vlan state with the iproute2 patch-set.

v2: patch 3: do full (all-vlan) notification only on vlan
create/delete, otherwise use the per-vlan notifications only,
rework how option change ranges are detected, add more verbose error
messages when setting options and add checks if a vlan should be used.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+311 -32
+2
include/uapi/linux/if_bridge.h
··· 130 130 #define BRIDGE_VLAN_INFO_RANGE_BEGIN (1<<3) /* VLAN is start of vlan range */ 131 131 #define BRIDGE_VLAN_INFO_RANGE_END (1<<4) /* VLAN is end of vlan range */ 132 132 #define BRIDGE_VLAN_INFO_BRENTRY (1<<5) /* Global bridge VLAN entry */ 133 + #define BRIDGE_VLAN_INFO_ONLY_OPTS (1<<6) /* Skip create/delete/flags */ 133 134 134 135 struct bridge_vlan_info { 135 136 __u16 flags; ··· 191 190 BRIDGE_VLANDB_ENTRY_UNSPEC, 192 191 BRIDGE_VLANDB_ENTRY_INFO, 193 192 BRIDGE_VLANDB_ENTRY_RANGE, 193 + BRIDGE_VLANDB_ENTRY_STATE, 194 194 __BRIDGE_VLANDB_ENTRY_MAX, 195 195 }; 196 196 #define BRIDGE_VLANDB_ENTRY_MAX (__BRIDGE_VLANDB_ENTRY_MAX - 1)
+1 -1
net/bridge/Makefile
··· 20 20 21 21 bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o br_mdb.o 22 22 23 - bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o br_vlan_tunnel.o 23 + bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o br_vlan_tunnel.o br_vlan_options.o 24 24 25 25 bridge-$(CONFIG_NET_SWITCHDEV) += br_switchdev.o 26 26
+2 -1
net/bridge/br_device.c
··· 32 32 struct net_bridge_mdb_entry *mdst; 33 33 struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats); 34 34 const struct nf_br_ops *nf_ops; 35 + u8 state = BR_STATE_FORWARDING; 35 36 const unsigned char *dest; 36 37 struct ethhdr *eth; 37 38 u16 vid = 0; ··· 57 56 eth = eth_hdr(skb); 58 57 skb_pull(skb, ETH_HLEN); 59 58 60 - if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid)) 59 + if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid, &state)) 61 60 goto out; 62 61 63 62 if (IS_ENABLED(CONFIG_INET) &&
+1 -1
net/bridge/br_forward.c
··· 25 25 26 26 vg = nbp_vlan_group_rcu(p); 27 27 return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) && 28 - br_allowed_egress(vg, skb) && p->state == BR_STATE_FORWARDING && 28 + p->state == BR_STATE_FORWARDING && br_allowed_egress(vg, skb) && 29 29 nbp_switchdev_allowed_egress(p, skb) && 30 30 !br_skb_isolated(p, skb); 31 31 }
+5 -2
net/bridge/br_input.c
··· 76 76 bool local_rcv, mcast_hit = false; 77 77 struct net_bridge *br; 78 78 u16 vid = 0; 79 + u8 state; 79 80 80 81 if (!p || p->state == BR_STATE_DISABLED) 81 82 goto drop; 82 83 83 - if (!br_allowed_ingress(p->br, nbp_vlan_group_rcu(p), skb, &vid)) 84 + state = p->state; 85 + if (!br_allowed_ingress(p->br, nbp_vlan_group_rcu(p), skb, &vid, 86 + &state)) 84 87 goto out; 85 88 86 89 nbp_switchdev_frame_mark(p, skb); ··· 106 103 } 107 104 } 108 105 109 - if (p->state == BR_STATE_LEARNING) 106 + if (state == BR_STATE_LEARNING) 110 107 goto drop; 111 108 112 109 BR_INPUT_SKB_CB(skb)->brdev = br->dev;
+57 -2
net/bridge/br_private.h
··· 113 113 * @vid: VLAN id 114 114 * @flags: bridge vlan flags 115 115 * @priv_flags: private (in-kernel) bridge vlan flags 116 + * @state: STP state (e.g. blocking, learning, forwarding) 116 117 * @stats: per-cpu VLAN statistics 117 118 * @br: if MASTER flag set, this points to a bridge struct 118 119 * @port: if MASTER flag unset, this points to a port struct ··· 134 133 u16 vid; 135 134 u16 flags; 136 135 u16 priv_flags; 136 + u8 state; 137 137 struct br_vlan_stats __percpu *stats; 138 138 union { 139 139 struct net_bridge *br; ··· 159 157 * @vlan_list: sorted VLAN entry list 160 158 * @num_vlans: number of total VLAN entries 161 159 * @pvid: PVID VLAN id 160 + * @pvid_state: PVID's STP state (e.g. forwarding, learning, blocking) 162 161 * 163 162 * IMPORTANT: Be careful when checking if there're VLAN entries using list 164 163 * primitives because the bridge can have entries in its list which ··· 173 170 struct list_head vlan_list; 174 171 u16 num_vlans; 175 172 u16 pvid; 173 + u8 pvid_state; 176 174 }; 177 175 178 176 /* bridge fdb flags */ ··· 939 935 #ifdef CONFIG_BRIDGE_VLAN_FILTERING 940 936 bool br_allowed_ingress(const struct net_bridge *br, 941 937 struct net_bridge_vlan_group *vg, struct sk_buff *skb, 942 - u16 *vid); 938 + u16 *vid, u8 *state); 943 939 bool br_allowed_egress(struct net_bridge_vlan_group *vg, 944 940 const struct sk_buff *skb); 945 941 bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid); ··· 980 976 const struct net_bridge_port *p, 981 977 u16 vid, u16 vid_range, 982 978 int cmd); 979 + bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr, 980 + const struct net_bridge_vlan *range_end); 983 981 984 982 static inline struct net_bridge_vlan_group *br_vlan_group( 985 983 const struct net_bridge *br) ··· 1041 1035 static inline bool br_allowed_ingress(const struct net_bridge *br, 1042 1036 struct net_bridge_vlan_group *vg, 1043 1037 struct sk_buff *skb, 1044 - u16 *vid) 1038 + u16 *vid, u8 *state) 
1045 1039 { 1046 1040 return true; 1047 1041 } ··· 1194 1188 u16 vid, u16 vid_range, 1195 1189 int cmd) 1196 1190 { 1191 + } 1192 + #endif 1193 + 1194 + /* br_vlan_options.c */ 1195 + #ifdef CONFIG_BRIDGE_VLAN_FILTERING 1196 + bool br_vlan_opts_eq(const struct net_bridge_vlan *v1, 1197 + const struct net_bridge_vlan *v2); 1198 + bool br_vlan_opts_fill(struct sk_buff *skb, const struct net_bridge_vlan *v); 1199 + size_t br_vlan_opts_nl_size(void); 1200 + int br_vlan_process_options(const struct net_bridge *br, 1201 + const struct net_bridge_port *p, 1202 + struct net_bridge_vlan *range_start, 1203 + struct net_bridge_vlan *range_end, 1204 + struct nlattr **tb, 1205 + struct netlink_ext_ack *extack); 1206 + 1207 + /* vlan state manipulation helpers using *_ONCE to annotate lock-free access */ 1208 + static inline u8 br_vlan_get_state(const struct net_bridge_vlan *v) 1209 + { 1210 + return READ_ONCE(v->state); 1211 + } 1212 + 1213 + static inline void br_vlan_set_state(struct net_bridge_vlan *v, u8 state) 1214 + { 1215 + WRITE_ONCE(v->state, state); 1216 + } 1217 + 1218 + static inline u8 br_vlan_get_pvid_state(const struct net_bridge_vlan_group *vg) 1219 + { 1220 + return READ_ONCE(vg->pvid_state); 1221 + } 1222 + 1223 + static inline void br_vlan_set_pvid_state(struct net_bridge_vlan_group *vg, 1224 + u8 state) 1225 + { 1226 + WRITE_ONCE(vg->pvid_state, state); 1227 + } 1228 + 1229 + /* learn_allow is true at ingress and false at egress */ 1230 + static inline bool br_vlan_state_allowed(u8 state, bool learn_allow) 1231 + { 1232 + switch (state) { 1233 + case BR_STATE_LEARNING: 1234 + return learn_allow; 1235 + case BR_STATE_FORWARDING: 1236 + return true; 1237 + default: 1238 + return false; 1239 + } 1197 1240 } 1198 1241 #endif 1199 1242
+83 -25
net/bridge/br_vlan.c
··· 34 34 return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params); 35 35 } 36 36 37 - static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid) 37 + static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg, 38 + const struct net_bridge_vlan *v) 38 39 { 39 - if (vg->pvid == vid) 40 + if (vg->pvid == v->vid) 40 41 return false; 41 42 42 43 smp_wmb(); 43 - vg->pvid = vid; 44 + br_vlan_set_pvid_state(vg, v->state); 45 + vg->pvid = v->vid; 44 46 45 47 return true; 46 48 } ··· 71 69 vg = nbp_vlan_group(v->port); 72 70 73 71 if (flags & BRIDGE_VLAN_INFO_PVID) 74 - ret = __vlan_add_pvid(vg, v->vid); 72 + ret = __vlan_add_pvid(vg, v); 75 73 else 76 74 ret = __vlan_delete_pvid(vg, v->vid); 77 75 ··· 295 293 vg->num_vlans++; 296 294 } 297 295 296 + /* set the state before publishing */ 297 + v->state = BR_STATE_FORWARDING; 298 + 298 299 err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode, 299 300 br_vlan_rht_params); 300 301 if (err) ··· 471 466 /* Called under RCU */ 472 467 static bool __allowed_ingress(const struct net_bridge *br, 473 468 struct net_bridge_vlan_group *vg, 474 - struct sk_buff *skb, u16 *vid) 469 + struct sk_buff *skb, u16 *vid, 470 + u8 *state) 475 471 { 476 472 struct br_vlan_stats *stats; 477 473 struct net_bridge_vlan *v; ··· 538 532 skb->vlan_tci |= pvid; 539 533 540 534 /* if stats are disabled we can avoid the lookup */ 541 - if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) 542 - return true; 535 + if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) { 536 + if (*state == BR_STATE_FORWARDING) { 537 + *state = br_vlan_get_pvid_state(vg); 538 + return br_vlan_state_allowed(*state, true); 539 + } else { 540 + return true; 541 + } 542 + } 543 543 } 544 544 v = br_vlan_find(vg, *vid); 545 545 if (!v || !br_vlan_should_use(v)) 546 546 goto drop; 547 + 548 + if (*state == BR_STATE_FORWARDING) { 549 + *state = br_vlan_get_state(v); 550 + if (!br_vlan_state_allowed(*state, true)) 551 + goto drop; 552 + } 547 553 548 554 if 
(br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) { 549 555 stats = this_cpu_ptr(v->stats); ··· 574 556 575 557 bool br_allowed_ingress(const struct net_bridge *br, 576 558 struct net_bridge_vlan_group *vg, struct sk_buff *skb, 577 - u16 *vid) 559 + u16 *vid, u8 *state) 578 560 { 579 561 /* If VLAN filtering is disabled on the bridge, all packets are 580 562 * permitted. ··· 584 566 return true; 585 567 } 586 568 587 - return __allowed_ingress(br, vg, skb, vid); 569 + return __allowed_ingress(br, vg, skb, vid, state); 588 570 } 589 571 590 572 /* Called under RCU. */ ··· 600 582 601 583 br_vlan_get_tag(skb, &vid); 602 584 v = br_vlan_find(vg, vid); 603 - if (v && br_vlan_should_use(v)) 585 + if (v && br_vlan_should_use(v) && 586 + br_vlan_state_allowed(br_vlan_get_state(v), false)) 604 587 return true; 605 588 606 589 return false; ··· 612 593 { 613 594 struct net_bridge_vlan_group *vg; 614 595 struct net_bridge *br = p->br; 596 + struct net_bridge_vlan *v; 615 597 616 598 /* If filtering was disabled at input, let it pass. 
*/ 617 599 if (!br_opt_get(br, BROPT_VLAN_ENABLED)) ··· 627 607 628 608 if (!*vid) { 629 609 *vid = br_get_pvid(vg); 630 - if (!*vid) 610 + if (!*vid || 611 + !br_vlan_state_allowed(br_vlan_get_pvid_state(vg), true)) 631 612 return false; 632 613 633 614 return true; 634 615 } 635 616 636 - if (br_vlan_find(vg, *vid)) 617 + v = br_vlan_find(vg, *vid); 618 + if (v && br_vlan_state_allowed(br_vlan_get_state(v), true)) 637 619 return true; 638 620 639 621 return false; ··· 1569 1547 } 1570 1548 } 1571 1549 1550 + /* v_opts is used to dump the options which must be equal in the whole range */ 1572 1551 static bool br_vlan_fill_vids(struct sk_buff *skb, u16 vid, u16 vid_range, 1552 + const struct net_bridge_vlan *v_opts, 1573 1553 u16 flags) 1574 1554 { 1575 1555 struct bridge_vlan_info info; ··· 1596 1572 nla_put_u16(skb, BRIDGE_VLANDB_ENTRY_RANGE, vid_range)) 1597 1573 goto out_err; 1598 1574 1575 + if (v_opts && !br_vlan_opts_fill(skb, v_opts)) 1576 + goto out_err; 1577 + 1599 1578 nla_nest_end(skb, nest); 1600 1579 1601 1580 return true; ··· 1613 1586 return NLMSG_ALIGN(sizeof(struct br_vlan_msg)) 1614 1587 + nla_total_size(0) /* BRIDGE_VLANDB_ENTRY */ 1615 1588 + nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_ENTRY_RANGE */ 1616 - + nla_total_size(sizeof(struct bridge_vlan_info)); /* BRIDGE_VLANDB_ENTRY_INFO */ 1589 + + nla_total_size(sizeof(struct bridge_vlan_info)) /* BRIDGE_VLANDB_ENTRY_INFO */ 1590 + + br_vlan_opts_nl_size(); /* bridge vlan options */ 1617 1591 } 1618 1592 1619 1593 void br_vlan_notify(const struct net_bridge *br, ··· 1623 1595 int cmd) 1624 1596 { 1625 1597 struct net_bridge_vlan_group *vg; 1626 - struct net_bridge_vlan *v; 1598 + struct net_bridge_vlan *v = NULL; 1627 1599 struct br_vlan_msg *bvm; 1628 1600 struct nlmsghdr *nlh; 1629 1601 struct sk_buff *skb; ··· 1675 1647 goto out_kfree; 1676 1648 } 1677 1649 1678 - if (!br_vlan_fill_vids(skb, vid, vid_range, flags)) 1650 + if (!br_vlan_fill_vids(skb, vid, vid_range, v, flags)) 1679 1651 goto 
out_err; 1680 1652 1681 1653 nlmsg_end(skb, nlh); ··· 1689 1661 } 1690 1662 1691 1663 /* check if v_curr can enter a range ending in range_end */ 1692 - static bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr, 1693 - const struct net_bridge_vlan *range_end) 1664 + bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr, 1665 + const struct net_bridge_vlan *range_end) 1694 1666 { 1695 1667 return v_curr->vid - range_end->vid == 1 && 1696 - range_end->flags == v_curr->flags; 1668 + range_end->flags == v_curr->flags && 1669 + br_vlan_opts_eq(v_curr, range_end); 1697 1670 } 1698 1671 1699 1672 static int br_vlan_dump_dev(const struct net_device *dev, ··· 1758 1729 u16 flags = br_vlan_flags(range_start, pvid); 1759 1730 1760 1731 if (!br_vlan_fill_vids(skb, range_start->vid, 1761 - range_end->vid, flags)) { 1732 + range_end->vid, range_start, 1733 + flags)) { 1762 1734 err = -EMSGSIZE; 1763 1735 break; 1764 1736 } ··· 1778 1748 */ 1779 1749 if (!err && range_start && 1780 1750 !br_vlan_fill_vids(skb, range_start->vid, range_end->vid, 1781 - br_vlan_flags(range_start, pvid))) 1751 + range_start, br_vlan_flags(range_start, pvid))) 1782 1752 err = -EMSGSIZE; 1783 1753 1784 1754 cb->args[1] = err ? 
idx : 0; ··· 1838 1808 [BRIDGE_VLANDB_ENTRY_INFO] = { .type = NLA_EXACT_LEN, 1839 1809 .len = sizeof(struct bridge_vlan_info) }, 1840 1810 [BRIDGE_VLANDB_ENTRY_RANGE] = { .type = NLA_U16 }, 1811 + [BRIDGE_VLANDB_ENTRY_STATE] = { .type = NLA_U8 }, 1841 1812 }; 1842 1813 1843 1814 static int br_vlan_rtm_process_one(struct net_device *dev, ··· 1847 1816 { 1848 1817 struct bridge_vlan_info *vinfo, vrange_end, *vinfo_last = NULL; 1849 1818 struct nlattr *tb[BRIDGE_VLANDB_ENTRY_MAX + 1]; 1819 + bool changed = false, skip_processing = false; 1850 1820 struct net_bridge_vlan_group *vg; 1851 1821 struct net_bridge_port *p = NULL; 1852 1822 int err = 0, cmdmap = 0; 1853 1823 struct net_bridge *br; 1854 - bool changed = false; 1855 1824 1856 1825 if (netif_is_bridge_master(dev)) { 1857 1826 br = netdev_priv(dev); ··· 1905 1874 switch (cmd) { 1906 1875 case RTM_NEWVLAN: 1907 1876 cmdmap = RTM_SETLINK; 1877 + skip_processing = !!(vinfo->flags & BRIDGE_VLAN_INFO_ONLY_OPTS); 1908 1878 break; 1909 1879 case RTM_DELVLAN: 1910 1880 cmdmap = RTM_DELLINK; 1911 1881 break; 1912 1882 } 1913 1883 1914 - err = br_process_vlan_info(br, p, cmdmap, vinfo, &vinfo_last, &changed, 1915 - extack); 1916 - if (changed) 1917 - br_ifinfo_notify(cmdmap, br, p); 1884 + if (!skip_processing) { 1885 + struct bridge_vlan_info *tmp_last = vinfo_last; 1886 + 1887 + /* br_process_vlan_info may overwrite vinfo_last */ 1888 + err = br_process_vlan_info(br, p, cmdmap, vinfo, &tmp_last, 1889 + &changed, extack); 1890 + 1891 + /* notify first if anything changed */ 1892 + if (changed) 1893 + br_ifinfo_notify(cmdmap, br, p); 1894 + 1895 + if (err) 1896 + return err; 1897 + } 1898 + 1899 + /* deal with options */ 1900 + if (cmd == RTM_NEWVLAN) { 1901 + struct net_bridge_vlan *range_start, *range_end; 1902 + 1903 + if (vinfo_last) { 1904 + range_start = br_vlan_find(vg, vinfo_last->vid); 1905 + range_end = br_vlan_find(vg, vinfo->vid); 1906 + } else { 1907 + range_start = br_vlan_find(vg, vinfo->vid); 1908 + 
range_end = range_start; 1909 + } 1910 + 1911 + err = br_vlan_process_options(br, p, range_start, range_end, 1912 + tb, extack); 1913 + } 1918 1914 1919 1915 return err; 1920 1916 }
+160
net/bridge/br_vlan_options.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + // Copyright (c) 2020, Nikolay Aleksandrov <nikolay@cumulusnetworks.com> 3 + #include <linux/kernel.h> 4 + #include <linux/netdevice.h> 5 + #include <linux/rtnetlink.h> 6 + #include <linux/slab.h> 7 + 8 + #include "br_private.h" 9 + 10 + /* check if the options between two vlans are equal */ 11 + bool br_vlan_opts_eq(const struct net_bridge_vlan *v1, 12 + const struct net_bridge_vlan *v2) 13 + { 14 + return v1->state == v2->state; 15 + } 16 + 17 + bool br_vlan_opts_fill(struct sk_buff *skb, const struct net_bridge_vlan *v) 18 + { 19 + return !nla_put_u8(skb, BRIDGE_VLANDB_ENTRY_STATE, 20 + br_vlan_get_state(v)); 21 + } 22 + 23 + size_t br_vlan_opts_nl_size(void) 24 + { 25 + return nla_total_size(sizeof(u8)); /* BRIDGE_VLANDB_ENTRY_STATE */ 26 + } 27 + 28 + static int br_vlan_modify_state(struct net_bridge_vlan_group *vg, 29 + struct net_bridge_vlan *v, 30 + u8 state, 31 + bool *changed, 32 + struct netlink_ext_ack *extack) 33 + { 34 + struct net_bridge *br; 35 + 36 + ASSERT_RTNL(); 37 + 38 + if (state > BR_STATE_BLOCKING) { 39 + NL_SET_ERR_MSG_MOD(extack, "Invalid vlan state"); 40 + return -EINVAL; 41 + } 42 + 43 + if (br_vlan_is_brentry(v)) 44 + br = v->br; 45 + else 46 + br = v->port->br; 47 + 48 + if (br->stp_enabled == BR_KERNEL_STP) { 49 + NL_SET_ERR_MSG_MOD(extack, "Can't modify vlan state when using kernel STP"); 50 + return -EBUSY; 51 + } 52 + 53 + if (v->state == state) 54 + return 0; 55 + 56 + if (v->vid == br_get_pvid(vg)) 57 + br_vlan_set_pvid_state(vg, state); 58 + 59 + br_vlan_set_state(v, state); 60 + *changed = true; 61 + 62 + return 0; 63 + } 64 + 65 + static int br_vlan_process_one_opts(const struct net_bridge *br, 66 + const struct net_bridge_port *p, 67 + struct net_bridge_vlan_group *vg, 68 + struct net_bridge_vlan *v, 69 + struct nlattr **tb, 70 + bool *changed, 71 + struct netlink_ext_ack *extack) 72 + { 73 + int err; 74 + 75 + *changed = false; 76 + if (tb[BRIDGE_VLANDB_ENTRY_STATE]) { 77 
+ u8 state = nla_get_u8(tb[BRIDGE_VLANDB_ENTRY_STATE]); 78 + 79 + err = br_vlan_modify_state(vg, v, state, changed, extack); 80 + if (err) 81 + return err; 82 + } 83 + 84 + return 0; 85 + } 86 + 87 + int br_vlan_process_options(const struct net_bridge *br, 88 + const struct net_bridge_port *p, 89 + struct net_bridge_vlan *range_start, 90 + struct net_bridge_vlan *range_end, 91 + struct nlattr **tb, 92 + struct netlink_ext_ack *extack) 93 + { 94 + struct net_bridge_vlan *v, *curr_start = NULL, *curr_end = NULL; 95 + struct net_bridge_vlan_group *vg; 96 + int vid, err = 0; 97 + u16 pvid; 98 + 99 + if (p) 100 + vg = nbp_vlan_group(p); 101 + else 102 + vg = br_vlan_group(br); 103 + 104 + if (!range_start || !br_vlan_should_use(range_start)) { 105 + NL_SET_ERR_MSG_MOD(extack, "Vlan range start doesn't exist, can't process options"); 106 + return -ENOENT; 107 + } 108 + if (!range_end || !br_vlan_should_use(range_end)) { 109 + NL_SET_ERR_MSG_MOD(extack, "Vlan range end doesn't exist, can't process options"); 110 + return -ENOENT; 111 + } 112 + 113 + pvid = br_get_pvid(vg); 114 + for (vid = range_start->vid; vid <= range_end->vid; vid++) { 115 + bool changed = false; 116 + 117 + v = br_vlan_find(vg, vid); 118 + if (!v || !br_vlan_should_use(v)) { 119 + NL_SET_ERR_MSG_MOD(extack, "Vlan in range doesn't exist, can't process options"); 120 + err = -ENOENT; 121 + break; 122 + } 123 + 124 + err = br_vlan_process_one_opts(br, p, vg, v, tb, &changed, 125 + extack); 126 + if (err) 127 + break; 128 + 129 + if (changed) { 130 + /* vlan options changed, check for range */ 131 + if (!curr_start) { 132 + curr_start = v; 133 + curr_end = v; 134 + continue; 135 + } 136 + 137 + if (v->vid == pvid || 138 + !br_vlan_can_enter_range(v, curr_end)) { 139 + br_vlan_notify(br, p, curr_start->vid, 140 + curr_end->vid, RTM_NEWVLAN); 141 + curr_start = v; 142 + } 143 + curr_end = v; 144 + } else { 145 + /* nothing changed and nothing to notify yet */ 146 + if (!curr_start) 147 + continue; 148 + 149 
+ br_vlan_notify(br, p, curr_start->vid, curr_end->vid, 150 + RTM_NEWVLAN); 151 + curr_start = NULL; 152 + curr_end = NULL; 153 + } 154 + } 155 + if (curr_start) 156 + br_vlan_notify(br, p, curr_start->vid, curr_end->vid, 157 + RTM_NEWVLAN); 158 + 159 + return err; 160 + }