/*
 *	Forwarding decision
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"

static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb,
			 void (*__packet_hook)(const struct net_bridge_port *p,
					       struct sk_buff *skb));

/* Don't forward packets to originating port or forwarding disabled */
static inline int should_deliver(const struct net_bridge_port *p,
				 const struct sk_buff *skb)
{
	return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
		br_allowed_egress(p->br, nbp_get_vlan_info(p), skb) &&
		p->state == BR_STATE_FORWARDING;
}

int br_dev_queue_push_xmit(struct sk_buff *skb)
{
	/* ip_fragment doesn't copy the MAC header */
	if (nf_bridge_maybe_copy_header(skb) ||
	    !is_skb_forwardable(skb->dev, skb)) {
		kfree_skb(skb);
	} else {
		skb_push(skb, ETH_HLEN);
		br_drop_fake_rtable(skb);
		dev_queue_xmit(skb);
	}

	return 0;
}

int br_forward_finish(struct sk_buff *skb)
{
	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
		       br_dev_queue_push_xmit);

}

static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
	if (!skb)
		return;

	skb->dev = to->dev;

	if (unlikely(netpoll_tx_running(to->br->dev))) {
		if (!is_skb_forwardable(skb->dev, skb))
			kfree_skb(skb);
		else {
			skb_push(skb, ETH_HLEN);
			br_netpoll_send_skb(to, skb);
		}
		return;
	}

	NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
		br_forward_finish);
}

static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
	struct net_device *indev;

	if (skb_warn_if_lro(skb)) {
		kfree_skb(skb);
		return;
	}

	skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
	if (!skb)
		return;

	indev = skb->dev;
	skb->dev = to->dev;
	skb_forward_csum(skb);

	NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
		br_forward_finish);
}

/* called with rcu_read_lock */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	if (to && should_deliver(to, skb)) {
		__br_deliver(to, skb);
		return;
	}

	kfree_skb(skb);
}

/* called with rcu_read_lock */
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
{
	if (should_deliver(to, skb)) {
		if (skb0)
			deliver_clone(to, skb, __br_forward);
		else
			__br_forward(to, skb);
		return;
	}

	if (!skb0)
		kfree_skb(skb);
}

/* Transmit a clone of skb via __packet_hook; the original skb stays with
 * the caller for any further delivery.
 */
static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb,
			 void (*__packet_hook)(const struct net_bridge_port *p,
					       struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		dev->stats.tx_dropped++;
		return -ENOMEM;
	}

	__packet_hook(prev, skb);
	return 0;
}

/* Deliver a clone of skb to the previously selected port (if any) and
 * remember p as the new candidate, so the last eligible port can be handed
 * the original skb without an extra copy.
 */
static struct net_bridge_port *maybe_deliver(
	struct net_bridge_port *prev, struct net_bridge_port *p,
	struct sk_buff *skb,
	void (*__packet_hook)(const struct net_bridge_port *p,
			      struct sk_buff *skb))
{
	int err;

	if (!should_deliver(p, skb))
		return prev;

	if (!prev)
		goto out;

	err = deliver_clone(prev, skb, __packet_hook);
	if (err)
		return ERR_PTR(err);

out:
	return p;
}

/* called under bridge lock */
static void br_flood(struct net_bridge *br, struct sk_buff *skb,
		     struct sk_buff *skb0,
		     void (*__packet_hook)(const struct net_bridge_port *p,
					   struct sk_buff *skb),
		     bool unicast)
{
	struct net_bridge_port *p;
	struct net_bridge_port *prev;

	prev = NULL;

	list_for_each_entry_rcu(p, &br->port_list, list) {
		/* Do not flood unicast traffic to ports that turn it off */
		if (unicast && !(p->flags & BR_FLOOD))
			continue;
		prev = maybe_deliver(prev, p, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;
	}

	if (!prev)
		goto out;

	/* skb0 != NULL means the caller still needs skb for local delivery,
	 * so hand the last port a clone instead of the original.
	 */
	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}


/* called with rcu_read_lock */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast)
{
	br_flood(br, skb, NULL, __br_deliver, unicast);
}

/* called under bridge lock */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
		      struct sk_buff *skb2, bool unicast)
{
	br_flood(br, skb, skb2, __br_forward, unicast);
}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
/* called with rcu_read_lock */
static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
			       struct sk_buff *skb, struct sk_buff *skb0,
			       void (*__packet_hook)(
					const struct net_bridge_port *p,
					struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port_group *p;
	struct hlist_node *rp;

	rp = rcu_dereference(hlist_first_rcu(&br->router_list));
	p = mdst ? rcu_dereference(mdst->ports) : NULL;

	/* Walk the group's port list and the router port list together,
	 * taking the larger port pointer at each step, so a port that
	 * appears on both lists is handed the packet only once.
	 */
	while (p || rp) {
		struct net_bridge_port *port, *lport, *rport;

		lport = p ? p->port : NULL;
		rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
			     NULL;

		port = (unsigned long)lport > (unsigned long)rport ?
		       lport : rport;

		prev = maybe_deliver(prev, port, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;

		if ((unsigned long)lport >= (unsigned long)port)
			p = rcu_dereference(p->next);
		if ((unsigned long)rport >= (unsigned long)port)
			rp = rcu_dereference(hlist_next_rcu(rp));
	}

	if (!prev)
		goto out;

	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}

/* called with rcu_read_lock */
void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb)
{
	br_multicast_flood(mdst, skb, NULL, __br_deliver);
}

/* called with rcu_read_lock */
void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb, struct sk_buff *skb2)
{
	br_multicast_flood(mdst, skb, skb2, __br_forward);
}
#endif