// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 *
 * Open vSwitch "netdev" vport: attaches an existing network device to an
 * OVS datapath by registering an rx_handler on it, so every frame the
 * device receives is diverted into the datapath.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/if_arp.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/llc.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/openvswitch.h>
#include <linux/export.h>

#include <net/ip_tunnels.h>
#include <net/rtnetlink.h>

#include "datapath.h"
#include "vport.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

/* Defined at the bottom of this file; forward-declared so netdev_create()
 * can reference it.
 */
static struct vport_ops ovs_netdev_vport_ops;

/* Hand one received skb to the datapath.
 *
 * Must be called with rcu_read_lock.  Consumes the skb on every path:
 * either it is passed on to ovs_vport_receive() or freed here.
 */
static void netdev_port_receive(struct sk_buff *skb)
{
	struct vport *vport;

	vport = ovs_netdev_get_vport(skb->dev);
	if (unlikely(!vport))
		goto error;

	/* LRO-aggregated skbs cannot be forwarded; warn and drop. */
	if (unlikely(skb_warn_if_lro(skb)))
		goto error;

	/* Make our own copy of the packet.  Otherwise we will mangle the
	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
	 */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return;

	/* Re-expose the Ethernet header (and fix up the checksum offset)
	 * that the core stack pulled before calling the rx_handler.
	 */
	if (skb->dev->type == ARPHRD_ETHER)
		skb_push_rcsum(skb, ETH_HLEN);

	ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
	return;
error:
	kfree_skb(skb);
}

/* rx_handler hooked onto the attached device by ovs_netdev_link().
 *
 * Called with rcu_read_lock and bottom-halves disabled.  Loopback
 * packets are left to the normal stack; everything else is consumed.
 */
static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		return RX_HANDLER_PASS;

	netdev_port_receive(skb);
	return RX_HANDLER_CONSUMED;
}

/* Return the net_device of the datapath's local (OVSP_LOCAL) port.
 * Caller must hold ovs_mutex (ovs_vport_ovsl() asserts it).
 */
static struct net_device *get_dpdev(const struct datapath *dp)
{
	struct vport *local;

	local = ovs_vport_ovsl(dp, OVSP_LOCAL);
	return local->dev;
}

/* Attach the net_device named @name to @vport and wire it into the
 * datapath: take a tracked reference on the device, make the datapath's
 * local device its master, register the rx_handler, disable LRO and
 * enable promiscuous mode.
 *
 * On success returns @vport with IFF_OVS_DATAPATH set on the device.
 * On failure frees @vport (dropping any device reference taken) and
 * returns an ERR_PTR; callers must not touch @vport afterwards.
 */
struct vport *ovs_netdev_link(struct vport *vport, const char *name)
{
	int err;

	vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), name);
	if (!vport->dev) {
		err = -ENODEV;
		goto error_free_vport;
	}
	/* Ensure that the device exists and that the provided
	 * name is not one of its aliases.
	 */
	if (strcmp(name, ovs_vport_name(vport))) {
		err = -ENODEV;
		goto error_put;
	}
	/* From here on the reference from dev_get_by_name() is tracked,
	 * so every error path below releases it with netdev_put().
	 */
	netdev_tracker_alloc(vport->dev, &vport->dev_tracker, GFP_KERNEL);
	/* Refuse loopback devices, non-Ethernet/NONE link types, and OVS
	 * internal devices (which must not be attached to themselves).
	 */
	if (vport->dev->flags & IFF_LOOPBACK ||
	    (vport->dev->type != ARPHRD_ETHER &&
	     vport->dev->type != ARPHRD_NONE) ||
	    ovs_is_internal_dev(vport->dev)) {
		err = -EINVAL;
		goto error_put;
	}

	rtnl_lock();
	err = netdev_master_upper_dev_link(vport->dev,
					   get_dpdev(vport->dp),
					   NULL, NULL, NULL);
	if (err)
		goto error_unlock;

	err = netdev_rx_handler_register(vport->dev, netdev_frame_hook,
					 vport);
	if (err)
		goto error_master_upper_dev_unlink;

	dev_disable_lro(vport->dev);
	dev_set_promiscuity(vport->dev, 1);
	vport->dev->priv_flags |= IFF_OVS_DATAPATH;
	rtnl_unlock();

	return vport;

error_master_upper_dev_unlink:
	netdev_upper_dev_unlink(vport->dev, get_dpdev(vport->dp));
error_unlock:
	rtnl_unlock();
error_put:
	netdev_put(vport->dev, &vport->dev_tracker);
error_free_vport:
	ovs_vport_free(vport);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(ovs_netdev_link);

/* vport_ops->create: allocate a vport with no private data and link it
 * to the device named in @parms.  Returns the vport or an ERR_PTR.
 */
static struct vport *netdev_create(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = ovs_vport_alloc(0, &ovs_netdev_vport_ops, parms);
	if (IS_ERR(vport))
		return vport;

	return ovs_netdev_link(vport, parms->name);
}

/* RCU callback: drop the tracked device reference and free the vport
 * once all RCU readers (the rx path) are done with it.
 */
static void vport_netdev_free(struct rcu_head *rcu)
{
	struct vport *vport = container_of(rcu, struct vport, rcu);

	netdev_put(vport->dev, &vport->dev_tracker);
	ovs_vport_free(vport);
}

/* Undo ovs_netdev_link()'s wiring: clear IFF_OVS_DATAPATH, unregister
 * the rx_handler, unlink from the master device and drop promiscuity.
 * Does NOT release the device reference; that happens in the RCU free.
 * Caller must hold RTNL.
 */
void ovs_netdev_detach_dev(struct vport *vport)
{
	ASSERT_RTNL();
	vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
	netdev_rx_handler_unregister(vport->dev);
	netdev_upper_dev_unlink(vport->dev,
				netdev_master_upper_dev_get(vport->dev));
	dev_set_promiscuity(vport->dev, -1);
}

/* vport_ops->destroy: detach the device (if still attached) and
 * schedule the vport for RCU freeing.
 */
static void netdev_destroy(struct vport *vport)
{
	/* When called from ovs_db_notify_wq() after a dp_device_event(), the
	 * port has already been detached, so we can avoid taking the RTNL by
	 * checking this first.
	 */
	if (netif_is_ovs_port(vport->dev)) {
		rtnl_lock();
		/* Check again while holding the lock to ensure we don't race
		 * with the netdev notifier and detach twice.
		 */
		if (netif_is_ovs_port(vport->dev))
			ovs_netdev_detach_dev(vport);
		rtnl_unlock();
	}

	call_rcu(&vport->rcu, vport_netdev_free);
}

/* Destroy a tunnel vport built on a netdev: detach it, delete the
 * underlying link if it is not already unregistering, drop our tracked
 * reference and schedule the RCU free.  Exported for the tunnel vport
 * implementations (vxlan/geneve/gre) to use as their destroy op.
 */
void ovs_netdev_tunnel_destroy(struct vport *vport)
{
	rtnl_lock();
	if (netif_is_ovs_port(vport->dev))
		ovs_netdev_detach_dev(vport);

	/* We can be invoked by both explicit vport deletion and
	 * underlying netdev deregistration; delete the link only
	 * if it's not already shutting down.
	 */
	if (vport->dev->reg_state == NETREG_REGISTERED)
		rtnl_delete_link(vport->dev, 0, NULL);
	netdev_put(vport->dev, &vport->dev_tracker);
	vport->dev = NULL;
	rtnl_unlock();

	call_rcu(&vport->rcu, vport_netdev_free);
}
EXPORT_SYMBOL_GPL(ovs_netdev_tunnel_destroy);

/* Map a net_device back to its vport via the rx_handler_data that
 * ovs_netdev_link() registered.  Returns NULL if this device is not
 * attached to a datapath.  Caller must hold rcu_read_lock or RTNL
 * (rcu_dereference_rtnl accepts either).
 */
struct vport *ovs_netdev_get_vport(struct net_device *dev)
{
	if (likely(netif_is_ovs_port(dev)))
		return (struct vport *)
			rcu_dereference_rtnl(dev->rx_handler_data);
	else
		return NULL;
}

/* Ops table for OVS_VPORT_TYPE_NETDEV; transmit goes straight out the
 * attached device via dev_queue_xmit.
 */
static struct vport_ops ovs_netdev_vport_ops = {
	.type = OVS_VPORT_TYPE_NETDEV,
	.create = netdev_create,
	.destroy = netdev_destroy,
	.send = dev_queue_xmit,
};

/* Module init/exit helpers: register/unregister the netdev vport type. */
int __init ovs_netdev_init(void)
{
	return ovs_vport_ops_register(&ovs_netdev_vport_ops);
}

void ovs_netdev_exit(void)
{
	ovs_vport_ops_unregister(&ovs_netdev_vport_ops);
}