1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * drivers/net/veth.c
4 *
5 * Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
6 *
7 * Author: Pavel Emelianov <xemul@openvz.org>
8 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
9 *
10 */
11
12#include <linux/netdevice.h>
13#include <linux/slab.h>
14#include <linux/ethtool.h>
15#include <linux/etherdevice.h>
16#include <linux/u64_stats_sync.h>
17
18#include <net/rtnetlink.h>
19#include <net/dst.h>
20#include <net/xfrm.h>
21#include <net/xdp.h>
22#include <linux/veth.h>
23#include <linux/module.h>
24#include <linux/bpf.h>
25#include <linux/filter.h>
26#include <linux/ptr_ring.h>
27#include <linux/bpf_trace.h>
28#include <linux/net_tstamp.h>
29
30#define DRV_NAME "veth"
31#define DRV_VERSION "1.0"
32
33#define VETH_XDP_FLAG BIT(0)
34#define VETH_RING_SIZE 256
35#define VETH_XDP_HEADROOM (XDP_PACKET_HEADROOM + NET_IP_ALIGN)
36
37#define VETH_XDP_TX_BULK_SIZE 16
38#define VETH_XDP_BATCH 16
39
40struct veth_stats {
41 u64 rx_drops;
42 /* xdp */
43 u64 xdp_packets;
44 u64 xdp_bytes;
45 u64 xdp_redirect;
46 u64 xdp_drops;
47 u64 xdp_tx;
48 u64 xdp_tx_err;
49 u64 peer_tq_xdp_xmit;
50 u64 peer_tq_xdp_xmit_err;
51};
52
53struct veth_rq_stats {
54 struct veth_stats vs;
55 struct u64_stats_sync syncp;
56};
57
58struct veth_rq {
59 struct napi_struct xdp_napi;
60 struct napi_struct __rcu *napi; /* points to xdp_napi when the latter is initialized */
61 struct net_device *dev;
62 struct bpf_prog __rcu *xdp_prog;
63 struct xdp_mem_info xdp_mem;
64 struct veth_rq_stats stats;
65 bool rx_notify_masked;
66 struct ptr_ring xdp_ring;
67 struct xdp_rxq_info xdp_rxq;
68};
69
70struct veth_priv {
71 struct net_device __rcu *peer;
72 atomic64_t dropped;
73 struct bpf_prog *_xdp_prog;
74 struct veth_rq *rq;
75 unsigned int requested_headroom;
76};
77
78struct veth_xdp_tx_bq {
79 struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
80 unsigned int count;
81};
82
83/*
84 * ethtool interface
85 */
86
87struct veth_q_stat_desc {
88 char desc[ETH_GSTRING_LEN];
89 size_t offset;
90};
91
92#define VETH_RQ_STAT(m) offsetof(struct veth_stats, m)
93
94static const struct veth_q_stat_desc veth_rq_stats_desc[] = {
95 { "xdp_packets", VETH_RQ_STAT(xdp_packets) },
96 { "xdp_bytes", VETH_RQ_STAT(xdp_bytes) },
97 { "drops", VETH_RQ_STAT(rx_drops) },
98 { "xdp_redirect", VETH_RQ_STAT(xdp_redirect) },
99 { "xdp_drops", VETH_RQ_STAT(xdp_drops) },
100 { "xdp_tx", VETH_RQ_STAT(xdp_tx) },
101 { "xdp_tx_errors", VETH_RQ_STAT(xdp_tx_err) },
102};
103
104#define VETH_RQ_STATS_LEN ARRAY_SIZE(veth_rq_stats_desc)
105
106static const struct veth_q_stat_desc veth_tq_stats_desc[] = {
107 { "xdp_xmit", VETH_RQ_STAT(peer_tq_xdp_xmit) },
108 { "xdp_xmit_errors", VETH_RQ_STAT(peer_tq_xdp_xmit_err) },
109};
110
111#define VETH_TQ_STATS_LEN ARRAY_SIZE(veth_tq_stats_desc)
112
113static struct {
114 const char string[ETH_GSTRING_LEN];
115} ethtool_stats_keys[] = {
116 { "peer_ifindex" },
117};
118
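/* veth_xdp_buff wraps the xdp_buff handed to the BPF program together with
 * the originating sk_buff (NULL for frames that arrived via ndo_xdp_xmit),
 * so the XDP metadata kfuncs at the bottom of this file
 * (veth_xdp_rx_timestamp, veth_xdp_rx_hash) can fall back to the skb fields.
 */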
119struct veth_xdp_buff {
120 struct xdp_buff xdp;
121 struct sk_buff *skb;
122};
123
124static int veth_get_link_ksettings(struct net_device *dev,
125 struct ethtool_link_ksettings *cmd)
126{
127 cmd->base.speed = SPEED_10000;
128 cmd->base.duplex = DUPLEX_FULL;
129 cmd->base.port = PORT_TP;
130 cmd->base.autoneg = AUTONEG_DISABLE;
131 return 0;
132}
133
134static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
135{
136 strscpy(info->driver, DRV_NAME, sizeof(info->driver));
137 strscpy(info->version, DRV_VERSION, sizeof(info->version));
138}
139
140static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
141{
142 u8 *p = buf;
143 int i, j;
144
145 switch(stringset) {
146 case ETH_SS_STATS:
147 memcpy(p, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
148 p += sizeof(ethtool_stats_keys);
149 for (i = 0; i < dev->real_num_rx_queues; i++)
150 for (j = 0; j < VETH_RQ_STATS_LEN; j++)
151 ethtool_sprintf(&p, "rx_queue_%u_%.18s",
152 i, veth_rq_stats_desc[j].desc);
153
154 for (i = 0; i < dev->real_num_tx_queues; i++)
155 for (j = 0; j < VETH_TQ_STATS_LEN; j++)
156 ethtool_sprintf(&p, "tx_queue_%u_%.18s",
157 i, veth_tq_stats_desc[j].desc);
158 break;
159 }
160}
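/* With the layout built above, "ethtool -S <veth>" reports "peer_ifindex"
 * followed by per-queue counters such as "rx_queue_0_xdp_packets",
 * "rx_queue_0_xdp_bytes", ..., "tx_queue_0_xdp_xmit",
 * "tx_queue_0_xdp_xmit_errors" (each stat name is truncated to 18 chars so
 * the full string fits in ETH_GSTRING_LEN).
 */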
161
162static int veth_get_sset_count(struct net_device *dev, int sset)
163{
164 switch (sset) {
165 case ETH_SS_STATS:
166 return ARRAY_SIZE(ethtool_stats_keys) +
167 VETH_RQ_STATS_LEN * dev->real_num_rx_queues +
168 VETH_TQ_STATS_LEN * dev->real_num_tx_queues;
169 default:
170 return -EOPNOTSUPP;
171 }
172}
173
174static void veth_get_ethtool_stats(struct net_device *dev,
175 struct ethtool_stats *stats, u64 *data)
176{
177 struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
178 struct net_device *peer = rtnl_dereference(priv->peer);
179 int i, j, idx;
180
181 data[0] = peer ? peer->ifindex : 0;
182 idx = 1;
183 for (i = 0; i < dev->real_num_rx_queues; i++) {
184 const struct veth_rq_stats *rq_stats = &priv->rq[i].stats;
185 const void *stats_base = (void *)&rq_stats->vs;
186 unsigned int start;
187 size_t offset;
188
189 do {
190 start = u64_stats_fetch_begin(&rq_stats->syncp);
191 for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
192 offset = veth_rq_stats_desc[j].offset;
193 data[idx + j] = *(u64 *)(stats_base + offset);
194 }
195 } while (u64_stats_fetch_retry(&rq_stats->syncp, start));
196 idx += VETH_RQ_STATS_LEN;
197 }
198
199 if (!peer)
200 return;
201
202 rcv_priv = netdev_priv(peer);
203 for (i = 0; i < peer->real_num_rx_queues; i++) {
204 const struct veth_rq_stats *rq_stats = &rcv_priv->rq[i].stats;
205 const void *base = (void *)&rq_stats->vs;
206 unsigned int start, tx_idx = idx;
207 size_t offset;
208
209 tx_idx += (i % dev->real_num_tx_queues) * VETH_TQ_STATS_LEN;
210 do {
211 start = u64_stats_fetch_begin(&rq_stats->syncp);
212 for (j = 0; j < VETH_TQ_STATS_LEN; j++) {
213 offset = veth_tq_stats_desc[j].offset;
214 data[tx_idx + j] += *(u64 *)(base + offset);
215 }
216 } while (u64_stats_fetch_retry(&rq_stats->syncp, start));
217 }
218}
219
220static void veth_get_channels(struct net_device *dev,
221 struct ethtool_channels *channels)
222{
223 channels->tx_count = dev->real_num_tx_queues;
224 channels->rx_count = dev->real_num_rx_queues;
225 channels->max_tx = dev->num_tx_queues;
226 channels->max_rx = dev->num_rx_queues;
227}
228
229static int veth_set_channels(struct net_device *dev,
230 struct ethtool_channels *ch);
231
232static const struct ethtool_ops veth_ethtool_ops = {
233 .get_drvinfo = veth_get_drvinfo,
234 .get_link = ethtool_op_get_link,
235 .get_strings = veth_get_strings,
236 .get_sset_count = veth_get_sset_count,
237 .get_ethtool_stats = veth_get_ethtool_stats,
238 .get_link_ksettings = veth_get_link_ksettings,
239 .get_ts_info = ethtool_op_get_ts_info,
240 .get_channels = veth_get_channels,
241 .set_channels = veth_set_channels,
242};
243
244/* general routines */
245
246static bool veth_is_xdp_frame(void *ptr)
247{
248 return (unsigned long)ptr & VETH_XDP_FLAG;
249}
250
251static struct xdp_frame *veth_ptr_to_xdp(void *ptr)
252{
253 return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
254}
255
256static void *veth_xdp_to_ptr(struct xdp_frame *xdp)
257{
258 return (void *)((unsigned long)xdp | VETH_XDP_FLAG);
259}
260
261static void veth_ptr_free(void *ptr)
262{
263 if (veth_is_xdp_frame(ptr))
264 xdp_return_frame(veth_ptr_to_xdp(ptr));
265 else
266 kfree_skb(ptr);
267}
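/* The per-queue ptr_ring carries both sk_buffs (queued by veth_xmit) and
 * xdp_frames (queued by veth_xdp_xmit). Both pointer types are at least
 * word aligned, so bit 0 (VETH_XDP_FLAG) is free to tag xdp_frames;
 * veth_is_xdp_frame() and veth_ptr_to_xdp() recover the original pointer.
 */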
268
269static void __veth_xdp_flush(struct veth_rq *rq)
270{
271 /* Write ptr_ring before reading rx_notify_masked */
272 smp_mb();
273 if (!READ_ONCE(rq->rx_notify_masked) &&
274 napi_schedule_prep(&rq->xdp_napi)) {
275 WRITE_ONCE(rq->rx_notify_masked, true);
276 __napi_schedule(&rq->xdp_napi);
277 }
278}
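/* The smp_mb() above pairs with the smp_store_mb() in veth_poll(): either
 * the producer sees rx_notify_masked cleared and schedules NAPI, or the
 * consumer re-checks the ring after clearing the flag, so a queued entry
 * is never left behind without a scheduled poll.
 */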
279
280static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
281{
282 if (unlikely(ptr_ring_produce(&rq->xdp_ring, skb))) {
283 dev_kfree_skb_any(skb);
284 return NET_RX_DROP;
285 }
286
287 return NET_RX_SUCCESS;
288}
289
290static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
291 struct veth_rq *rq, bool xdp)
292{
293 return __dev_forward_skb(dev, skb) ?: xdp ?
294 veth_xdp_rx(rq, skb) :
295 __netif_rx(skb);
296}
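/* __dev_forward_skb() returns NET_RX_DROP on failure (e.g. the peer is not
 * up or the packet does not fit its MTU); otherwise the skb is either queued
 * to the peer rq's ptr_ring for NAPI/XDP processing (xdp == true) or handed
 * straight to the stack via __netif_rx().
 */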
297
298/* return true if the specified skb has chances of GRO aggregation
299 * Don't strive for accuracy, but try to avoid GRO overhead in the most
300 * common scenarios.
301 * When XDP is enabled, all traffic is considered eligible, as the xmit
302 * device has TSO off.
303 * When TSO is enabled on the xmit device, we are likely interested only
304 * in UDP aggregation; explicitly check for that if the skb is suspected
305 * to belong to locally generated UDP traffic (the sock_wfree destructor
306 * is used by UDP, ICMP and XDP sockets).
307 */
308static bool veth_skb_is_eligible_for_gro(const struct net_device *dev,
309 const struct net_device *rcv,
310 const struct sk_buff *skb)
311{
312 return !(dev->features & NETIF_F_ALL_TSO) ||
313 (skb->destructor == sock_wfree &&
314 rcv->features & (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD));
315}
316
317static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
318{
319 struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
320 struct veth_rq *rq = NULL;
321 struct net_device *rcv;
322 int length = skb->len;
323 bool use_napi = false;
324 int rxq;
325
326 rcu_read_lock();
327 rcv = rcu_dereference(priv->peer);
328 if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) {
329 kfree_skb(skb);
330 goto drop;
331 }
332
333 rcv_priv = netdev_priv(rcv);
334 rxq = skb_get_queue_mapping(skb);
335 if (rxq < rcv->real_num_rx_queues) {
336 rq = &rcv_priv->rq[rxq];
337
338 /* The napi pointer is available when an XDP program is
339 * attached or when GRO is enabled.
340 * Don't bother with napi/GRO if the skb can't be aggregated.
341 */
342 use_napi = rcu_access_pointer(rq->napi) &&
343 veth_skb_is_eligible_for_gro(dev, rcv, skb);
344 }
345
346 skb_tx_timestamp(skb);
347 if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
348 if (!use_napi)
349 dev_lstats_add(dev, length);
350 } else {
351drop:
352 atomic64_inc(&priv->dropped);
353 }
354
355 if (use_napi)
356 __veth_xdp_flush(rq);
357
358 rcu_read_unlock();
359
360 return NETDEV_TX_OK;
361}
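/* On the non-NAPI path the transmitting device accounts bytes/packets in
 * its per-cpu lstats; on the NAPI path the peer rq counts them in
 * veth_xdp_rcv(), and veth_get_stats64() folds those peer rx counters back
 * into this device's tx totals.
 */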
362
363static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
364{
365 struct veth_priv *priv = netdev_priv(dev);
366
367 dev_lstats_read(dev, packets, bytes);
368 return atomic64_read(&priv->dropped);
369}
370
371static void veth_stats_rx(struct veth_stats *result, struct net_device *dev)
372{
373 struct veth_priv *priv = netdev_priv(dev);
374 int i;
375
376 result->peer_tq_xdp_xmit_err = 0;
377 result->xdp_packets = 0;
378 result->xdp_tx_err = 0;
379 result->xdp_bytes = 0;
380 result->rx_drops = 0;
381 for (i = 0; i < dev->num_rx_queues; i++) {
382 u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err;
383 struct veth_rq_stats *stats = &priv->rq[i].stats;
384 unsigned int start;
385
386 do {
387 start = u64_stats_fetch_begin(&stats->syncp);
388 peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err;
389 xdp_tx_err = stats->vs.xdp_tx_err;
390 packets = stats->vs.xdp_packets;
391 bytes = stats->vs.xdp_bytes;
392 drops = stats->vs.rx_drops;
393 } while (u64_stats_fetch_retry(&stats->syncp, start));
394 result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err;
395 result->xdp_tx_err += xdp_tx_err;
396 result->xdp_packets += packets;
397 result->xdp_bytes += bytes;
398 result->rx_drops += drops;
399 }
400}
401
402static void veth_get_stats64(struct net_device *dev,
403 struct rtnl_link_stats64 *tot)
404{
405 struct veth_priv *priv = netdev_priv(dev);
406 struct net_device *peer;
407 struct veth_stats rx;
408 u64 packets, bytes;
409
410 tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes);
411 tot->tx_bytes = bytes;
412 tot->tx_packets = packets;
413
414 veth_stats_rx(&rx, dev);
415 tot->tx_dropped += rx.xdp_tx_err;
416 tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err;
417 tot->rx_bytes = rx.xdp_bytes;
418 tot->rx_packets = rx.xdp_packets;
419
420 rcu_read_lock();
421 peer = rcu_dereference(priv->peer);
422 if (peer) {
423 veth_stats_tx(peer, &packets, &bytes);
424 tot->rx_bytes += bytes;
425 tot->rx_packets += packets;
426
427 veth_stats_rx(&rx, peer);
428 tot->tx_dropped += rx.peer_tq_xdp_xmit_err;
429 tot->rx_dropped += rx.xdp_tx_err;
430 tot->tx_bytes += rx.xdp_bytes;
431 tot->tx_packets += rx.xdp_packets;
432 }
433 rcu_read_unlock();
434}
435
436/* fake multicast ability */
437static void veth_set_multicast_list(struct net_device *dev)
438{
439}
440
441static int veth_select_rxq(struct net_device *dev)
442{
443 return smp_processor_id() % dev->real_num_rx_queues;
444}
445
446static struct net_device *veth_peer_dev(struct net_device *dev)
447{
448 struct veth_priv *priv = netdev_priv(dev);
449
450 /* Callers must be under RCU read side. */
451 return rcu_dereference(priv->peer);
452}
453
454static int veth_xdp_xmit(struct net_device *dev, int n,
455 struct xdp_frame **frames,
456 u32 flags, bool ndo_xmit)
457{
458 struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
459 int i, ret = -ENXIO, nxmit = 0;
460 struct net_device *rcv;
461 unsigned int max_len;
462 struct veth_rq *rq;
463
464 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
465 return -EINVAL;
466
467 rcu_read_lock();
468 rcv = rcu_dereference(priv->peer);
469 if (unlikely(!rcv))
470 goto out;
471
472 rcv_priv = netdev_priv(rcv);
473 rq = &rcv_priv->rq[veth_select_rxq(rcv)];
474 /* The napi pointer is set if NAPI is enabled, which ensures that
475 * xdp_ring is initialized on receive side and the peer device is up.
476 */
477 if (!rcu_access_pointer(rq->napi))
478 goto out;
479
480 max_len = rcv->mtu + rcv->hard_header_len + VLAN_HLEN;
481
482 spin_lock(&rq->xdp_ring.producer_lock);
483 for (i = 0; i < n; i++) {
484 struct xdp_frame *frame = frames[i];
485 void *ptr = veth_xdp_to_ptr(frame);
486
487 if (unlikely(xdp_get_frame_len(frame) > max_len ||
488 __ptr_ring_produce(&rq->xdp_ring, ptr)))
489 break;
490 nxmit++;
491 }
492 spin_unlock(&rq->xdp_ring.producer_lock);
493
494 if (flags & XDP_XMIT_FLUSH)
495 __veth_xdp_flush(rq);
496
497 ret = nxmit;
498 if (ndo_xmit) {
499 u64_stats_update_begin(&rq->stats.syncp);
500 rq->stats.vs.peer_tq_xdp_xmit += nxmit;
501 rq->stats.vs.peer_tq_xdp_xmit_err += n - nxmit;
502 u64_stats_update_end(&rq->stats.syncp);
503 }
504
505out:
506 rcu_read_unlock();
507
508 return ret;
509}
510
511static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
512 struct xdp_frame **frames, u32 flags)
513{
514 int err;
515
516 err = veth_xdp_xmit(dev, n, frames, flags, true);
517 if (err < 0) {
518 struct veth_priv *priv = netdev_priv(dev);
519
520 atomic64_add(n, &priv->dropped);
521 }
522
523 return err;
524}
525
526static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
527{
528 int sent, i, err = 0, drops;
529
530 sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
531 if (sent < 0) {
532 err = sent;
533 sent = 0;
534 }
535
536 for (i = sent; unlikely(i < bq->count); i++)
537 xdp_return_frame(bq->q[i]);
538
539 drops = bq->count - sent;
540 trace_xdp_bulk_tx(rq->dev, sent, drops, err);
541
542 u64_stats_update_begin(&rq->stats.syncp);
543 rq->stats.vs.xdp_tx += sent;
544 rq->stats.vs.xdp_tx_err += drops;
545 u64_stats_update_end(&rq->stats.syncp);
546
547 bq->count = 0;
548}
549
550static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
551{
552 struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev);
553 struct net_device *rcv;
554 struct veth_rq *rcv_rq;
555
556 rcu_read_lock();
557 veth_xdp_flush_bq(rq, bq);
558 rcv = rcu_dereference(priv->peer);
559 if (unlikely(!rcv))
560 goto out;
561
562 rcv_priv = netdev_priv(rcv);
563 rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)];
564 /* xdp_ring is initialized on receive side? */
565 if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog)))
566 goto out;
567
568 __veth_xdp_flush(rcv_rq);
569out:
570 rcu_read_unlock();
571}
572
573static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
574 struct veth_xdp_tx_bq *bq)
575{
576 struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
577
578 if (unlikely(!frame))
579 return -EOVERFLOW;
580
581 if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
582 veth_xdp_flush_bq(rq, bq);
583
584 bq->q[bq->count++] = frame;
585
586 return 0;
587}
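/* XDP_TX frames are converted to xdp_frames and staged in the per-NAPI
 * bulk queue; veth_xdp_flush_bq() then pushes them to the peer ring in
 * batches of up to VETH_XDP_TX_BULK_SIZE, taking the producer lock only
 * once per batch.
 */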
588
589static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
590 struct xdp_frame *frame,
591 struct veth_xdp_tx_bq *bq,
592 struct veth_stats *stats)
593{
594 struct xdp_frame orig_frame;
595 struct bpf_prog *xdp_prog;
596
597 rcu_read_lock();
598 xdp_prog = rcu_dereference(rq->xdp_prog);
599 if (likely(xdp_prog)) {
600 struct veth_xdp_buff vxbuf;
601 struct xdp_buff *xdp = &vxbuf.xdp;
602 u32 act;
603
604 xdp_convert_frame_to_buff(frame, xdp);
605 xdp->rxq = &rq->xdp_rxq;
606 vxbuf.skb = NULL;
607
608 act = bpf_prog_run_xdp(xdp_prog, xdp);
609
610 switch (act) {
611 case XDP_PASS:
612 if (xdp_update_frame_from_buff(xdp, frame))
613 goto err_xdp;
614 break;
615 case XDP_TX:
616 orig_frame = *frame;
617 xdp->rxq->mem = frame->mem;
618 if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) {
619 trace_xdp_exception(rq->dev, xdp_prog, act);
620 frame = &orig_frame;
621 stats->rx_drops++;
622 goto err_xdp;
623 }
624 stats->xdp_tx++;
625 rcu_read_unlock();
626 goto xdp_xmit;
627 case XDP_REDIRECT:
628 orig_frame = *frame;
629 xdp->rxq->mem = frame->mem;
630 if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) {
631 frame = &orig_frame;
632 stats->rx_drops++;
633 goto err_xdp;
634 }
635 stats->xdp_redirect++;
636 rcu_read_unlock();
637 goto xdp_xmit;
638 default:
639 bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act);
640 fallthrough;
641 case XDP_ABORTED:
642 trace_xdp_exception(rq->dev, xdp_prog, act);
643 fallthrough;
644 case XDP_DROP:
645 stats->xdp_drops++;
646 goto err_xdp;
647 }
648 }
649 rcu_read_unlock();
650
651 return frame;
652err_xdp:
653 rcu_read_unlock();
654 xdp_return_frame(frame);
655xdp_xmit:
656 return NULL;
657}
658
659/* the frames array contains at most VETH_XDP_BATCH entries */
660static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames,
661 int n_xdpf, struct veth_xdp_tx_bq *bq,
662 struct veth_stats *stats)
663{
664 void *skbs[VETH_XDP_BATCH];
665 int i;
666
667 if (xdp_alloc_skb_bulk(skbs, n_xdpf,
668 GFP_ATOMIC | __GFP_ZERO) < 0) {
669 for (i = 0; i < n_xdpf; i++)
670 xdp_return_frame(frames[i]);
671 stats->rx_drops += n_xdpf;
672
673 return;
674 }
675
676 for (i = 0; i < n_xdpf; i++) {
677 struct sk_buff *skb = skbs[i];
678
679 skb = __xdp_build_skb_from_frame(frames[i], skb,
680 rq->dev);
681 if (!skb) {
682 xdp_return_frame(frames[i]);
683 stats->rx_drops++;
684 continue;
685 }
686 napi_gro_receive(&rq->xdp_napi, skb);
687 }
688}
689
690static void veth_xdp_get(struct xdp_buff *xdp)
691{
692 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
693 int i;
694
695 get_page(virt_to_page(xdp->data));
696 if (likely(!xdp_buff_has_frags(xdp)))
697 return;
698
699 for (i = 0; i < sinfo->nr_frags; i++)
700 __skb_frag_ref(&sinfo->frags[i]);
701}
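/* veth_xdp_get() grabs a reference on the head page and on every fragment
 * so the buffer outlives the consume_skb() done before XDP_TX and
 * XDP_REDIRECT hand the data off to the peer or another device.
 */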
702
703static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
704 struct xdp_buff *xdp,
705 struct sk_buff **pskb)
706{
707 struct sk_buff *skb = *pskb;
708 u32 frame_sz;
709
710 if (skb_shared(skb) || skb_head_is_locked(skb) ||
711 skb_shinfo(skb)->nr_frags ||
712 skb_headroom(skb) < XDP_PACKET_HEADROOM) {
713 u32 size, len, max_head_size, off;
714 struct sk_buff *nskb;
715 struct page *page;
716 int i, head_off;
717
718 /* We need a private copy of the skb and data buffers since
719 * the ebpf program can modify it. We segment the original skb
720 * into order-0 pages without linearizing it.
721 *
722 * Make sure we have enough space for the linear and paged areas.
723 */
724 max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE -
725 VETH_XDP_HEADROOM);
726 if (skb->len > PAGE_SIZE * MAX_SKB_FRAGS + max_head_size)
727 goto drop;
728
729 /* Allocate skb head */
730 page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
731 if (!page)
732 goto drop;
733
734 nskb = build_skb(page_address(page), PAGE_SIZE);
735 if (!nskb) {
736 put_page(page);
737 goto drop;
738 }
739
740 skb_reserve(nskb, VETH_XDP_HEADROOM);
741 size = min_t(u32, skb->len, max_head_size);
742 if (skb_copy_bits(skb, 0, nskb->data, size)) {
743 consume_skb(nskb);
744 goto drop;
745 }
746 skb_put(nskb, size);
747
748 skb_copy_header(nskb, skb);
749 head_off = skb_headroom(nskb) - skb_headroom(skb);
750 skb_headers_offset_update(nskb, head_off);
751
752 /* Allocate paged area of new skb */
753 off = size;
754 len = skb->len - off;
755
756 for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
757 page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
758 if (!page) {
759 consume_skb(nskb);
760 goto drop;
761 }
762
763 size = min_t(u32, len, PAGE_SIZE);
764 skb_add_rx_frag(nskb, i, page, 0, size, PAGE_SIZE);
765 if (skb_copy_bits(skb, off, page_address(page),
766 size)) {
767 consume_skb(nskb);
768 goto drop;
769 }
770
771 len -= size;
772 off += size;
773 }
774
775 consume_skb(skb);
776 skb = nskb;
777 }
778
779 /* SKB "head" area always has tailroom for skb_shared_info */
780 frame_sz = skb_end_pointer(skb) - skb->head;
781 frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
782 xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
783 xdp_prepare_buff(xdp, skb->head, skb_headroom(skb),
784 skb_headlen(skb), true);
785
786 if (skb_is_nonlinear(skb)) {
787 skb_shinfo(skb)->xdp_frags_size = skb->data_len;
788 xdp_buff_set_frags_flag(xdp);
789 } else {
790 xdp_buff_clear_frags_flag(xdp);
791 }
792 *pskb = skb;
793
794 return 0;
795drop:
796 consume_skb(skb);
797 *pskb = NULL;
798
799 return -ENOMEM;
800}
801
802static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
803 struct sk_buff *skb,
804 struct veth_xdp_tx_bq *bq,
805 struct veth_stats *stats)
806{
807 void *orig_data, *orig_data_end;
808 struct bpf_prog *xdp_prog;
809 struct veth_xdp_buff vxbuf;
810 struct xdp_buff *xdp = &vxbuf.xdp;
811 u32 act, metalen;
812 int off;
813
814 skb_prepare_for_gro(skb);
815
816 rcu_read_lock();
817 xdp_prog = rcu_dereference(rq->xdp_prog);
818 if (unlikely(!xdp_prog)) {
819 rcu_read_unlock();
820 goto out;
821 }
822
823 __skb_push(skb, skb->data - skb_mac_header(skb));
824 if (veth_convert_skb_to_xdp_buff(rq, xdp, &skb))
825 goto drop;
826 vxbuf.skb = skb;
827
828 orig_data = xdp->data;
829 orig_data_end = xdp->data_end;
830
831 act = bpf_prog_run_xdp(xdp_prog, xdp);
832
833 switch (act) {
834 case XDP_PASS:
835 break;
836 case XDP_TX:
837 veth_xdp_get(xdp);
838 consume_skb(skb);
839 xdp->rxq->mem = rq->xdp_mem;
840 if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) {
841 trace_xdp_exception(rq->dev, xdp_prog, act);
842 stats->rx_drops++;
843 goto err_xdp;
844 }
845 stats->xdp_tx++;
846 rcu_read_unlock();
847 goto xdp_xmit;
848 case XDP_REDIRECT:
849 veth_xdp_get(xdp);
850 consume_skb(skb);
851 xdp->rxq->mem = rq->xdp_mem;
852 if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) {
853 stats->rx_drops++;
854 goto err_xdp;
855 }
856 stats->xdp_redirect++;
857 rcu_read_unlock();
858 goto xdp_xmit;
859 default:
860 bpf_warn_invalid_xdp_action(rq->dev, xdp_prog, act);
861 fallthrough;
862 case XDP_ABORTED:
863 trace_xdp_exception(rq->dev, xdp_prog, act);
864 fallthrough;
865 case XDP_DROP:
866 stats->xdp_drops++;
867 goto xdp_drop;
868 }
869 rcu_read_unlock();
870
871 /* check if bpf_xdp_adjust_head was used */
872 off = orig_data - xdp->data;
873 if (off > 0)
874 __skb_push(skb, off);
875 else if (off < 0)
876 __skb_pull(skb, -off);
877
878 skb_reset_mac_header(skb);
879
880 /* check if bpf_xdp_adjust_tail was used */
881 off = xdp->data_end - orig_data_end;
882 if (off != 0)
883 __skb_put(skb, off); /* positive on grow, negative on shrink */
884
885 /* XDP frag metadata (e.g. nr_frags) is updated by eBPF helpers
886 * (e.g. bpf_xdp_adjust_tail); we need to update data_len here.
887 */
888 if (xdp_buff_has_frags(xdp))
889 skb->data_len = skb_shinfo(skb)->xdp_frags_size;
890 else
891 skb->data_len = 0;
892
893 skb->protocol = eth_type_trans(skb, rq->dev);
894
895 metalen = xdp->data - xdp->data_meta;
896 if (metalen)
897 skb_metadata_set(skb, metalen);
898out:
899 return skb;
900drop:
901 stats->rx_drops++;
902xdp_drop:
903 rcu_read_unlock();
904 kfree_skb(skb);
905 return NULL;
906err_xdp:
907 rcu_read_unlock();
908 xdp_return_buff(xdp);
909xdp_xmit:
910 return NULL;
911}
912
913static int veth_xdp_rcv(struct veth_rq *rq, int budget,
914 struct veth_xdp_tx_bq *bq,
915 struct veth_stats *stats)
916{
917 int i, done = 0, n_xdpf = 0;
918 void *xdpf[VETH_XDP_BATCH];
919
920 for (i = 0; i < budget; i++) {
921 void *ptr = __ptr_ring_consume(&rq->xdp_ring);
922
923 if (!ptr)
924 break;
925
926 if (veth_is_xdp_frame(ptr)) {
927 /* ndo_xdp_xmit */
928 struct xdp_frame *frame = veth_ptr_to_xdp(ptr);
929
930 stats->xdp_bytes += xdp_get_frame_len(frame);
931 frame = veth_xdp_rcv_one(rq, frame, bq, stats);
932 if (frame) {
933 /* XDP_PASS */
934 xdpf[n_xdpf++] = frame;
935 if (n_xdpf == VETH_XDP_BATCH) {
936 veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf,
937 bq, stats);
938 n_xdpf = 0;
939 }
940 }
941 } else {
942 /* ndo_start_xmit */
943 struct sk_buff *skb = ptr;
944
945 stats->xdp_bytes += skb->len;
946 skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
947 if (skb) {
948 if (skb_shared(skb) || skb_unclone(skb, GFP_ATOMIC))
949 netif_receive_skb(skb);
950 else
951 napi_gro_receive(&rq->xdp_napi, skb);
952 }
953 }
954 done++;
955 }
956
957 if (n_xdpf)
958 veth_xdp_rcv_bulk_skb(rq, xdpf, n_xdpf, bq, stats);
959
960 u64_stats_update_begin(&rq->stats.syncp);
961 rq->stats.vs.xdp_redirect += stats->xdp_redirect;
962 rq->stats.vs.xdp_bytes += stats->xdp_bytes;
963 rq->stats.vs.xdp_drops += stats->xdp_drops;
964 rq->stats.vs.rx_drops += stats->rx_drops;
965 rq->stats.vs.xdp_packets += done;
966 u64_stats_update_end(&rq->stats.syncp);
967
968 return done;
969}
970
971static int veth_poll(struct napi_struct *napi, int budget)
972{
973 struct veth_rq *rq =
974 container_of(napi, struct veth_rq, xdp_napi);
975 struct veth_stats stats = {};
976 struct veth_xdp_tx_bq bq;
977 int done;
978
979 bq.count = 0;
980
981 xdp_set_return_frame_no_direct();
982 done = veth_xdp_rcv(rq, budget, &bq, &stats);
983
984 if (stats.xdp_redirect > 0)
985 xdp_do_flush();
986
987 if (done < budget && napi_complete_done(napi, done)) {
988 /* Write rx_notify_masked before reading ptr_ring */
989 smp_store_mb(rq->rx_notify_masked, false);
990 if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
991 if (napi_schedule_prep(&rq->xdp_napi)) {
992 WRITE_ONCE(rq->rx_notify_masked, true);
993 __napi_schedule(&rq->xdp_napi);
994 }
995 }
996 }
997
998 if (stats.xdp_tx > 0)
999 veth_xdp_flush(rq, &bq);
1000 xdp_clear_return_frame_no_direct();
1001
1002 return done;
1003}
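/* xdp_do_flush() drains the redirect maps touched during this poll, and
 * veth_xdp_flush() pushes any XDP_TX frames still sitting in the bulk
 * queue back out through this device's peer and schedules its NAPI.
 */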
1004
1005static int __veth_napi_enable_range(struct net_device *dev, int start, int end)
1006{
1007 struct veth_priv *priv = netdev_priv(dev);
1008 int err, i;
1009
1010 for (i = start; i < end; i++) {
1011 struct veth_rq *rq = &priv->rq[i];
1012
1013 err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
1014 if (err)
1015 goto err_xdp_ring;
1016 }
1017
1018 for (i = start; i < end; i++) {
1019 struct veth_rq *rq = &priv->rq[i];
1020
1021 napi_enable(&rq->xdp_napi);
1022 rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
1023 }
1024
1025 return 0;
1026
1027err_xdp_ring:
1028 for (i--; i >= start; i--)
1029 ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);
1030
1031 return err;
1032}
1033
1034static int __veth_napi_enable(struct net_device *dev)
1035{
1036 return __veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
1037}
1038
1039static void veth_napi_del_range(struct net_device *dev, int start, int end)
1040{
1041 struct veth_priv *priv = netdev_priv(dev);
1042 int i;
1043
1044 for (i = start; i < end; i++) {
1045 struct veth_rq *rq = &priv->rq[i];
1046
1047 rcu_assign_pointer(priv->rq[i].napi, NULL);
1048 napi_disable(&rq->xdp_napi);
1049 __netif_napi_del(&rq->xdp_napi);
1050 }
1051 synchronize_net();
1052
1053 for (i = start; i < end; i++) {
1054 struct veth_rq *rq = &priv->rq[i];
1055
1056 rq->rx_notify_masked = false;
1057 ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
1058 }
1059}
1060
1061static void veth_napi_del(struct net_device *dev)
1062{
1063 veth_napi_del_range(dev, 0, dev->real_num_rx_queues);
1064}
1065
1066static bool veth_gro_requested(const struct net_device *dev)
1067{
1068 return !!(dev->wanted_features & NETIF_F_GRO);
1069}
1070
1071static int veth_enable_xdp_range(struct net_device *dev, int start, int end,
1072 bool napi_already_on)
1073{
1074 struct veth_priv *priv = netdev_priv(dev);
1075 int err, i;
1076
1077 for (i = start; i < end; i++) {
1078 struct veth_rq *rq = &priv->rq[i];
1079
1080 if (!napi_already_on)
1081 netif_napi_add(dev, &rq->xdp_napi, veth_poll);
1082 err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
1083 if (err < 0)
1084 goto err_rxq_reg;
1085
1086 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
1087 MEM_TYPE_PAGE_SHARED,
1088 NULL);
1089 if (err < 0)
1090 goto err_reg_mem;
1091
1092 /* Save original mem info as it can be overwritten */
1093 rq->xdp_mem = rq->xdp_rxq.mem;
1094 }
1095 return 0;
1096
1097err_reg_mem:
1098 xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
1099err_rxq_reg:
1100 for (i--; i >= start; i--) {
1101 struct veth_rq *rq = &priv->rq[i];
1102
1103 xdp_rxq_info_unreg(&rq->xdp_rxq);
1104 if (!napi_already_on)
1105 netif_napi_del(&rq->xdp_napi);
1106 }
1107
1108 return err;
1109}
1110
1111static void veth_disable_xdp_range(struct net_device *dev, int start, int end,
1112 bool delete_napi)
1113{
1114 struct veth_priv *priv = netdev_priv(dev);
1115 int i;
1116
1117 for (i = start; i < end; i++) {
1118 struct veth_rq *rq = &priv->rq[i];
1119
1120 rq->xdp_rxq.mem = rq->xdp_mem;
1121 xdp_rxq_info_unreg(&rq->xdp_rxq);
1122
1123 if (delete_napi)
1124 netif_napi_del(&rq->xdp_napi);
1125 }
1126}
1127
1128static int veth_enable_xdp(struct net_device *dev)
1129{
1130 bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP);
1131 struct veth_priv *priv = netdev_priv(dev);
1132 int err, i;
1133
1134 if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
1135 err = veth_enable_xdp_range(dev, 0, dev->real_num_rx_queues, napi_already_on);
1136 if (err)
1137 return err;
1138
1139 if (!napi_already_on) {
1140 err = __veth_napi_enable(dev);
1141 if (err) {
1142 veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true);
1143 return err;
1144 }
1145
1146 if (!veth_gro_requested(dev)) {
1147 /* user-space did not require GRO, but adding XDP
1148 * is supposed to get GRO working
1149 */
1150 dev->features |= NETIF_F_GRO;
1151 netdev_features_change(dev);
1152 }
1153 }
1154 }
1155
1156 for (i = 0; i < dev->real_num_rx_queues; i++) {
1157 rcu_assign_pointer(priv->rq[i].xdp_prog, priv->_xdp_prog);
1158 rcu_assign_pointer(priv->rq[i].napi, &priv->rq[i].xdp_napi);
1159 }
1160
1161 return 0;
1162}
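/* Attaching an XDP program forces NAPI on and turns GRO on for this side
 * even when user space did not request it; veth_xdp_set() additionally
 * clears software GSO on the peer so the program is not handed GSO skbs.
 */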
1163
1164static void veth_disable_xdp(struct net_device *dev)
1165{
1166 struct veth_priv *priv = netdev_priv(dev);
1167 int i;
1168
1169 for (i = 0; i < dev->real_num_rx_queues; i++)
1170 rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
1171
1172 if (!netif_running(dev) || !veth_gro_requested(dev)) {
1173 veth_napi_del(dev);
1174
1175 /* if user-space did not require GRO and it was only enabled
1176 * because XDP was added, clear it now
1177 */
1178 if (!veth_gro_requested(dev) && netif_running(dev)) {
1179 dev->features &= ~NETIF_F_GRO;
1180 netdev_features_change(dev);
1181 }
1182 }
1183
1184 veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false);
1185}
1186
1187static int veth_napi_enable_range(struct net_device *dev, int start, int end)
1188{
1189 struct veth_priv *priv = netdev_priv(dev);
1190 int err, i;
1191
1192 for (i = start; i < end; i++) {
1193 struct veth_rq *rq = &priv->rq[i];
1194
1195 netif_napi_add(dev, &rq->xdp_napi, veth_poll);
1196 }
1197
1198 err = __veth_napi_enable_range(dev, start, end);
1199 if (err) {
1200 for (i = start; i < end; i++) {
1201 struct veth_rq *rq = &priv->rq[i];
1202
1203 netif_napi_del(&rq->xdp_napi);
1204 }
1205 return err;
1206 }
1207 return err;
1208}
1209
1210static int veth_napi_enable(struct net_device *dev)
1211{
1212 return veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
1213}
1214
1215static void veth_disable_range_safe(struct net_device *dev, int start, int end)
1216{
1217 struct veth_priv *priv = netdev_priv(dev);
1218
1219 if (start >= end)
1220 return;
1221
1222 if (priv->_xdp_prog) {
1223 veth_napi_del_range(dev, start, end);
1224 veth_disable_xdp_range(dev, start, end, false);
1225 } else if (veth_gro_requested(dev)) {
1226 veth_napi_del_range(dev, start, end);
1227 }
1228}
1229
1230static int veth_enable_range_safe(struct net_device *dev, int start, int end)
1231{
1232 struct veth_priv *priv = netdev_priv(dev);
1233 int err;
1234
1235 if (start >= end)
1236 return 0;
1237
1238 if (priv->_xdp_prog) {
1239 /* these channels are freshly initialized, napi is not on them even
1240 * when GRO is requested
1241 */
1242 err = veth_enable_xdp_range(dev, start, end, false);
1243 if (err)
1244 return err;
1245
1246 err = __veth_napi_enable_range(dev, start, end);
1247 if (err) {
1248 /* on error always delete the newly added napis */
1249 veth_disable_xdp_range(dev, start, end, true);
1250 return err;
1251 }
1252 } else if (veth_gro_requested(dev)) {
1253 return veth_napi_enable_range(dev, start, end);
1254 }
1255 return 0;
1256}
1257
1258static void veth_set_xdp_features(struct net_device *dev)
1259{
1260 struct veth_priv *priv = netdev_priv(dev);
1261 struct net_device *peer;
1262
1263 peer = rtnl_dereference(priv->peer);
1264 if (peer && peer->real_num_tx_queues <= dev->real_num_rx_queues) {
1265 xdp_features_t val = NETDEV_XDP_ACT_BASIC |
1266 NETDEV_XDP_ACT_REDIRECT |
1267 NETDEV_XDP_ACT_RX_SG;
1268
1269 if (priv->_xdp_prog || veth_gro_requested(dev))
1270 val |= NETDEV_XDP_ACT_NDO_XMIT |
1271 NETDEV_XDP_ACT_NDO_XMIT_SG;
1272 xdp_set_features_flag(dev, val);
1273 } else {
1274 xdp_clear_features_flag(dev);
1275 }
1276}
1277
1278static int veth_set_channels(struct net_device *dev,
1279 struct ethtool_channels *ch)
1280{
1281 struct veth_priv *priv = netdev_priv(dev);
1282 unsigned int old_rx_count, new_rx_count;
1283 struct veth_priv *peer_priv;
1284 struct net_device *peer;
1285 int err;
1286
1287 /* sanity check. Upper bounds are already enforced by the caller */
1288 if (!ch->rx_count || !ch->tx_count)
1289 return -EINVAL;
1290
1291 /* avoid breaking XDP, if it is enabled */
1292 peer = rtnl_dereference(priv->peer);
1293 peer_priv = peer ? netdev_priv(peer) : NULL;
1294 if (priv->_xdp_prog && peer && ch->rx_count < peer->real_num_tx_queues)
1295 return -EINVAL;
1296
1297 if (peer && peer_priv && peer_priv->_xdp_prog && ch->tx_count > peer->real_num_rx_queues)
1298 return -EINVAL;
1299
1300 old_rx_count = dev->real_num_rx_queues;
1301 new_rx_count = ch->rx_count;
1302 if (netif_running(dev)) {
1303 /* turn device off */
1304 netif_carrier_off(dev);
1305 if (peer)
1306 netif_carrier_off(peer);
1307
1308 /* try to allocate new resources, as needed */
1309 err = veth_enable_range_safe(dev, old_rx_count, new_rx_count);
1310 if (err)
1311 goto out;
1312 }
1313
1314 err = netif_set_real_num_rx_queues(dev, ch->rx_count);
1315 if (err)
1316 goto revert;
1317
1318 err = netif_set_real_num_tx_queues(dev, ch->tx_count);
1319 if (err) {
1320 int err2 = netif_set_real_num_rx_queues(dev, old_rx_count);
1321
1322 /* this error condition could happen only if rx and tx change
1323 * in opposite directions (e.g. the tx count rises while the rx
1324 * count decreases) and we can't do anything to fully restore the
1325 * original status
1326 */
1327 if (err2)
1328 pr_warn("Can't restore rx queues config %d -> %d %d",
1329 new_rx_count, old_rx_count, err2);
1330 else
1331 goto revert;
1332 }
1333
1334out:
1335 if (netif_running(dev)) {
1336 /* note that we need to swap the arguments WRT the enable part
1337 * to identify the range we have to disable
1338 */
1339 veth_disable_range_safe(dev, new_rx_count, old_rx_count);
1340 netif_carrier_on(dev);
1341 if (peer)
1342 netif_carrier_on(peer);
1343 }
1344
1345 /* update XDP supported features */
1346 veth_set_xdp_features(dev);
1347 if (peer)
1348 veth_set_xdp_features(peer);
1349
1350 return err;
1351
1352revert:
1353 new_rx_count = old_rx_count;
1354 old_rx_count = ch->rx_count;
1355 goto out;
1356}
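/* The channel count can be resized at runtime with, e.g.,
 * "ethtool -L veth0 rx 4 tx 4"; the rollback above restores the old rx
 * queue count if the tx update fails, and only newly added rx queues get
 * fresh NAPI/ring state.
 */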
1357
1358static int veth_open(struct net_device *dev)
1359{
1360 struct veth_priv *priv = netdev_priv(dev);
1361 struct net_device *peer = rtnl_dereference(priv->peer);
1362 int err;
1363
1364 if (!peer)
1365 return -ENOTCONN;
1366
1367 if (priv->_xdp_prog) {
1368 err = veth_enable_xdp(dev);
1369 if (err)
1370 return err;
1371 } else if (veth_gro_requested(dev)) {
1372 err = veth_napi_enable(dev);
1373 if (err)
1374 return err;
1375 }
1376
1377 if (peer->flags & IFF_UP) {
1378 netif_carrier_on(dev);
1379 netif_carrier_on(peer);
1380 }
1381
1382 return 0;
1383}
1384
1385static int veth_close(struct net_device *dev)
1386{
1387 struct veth_priv *priv = netdev_priv(dev);
1388 struct net_device *peer = rtnl_dereference(priv->peer);
1389
1390 netif_carrier_off(dev);
1391 if (peer)
1392 netif_carrier_off(peer);
1393
1394 if (priv->_xdp_prog)
1395 veth_disable_xdp(dev);
1396 else if (veth_gro_requested(dev))
1397 veth_napi_del(dev);
1398
1399 return 0;
1400}
1401
1402static int is_valid_veth_mtu(int mtu)
1403{
1404 return mtu >= ETH_MIN_MTU && mtu <= ETH_MAX_MTU;
1405}
1406
1407static int veth_alloc_queues(struct net_device *dev)
1408{
1409 struct veth_priv *priv = netdev_priv(dev);
1410 int i;
1411
1412 priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL_ACCOUNT);
1413 if (!priv->rq)
1414 return -ENOMEM;
1415
1416 for (i = 0; i < dev->num_rx_queues; i++) {
1417 priv->rq[i].dev = dev;
1418 u64_stats_init(&priv->rq[i].stats.syncp);
1419 }
1420
1421 return 0;
1422}
1423
1424static void veth_free_queues(struct net_device *dev)
1425{
1426 struct veth_priv *priv = netdev_priv(dev);
1427
1428 kfree(priv->rq);
1429}
1430
1431static int veth_dev_init(struct net_device *dev)
1432{
1433 int err;
1434
1435 dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
1436 if (!dev->lstats)
1437 return -ENOMEM;
1438
1439 err = veth_alloc_queues(dev);
1440 if (err) {
1441 free_percpu(dev->lstats);
1442 return err;
1443 }
1444
1445 return 0;
1446}
1447
1448static void veth_dev_free(struct net_device *dev)
1449{
1450 veth_free_queues(dev);
1451 free_percpu(dev->lstats);
1452}
1453
1454#ifdef CONFIG_NET_POLL_CONTROLLER
1455static void veth_poll_controller(struct net_device *dev)
1456{
1457 /* veth only receives frames when its peer sends one.
1458 * Since it has nothing to do with disabling irqs, we are guaranteed
1459 * never to have pending data when we poll for it, so
1460 * there is nothing to do here.
1461 *
1462 * We need this though so netpoll recognizes us as an interface that
1463 * supports polling, which enables bridge devices in virt setups to
1464 * still use netconsole.
1465 */
1466}
1467#endif /* CONFIG_NET_POLL_CONTROLLER */
1468
1469static int veth_get_iflink(const struct net_device *dev)
1470{
1471 struct veth_priv *priv = netdev_priv(dev);
1472 struct net_device *peer;
1473 int iflink;
1474
1475 rcu_read_lock();
1476 peer = rcu_dereference(priv->peer);
1477 iflink = peer ? peer->ifindex : 0;
1478 rcu_read_unlock();
1479
1480 return iflink;
1481}
1482
1483static netdev_features_t veth_fix_features(struct net_device *dev,
1484 netdev_features_t features)
1485{
1486 struct veth_priv *priv = netdev_priv(dev);
1487 struct net_device *peer;
1488
1489 peer = rtnl_dereference(priv->peer);
1490 if (peer) {
1491 struct veth_priv *peer_priv = netdev_priv(peer);
1492
1493 if (peer_priv->_xdp_prog)
1494 features &= ~NETIF_F_GSO_SOFTWARE;
1495 }
1496 if (priv->_xdp_prog)
1497 features |= NETIF_F_GRO;
1498
1499 return features;
1500}
1501
1502static int veth_set_features(struct net_device *dev,
1503 netdev_features_t features)
1504{
1505 netdev_features_t changed = features ^ dev->features;
1506 struct veth_priv *priv = netdev_priv(dev);
1507 int err;
1508
1509 if (!(changed & NETIF_F_GRO) || !(dev->flags & IFF_UP) || priv->_xdp_prog)
1510 return 0;
1511
1512 if (features & NETIF_F_GRO) {
1513 err = veth_napi_enable(dev);
1514 if (err)
1515 return err;
1516
1517 xdp_features_set_redirect_target(dev, true);
1518 } else {
1519 xdp_features_clear_redirect_target(dev);
1520 veth_napi_del(dev);
1521 }
1522 return 0;
1523}
1524
1525static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
1526{
1527 struct veth_priv *peer_priv, *priv = netdev_priv(dev);
1528 struct net_device *peer;
1529
1530 if (new_hr < 0)
1531 new_hr = 0;
1532
1533 rcu_read_lock();
1534 peer = rcu_dereference(priv->peer);
1535 if (unlikely(!peer))
1536 goto out;
1537
1538 peer_priv = netdev_priv(peer);
1539 priv->requested_headroom = new_hr;
1540 new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
1541 dev->needed_headroom = new_hr;
1542 peer->needed_headroom = new_hr;
1543
1544out:
1545 rcu_read_unlock();
1546}
1547
1548static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1549 struct netlink_ext_ack *extack)
1550{
1551 struct veth_priv *priv = netdev_priv(dev);
1552 struct bpf_prog *old_prog;
1553 struct net_device *peer;
1554 unsigned int max_mtu;
1555 int err;
1556
1557 old_prog = priv->_xdp_prog;
1558 priv->_xdp_prog = prog;
1559 peer = rtnl_dereference(priv->peer);
1560
1561 if (prog) {
1562 if (!peer) {
1563 NL_SET_ERR_MSG_MOD(extack, "Cannot set XDP when peer is detached");
1564 err = -ENOTCONN;
1565 goto err;
1566 }
1567
1568 max_mtu = SKB_WITH_OVERHEAD(PAGE_SIZE - VETH_XDP_HEADROOM) -
1569 peer->hard_header_len;
1570 /* Allow increasing the max_mtu if the program supports
1571 * XDP fragments.
1572 */
1573 if (prog->aux->xdp_has_frags)
1574 max_mtu += PAGE_SIZE * MAX_SKB_FRAGS;
1575
1576 if (peer->mtu > max_mtu) {
1577 NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
1578 err = -ERANGE;
1579 goto err;
1580 }
1581
1582 if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
1583 NL_SET_ERR_MSG_MOD(extack, "XDP expects number of rx queues not less than peer tx queues");
1584 err = -ENOSPC;
1585 goto err;
1586 }
1587
1588 if (dev->flags & IFF_UP) {
1589 err = veth_enable_xdp(dev);
1590 if (err) {
1591 NL_SET_ERR_MSG_MOD(extack, "Setup for XDP failed");
1592 goto err;
1593 }
1594 }
1595
1596 if (!old_prog) {
1597 peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
1598 peer->max_mtu = max_mtu;
1599 }
1600
1601 xdp_features_set_redirect_target(dev, true);
1602 }
1603
1604 if (old_prog) {
1605 if (!prog) {
1606 if (!veth_gro_requested(dev))
1607 xdp_features_clear_redirect_target(dev);
1608
1609 if (dev->flags & IFF_UP)
1610 veth_disable_xdp(dev);
1611
1612 if (peer) {
1613 peer->hw_features |= NETIF_F_GSO_SOFTWARE;
1614 peer->max_mtu = ETH_MAX_MTU;
1615 }
1616 }
1617 bpf_prog_put(old_prog);
1618 }
1619
1620 if ((!!old_prog ^ !!prog) && peer)
1621 netdev_update_features(peer);
1622
1623 return 0;
1624err:
1625 priv->_xdp_prog = old_prog;
1626
1627 return err;
1628}
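/* A program can be attached from user space with, e.g.,
 * "ip link set dev veth0 xdp obj prog.o sec xdp" (iproute2). The checks
 * above require the peer to be present, the peer MTU to fit in a single
 * page unless the program supports frags, and enough rx queues to serve
 * the peer's tx queues.
 */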
1629
1630static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1631{
1632 switch (xdp->command) {
1633 case XDP_SETUP_PROG:
1634 return veth_xdp_set(dev, xdp->prog, xdp->extack);
1635 default:
1636 return -EINVAL;
1637 }
1638}
1639
1640static int veth_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
1641{
1642 struct veth_xdp_buff *_ctx = (void *)ctx;
1643
1644 if (!_ctx->skb)
1645 return -ENODATA;
1646
1647 *timestamp = skb_hwtstamps(_ctx->skb)->hwtstamp;
1648 return 0;
1649}
1650
1651static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
1652{
1653 struct veth_xdp_buff *_ctx = (void *)ctx;
1654
1655 if (!_ctx->skb)
1656 return -ENODATA;
1657
1658 *hash = skb_get_hash(_ctx->skb);
1659 return 0;
1660}
1661
1662static const struct net_device_ops veth_netdev_ops = {
1663 .ndo_init = veth_dev_init,
1664 .ndo_open = veth_open,
1665 .ndo_stop = veth_close,
1666 .ndo_start_xmit = veth_xmit,
1667 .ndo_get_stats64 = veth_get_stats64,
1668 .ndo_set_rx_mode = veth_set_multicast_list,
1669 .ndo_set_mac_address = eth_mac_addr,
1670#ifdef CONFIG_NET_POLL_CONTROLLER
1671 .ndo_poll_controller = veth_poll_controller,
1672#endif
1673 .ndo_get_iflink = veth_get_iflink,
1674 .ndo_fix_features = veth_fix_features,
1675 .ndo_set_features = veth_set_features,
1676 .ndo_features_check = passthru_features_check,
1677 .ndo_set_rx_headroom = veth_set_rx_headroom,
1678 .ndo_bpf = veth_xdp,
1679 .ndo_xdp_xmit = veth_ndo_xdp_xmit,
1680 .ndo_get_peer_dev = veth_peer_dev,
1681};
1682
1683static const struct xdp_metadata_ops veth_xdp_metadata_ops = {
1684 .xmo_rx_timestamp = veth_xdp_rx_timestamp,
1685 .xmo_rx_hash = veth_xdp_rx_hash,
1686};
1687
1688#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
1689 NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
1690 NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
1691 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
1692 NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX )
1693
1694static void veth_setup(struct net_device *dev)
1695{
1696 ether_setup(dev);
1697
1698 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1699 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1700 dev->priv_flags |= IFF_NO_QUEUE;
1701 dev->priv_flags |= IFF_PHONY_HEADROOM;
1702
1703 dev->netdev_ops = &veth_netdev_ops;
1704 dev->xdp_metadata_ops = &veth_xdp_metadata_ops;
1705 dev->ethtool_ops = &veth_ethtool_ops;
1706 dev->features |= NETIF_F_LLTX;
1707 dev->features |= VETH_FEATURES;
1708 dev->vlan_features = dev->features &
1709 ~(NETIF_F_HW_VLAN_CTAG_TX |
1710 NETIF_F_HW_VLAN_STAG_TX |
1711 NETIF_F_HW_VLAN_CTAG_RX |
1712 NETIF_F_HW_VLAN_STAG_RX);
1713 dev->needs_free_netdev = true;
1714 dev->priv_destructor = veth_dev_free;
1715 dev->max_mtu = ETH_MAX_MTU;
1716
1717 dev->hw_features = VETH_FEATURES;
1718 dev->hw_enc_features = VETH_FEATURES;
1719 dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
1720 netif_set_tso_max_size(dev, GSO_MAX_SIZE);
1721}
1722
1723/*
1724 * netlink interface
1725 */
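/* A device pair is typically created from user space with
 * "ip link add veth0 type veth peer name veth1"; the VETH_INFO_PEER
 * attribute parsed in veth_newlink() carries the peer's own ifinfomsg and
 * IFLA_* attributes.
 */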
1726
1727static int veth_validate(struct nlattr *tb[], struct nlattr *data[],
1728 struct netlink_ext_ack *extack)
1729{
1730 if (tb[IFLA_ADDRESS]) {
1731 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1732 return -EINVAL;
1733 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1734 return -EADDRNOTAVAIL;
1735 }
1736 if (tb[IFLA_MTU]) {
1737 if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
1738 return -EINVAL;
1739 }
1740 return 0;
1741}
1742
1743static struct rtnl_link_ops veth_link_ops;
1744
1745static void veth_disable_gro(struct net_device *dev)
1746{
1747 dev->features &= ~NETIF_F_GRO;
1748 dev->wanted_features &= ~NETIF_F_GRO;
1749 netdev_update_features(dev);
1750}
1751
1752static int veth_init_queues(struct net_device *dev, struct nlattr *tb[])
1753{
1754 int err;
1755
1756 if (!tb[IFLA_NUM_TX_QUEUES] && dev->num_tx_queues > 1) {
1757 err = netif_set_real_num_tx_queues(dev, 1);
1758 if (err)
1759 return err;
1760 }
1761 if (!tb[IFLA_NUM_RX_QUEUES] && dev->num_rx_queues > 1) {
1762 err = netif_set_real_num_rx_queues(dev, 1);
1763 if (err)
1764 return err;
1765 }
1766 return 0;
1767}
1768
1769static int veth_newlink(struct net *src_net, struct net_device *dev,
1770 struct nlattr *tb[], struct nlattr *data[],
1771 struct netlink_ext_ack *extack)
1772{
1773 int err;
1774 struct net_device *peer;
1775 struct veth_priv *priv;
1776 char ifname[IFNAMSIZ];
1777 struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
1778 unsigned char name_assign_type;
1779 struct ifinfomsg *ifmp;
1780 struct net *net;
1781
1782 /*
1783 * create and register peer first
1784 */
1785 if (data != NULL && data[VETH_INFO_PEER] != NULL) {
1786 struct nlattr *nla_peer;
1787
1788 nla_peer = data[VETH_INFO_PEER];
1789 ifmp = nla_data(nla_peer);
1790 err = rtnl_nla_parse_ifla(peer_tb,
1791 nla_data(nla_peer) + sizeof(struct ifinfomsg),
1792 nla_len(nla_peer) - sizeof(struct ifinfomsg),
1793 NULL);
1794 if (err < 0)
1795 return err;
1796
1797 err = veth_validate(peer_tb, NULL, extack);
1798 if (err < 0)
1799 return err;
1800
1801 tbp = peer_tb;
1802 } else {
1803 ifmp = NULL;
1804 tbp = tb;
1805 }
1806
1807 if (ifmp && tbp[IFLA_IFNAME]) {
1808 nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
1809 name_assign_type = NET_NAME_USER;
1810 } else {
1811 snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
1812 name_assign_type = NET_NAME_ENUM;
1813 }
1814
1815 net = rtnl_link_get_net(src_net, tbp);
1816 if (IS_ERR(net))
1817 return PTR_ERR(net);
1818
1819 peer = rtnl_create_link(net, ifname, name_assign_type,
1820 &veth_link_ops, tbp, extack);
1821 if (IS_ERR(peer)) {
1822 put_net(net);
1823 return PTR_ERR(peer);
1824 }
1825
1826 if (!ifmp || !tbp[IFLA_ADDRESS])
1827 eth_hw_addr_random(peer);
1828
1829 if (ifmp && (dev->ifindex != 0))
1830 peer->ifindex = ifmp->ifi_index;
1831
1832 netif_inherit_tso_max(peer, dev);
1833
1834 err = register_netdevice(peer);
1835 put_net(net);
1836 net = NULL;
1837 if (err < 0)
1838 goto err_register_peer;
1839
1840 /* keep GRO disabled by default to be consistent with the established
1841 * veth behavior
1842 */
1843 veth_disable_gro(peer);
1844 netif_carrier_off(peer);
1845
1846 err = rtnl_configure_link(peer, ifmp, 0, NULL);
1847 if (err < 0)
1848 goto err_configure_peer;
1849
1850 /*
1851 * register dev last
1852 *
1853 * note that since we've registered a new device, the dev's name
1854 * should be re-allocated
1855 */
1856
1857 if (tb[IFLA_ADDRESS] == NULL)
1858 eth_hw_addr_random(dev);
1859
1860 if (tb[IFLA_IFNAME])
1861 nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
1862 else
1863 snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
1864
1865 err = register_netdevice(dev);
1866 if (err < 0)
1867 goto err_register_dev;
1868
1869 netif_carrier_off(dev);
1870
1871 /*
1872 * tie the devices together
1873 */
1874
1875 priv = netdev_priv(dev);
1876 rcu_assign_pointer(priv->peer, peer);
1877 err = veth_init_queues(dev, tb);
1878 if (err)
1879 goto err_queues;
1880
1881 priv = netdev_priv(peer);
1882 rcu_assign_pointer(priv->peer, dev);
1883 err = veth_init_queues(peer, tb);
1884 if (err)
1885 goto err_queues;
1886
1887 veth_disable_gro(dev);
1888 /* update XDP supported features */
1889 veth_set_xdp_features(dev);
1890 veth_set_xdp_features(peer);
1891
1892 return 0;
1893
1894err_queues:
1895 unregister_netdevice(dev);
1896err_register_dev:
1897 /* nothing to do */
1898err_configure_peer:
1899 unregister_netdevice(peer);
1900 return err;
1901
1902err_register_peer:
1903 free_netdev(peer);
1904 return err;
1905}
1906
1907static void veth_dellink(struct net_device *dev, struct list_head *head)
1908{
1909 struct veth_priv *priv;
1910 struct net_device *peer;
1911
1912 priv = netdev_priv(dev);
1913 peer = rtnl_dereference(priv->peer);
1914
1915 /* Note : dellink() is called from default_device_exit_batch(),
1916 * before a rcu_synchronize() point. The devices are guaranteed
1917 * not being freed before one RCU grace period.
1918 */
1919 RCU_INIT_POINTER(priv->peer, NULL);
1920 unregister_netdevice_queue(dev, head);
1921
1922 if (peer) {
1923 priv = netdev_priv(peer);
1924 RCU_INIT_POINTER(priv->peer, NULL);
1925 unregister_netdevice_queue(peer, head);
1926 }
1927}
1928
1929static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
1930 [VETH_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
1931};
1932
1933static struct net *veth_get_link_net(const struct net_device *dev)
1934{
1935 struct veth_priv *priv = netdev_priv(dev);
1936 struct net_device *peer = rtnl_dereference(priv->peer);
1937
1938 return peer ? dev_net(peer) : dev_net(dev);
1939}
1940
1941static unsigned int veth_get_num_queues(void)
1942{
1943 /* enforce the same queue limit as rtnl_create_link */
1944 int queues = num_possible_cpus();
1945
1946 if (queues > 4096)
1947 queues = 4096;
1948 return queues;
1949}
1950
1951static struct rtnl_link_ops veth_link_ops = {
1952 .kind = DRV_NAME,
1953 .priv_size = sizeof(struct veth_priv),
1954 .setup = veth_setup,
1955 .validate = veth_validate,
1956 .newlink = veth_newlink,
1957 .dellink = veth_dellink,
1958 .policy = veth_policy,
1959 .maxtype = VETH_INFO_MAX,
1960 .get_link_net = veth_get_link_net,
1961 .get_num_tx_queues = veth_get_num_queues,
1962 .get_num_rx_queues = veth_get_num_queues,
1963};
1964
1965/*
1966 * init/fini
1967 */
1968
1969static __init int veth_init(void)
1970{
1971 return rtnl_link_register(&veth_link_ops);
1972}
1973
1974static __exit void veth_exit(void)
1975{
1976 rtnl_link_unregister(&veth_link_ops);
1977}
1978
1979module_init(veth_init);
1980module_exit(veth_exit);
1981
1982MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
1983MODULE_LICENSE("GPL v2");
1984MODULE_ALIAS_RTNL_LINK(DRV_NAME);